author    Octavian Purdila <octavian.purdila@intel.com>    2015-03-22 20:33:39 +0200
committer Jonathan Cameron <jic23@kernel.org>              2015-03-29 16:17:10 +0100
commit    f4f4673b7535eff4ee1a8cfb1685fa1e1a0cb79d (patch)
tree      c2914fbba47032f26e1c1031e3eb17c41b2c3402 /drivers/iio
parent    37d3455672732b29a477732a94abfe95e199f0ce (diff)
iio: add support for hardware fifo
Some devices have hardware buffers that can store a number of samples for later consumption. Hardware usually provides interrupts to notify the processor when the FIFO is full or when it has reached a certain watermark level. This helps reduce the number of interrupts to the host processor and thus decreases power consumption.

This patch enables the use of hardware FIFOs for IIO devices in conjunction with software device buffers. When the hardware FIFO is enabled, the samples are stored in the hardware FIFO. The samples are later flushed to the software device buffer when the number of entries in the hardware FIFO reaches the hardware watermark, or when a flush operation is triggered by the user by doing a non-blocking read on an empty software device buffer.

In order to implement hardware FIFO support, device drivers must implement the following new operations: setting and getting the hardware FIFO watermark level, and flushing the hardware FIFO to the software device buffer. The device must also expose information about the hardware FIFO, such as its minimum and maximum watermark and, if necessary, a list of supported watermark values. Finally, the device driver must activate the hardware FIFO when the device buffer is enabled, if the current device settings allow it.

The software device buffer watermark is passed by the IIO core to the device driver as a hint for the hardware FIFO watermark. The device driver can adjust this value to account for hardware limitations (such as capping it to the maximum hardware watermark or adjusting it to a value that is supported by the hardware). It can also disable the hardware watermark (and implicitly the hardware FIFO) if this value is below the minimum hardware watermark.

Since a driver may support the hardware FIFO only when not in triggered buffer mode (due to the different semantics of hardware FIFO sampling and triggered sampling), this patch changes the IIO core code to allow falling back to non-triggered buffered mode if no trigger is enabled.

Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Reviewed-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jonathan Cameron <jic23@kernel.org>
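[Editor's note] To make the driver-side contract described above concrete, the sketch below shows how a driver might wire up the two iio_info callbacks this series introduces. Everything named foo_* (the state structure, the register stubs, the FIFO depth handling) is hypothetical and only stands in for real device code; only the callback names and signatures (hwfifo_set_watermark, hwfifo_flush_to_buffer) come from the patch set itself.

/*
 * Illustrative sketch only -- not part of this patch. All foo_* names
 * are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>

struct foo_state {
	struct mutex lock;
	unsigned int watermark;		/* watermark currently programmed in HW */
	unsigned int fifo_depth;	/* deepest watermark the FIFO supports */
};

/* Hypothetical register accessors, standing in for real bus I/O. */
static unsigned int foo_fifo_level(struct foo_state *st)
{
	return 0;	/* would read the FIFO fill-level register */
}

static int foo_read_fifo_scan(struct foo_state *st, u16 *scan)
{
	return 0;	/* would read one scan's worth of data out of the FIFO */
}

static int foo_hwfifo_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
	struct foo_state *st = iio_priv(indio_dev);

	/*
	 * The IIO core passes the software buffer watermark as a hint.
	 * Clamp it to what the hardware supports; a driver could also
	 * disable the hardware FIFO here if val is below the minimum
	 * hardware watermark.
	 */
	mutex_lock(&st->lock);
	st->watermark = min_t(unsigned int, val, st->fifo_depth);
	/* ... program st->watermark into the device's watermark register ... */
	mutex_unlock(&st->lock);

	return 0;
}

static int foo_hwfifo_flush_to_buffer(struct iio_dev *indio_dev,
				      unsigned int count)
{
	struct foo_state *st = iio_priv(indio_dev);
	u16 scan[8];	/* one scan, sized for the real device's scan layout */
	unsigned int i, n;

	/*
	 * Drain at most 'count' scans from the hardware FIFO into the
	 * software device buffer and report how many were actually moved;
	 * the core treats a return value <= 0 as "nothing flushed".
	 */
	mutex_lock(&st->lock);
	n = min_t(unsigned int, count, foo_fifo_level(st));
	for (i = 0; i < n; i++) {
		foo_read_fifo_scan(st, scan);
		iio_push_to_buffers(indio_dev, scan);
	}
	mutex_unlock(&st->lock);

	return n;
}

static const struct iio_info foo_info = {
	.driver_module		= THIS_MODULE,
	/* ... read_raw, write_raw, etc. ... */
	.hwfifo_set_watermark	= foo_hwfifo_set_watermark,
	.hwfifo_flush_to_buffer	= foo_hwfifo_flush_to_buffer,
};

The value returned by the flush callback is what iio_buffer_ready() in the diff below counts as flushed samples: a positive count may satisfy the reader's watermark, while zero or a negative error leaves the reader waiting. The minimum and maximum hardware watermarks mentioned in the commit message would typically be exposed by the driver as additional sysfs attributes.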
Diffstat (limited to 'drivers/iio')
-rw-r--r--  drivers/iio/industrialio-buffer.c | 58
1 file changed, 45 insertions(+), 13 deletions(-)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a24b2e005eb3..df919f44d513 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -42,18 +42,47 @@ static size_t iio_buffer_data_available(struct iio_buffer *buf)
return buf->access->data_available(buf);
}
+static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
+ struct iio_buffer *buf, size_t required)
+{
+ if (!indio_dev->info->hwfifo_flush_to_buffer)
+ return -ENODEV;
+
+ return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
+}
+
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
- size_t to_wait)
+ size_t to_wait, int to_flush)
{
+ size_t avail;
+ int flushed = 0;
+
/* wakeup if the device was unregistered */
if (!indio_dev->info)
return true;
/* drain the buffer if it was disabled */
- if (!iio_buffer_is_active(buf))
+ if (!iio_buffer_is_active(buf)) {
to_wait = min_t(size_t, to_wait, 1);
+ to_flush = 0;
+ }
+
+ avail = iio_buffer_data_available(buf);
- if (iio_buffer_data_available(buf) >= to_wait)
+ if (avail >= to_wait) {
+ /* force a flush for non-blocking reads */
+ if (!to_wait && !avail && to_flush)
+ iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+ return true;
+ }
+
+ if (to_flush)
+ flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
+ to_wait - avail);
+ if (flushed <= 0)
+ return false;
+
+ if (avail + flushed >= to_wait)
return true;
return false;
@@ -72,6 +101,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_buffer *rb = indio_dev->buffer;
size_t datum_size;
size_t to_wait = 0;
+ size_t to_read;
int ret;
if (!indio_dev->info)
@@ -89,12 +119,14 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
if (!datum_size)
return 0;
+ to_read = min_t(size_t, n / datum_size, rb->watermark);
+
if (!(filp->f_flags & O_NONBLOCK))
- to_wait = min_t(size_t, n / datum_size, rb->watermark);
+ to_wait = to_read;
do {
ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait));
+ iio_buffer_ready(indio_dev, rb, to_wait, to_read));
if (ret)
return ret;
@@ -122,7 +154,7 @@ unsigned int iio_buffer_poll(struct file *filp,
return -ENODEV;
poll_wait(filp, &rb->pollq, wait);
- if (iio_buffer_ready(indio_dev, rb, rb->watermark))
+ if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
return POLLIN | POLLRDNORM;
return 0;
}
@@ -661,19 +693,16 @@ static int __iio_update_buffers(struct iio_dev *indio_dev,
}
}
/* Definitely possible for devices to support both of these. */
- if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
- if (!indio_dev->trig) {
- printk(KERN_INFO "Buffer not started: no trigger\n");
- ret = -EINVAL;
- /* Can only occur on first buffer */
- goto error_run_postdisable;
- }
+ if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
} else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
} else { /* Should never be reached */
+ /* Can only occur on first buffer */
+ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
+ pr_info("Buffer not started: no trigger\n");
ret = -EINVAL;
goto error_run_postdisable;
}
@@ -825,6 +854,9 @@ static ssize_t iio_buffer_store_watermark(struct device *dev,
}
buffer->watermark = val;
+
+ if (indio_dev->info->hwfifo_set_watermark)
+ indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
mutex_unlock(&indio_dev->mlock);