linux/drivers/staging/iio/ring_sw.c

/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
					    int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
	spin_lock_init(&ring->use_lock);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);
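
/* Example (illustrative only): the use count is intended to bracket any
 * access that must not race with a resize; while it is non-zero,
 * iio_request_update_sw_rb() below refuses to reallocate the ring:
 *
 *	iio_mark_sw_rb_in_use(r);
 *	// ... read from the ring ...
 *	iio_unmark_sw_rb_in_use(r);
 */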

/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value. Before this runs it is NULL and read attempts fail
	 * with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr used to ensure we never have an invalid pointer;
	 * it may be slightly lagging, but is never invalid
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care needed with smp systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - if the read pointer moves before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd) {
			temp_ptr = ring->data;
		}
		/* We are moving the pointer on by one because the ring is
		 * full. Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);
		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* investigate if our event barrier has been passed */
	/* There are definite 'issues' with this and chances of
	 * simultaneous read */
	/* Also need to use loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
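
/* Example (illustrative only): a driver's data-ready bottom half might push
 * one scan into the ring roughly as follows. "st", "st->ring" and
 * "st->rx_buf" are hypothetical driver state; rx_buf must hold
 * ring->buf.bpd bytes, and the timestamp comes from the usual
 * iio_get_time_ns() helper:
 *
 *	static void my_drv_data_rdy_bh(struct my_drv_state *st)
 *	{
 *		s64 ts = iio_get_time_ns();
 *
 *		if (iio_store_to_sw_ring(st->ring, st->rx_buf, ts))
 *			pr_err("my_drv: ring store failed\n");
 *	}
 */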

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to whole of ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through 'end' of ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			/* Compute the wrapped read position before updating
			 * max_copied, otherwise the offset is always zero. */
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now to verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* setup the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip. If this occurs,
	 * leave it be.
	 */
	/* Tricky - deal with loops */
	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
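
/* Example (illustrative only): how a read-side caller might consume the
 * result of iio_rip_sw_rb(). "ubuf" and "n" are a hypothetical userspace
 * buffer and byte count. Note that on the zero/error paths the function has
 * already freed the local copy, so the caller frees only on success:
 *
 *	ssize_t my_read(struct iio_ring_buffer *r, char __user *ubuf, size_t n)
 *	{
 *		u8 *data;
 *		int dead_offset;
 *		ssize_t copied = iio_rip_sw_rb(r, n, &data, &dead_offset);
 *
 *		if (copied <= 0)
 *			return copied;
 *		// Bytes before dead_offset may have been overwritten while
 *		// we copied, so only the tail of the local copy is valid.
 *		if (copy_to_user(ubuf, data + dead_offset, copied))
 *			copied = -EFAULT;
 *		kfree(data);
 *		return copied;
 *	}
 */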

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		/* Drop the use count taken above before bailing out */
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* A writer moved last_written_p while we copied - retry */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);
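
/* Example (illustrative only): fetching the most recent scan, e.g. for a
 * "read latest value" path. "scan" and MY_SCAN_BYTES are hypothetical;
 * the buffer must hold at least iio_get_bpd_sw_rb(r) bytes, and -EAGAIN
 * simply means nothing has been stored yet:
 *
 *	u8 scan[MY_SCAN_BYTES];
 *	int ret = iio_read_last_from_sw_rb(r, scan);
 *
 *	if (!ret)
 *		process_scan(scan);	// hypothetical consumer
 */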

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);
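
/* Example (illustrative only): the deferred-resize flow these accessors
 * implement. Changing bpd or length only marks the ring stale (via
 * access.mark_param_change, presumably wired to iio_mark_update_needed_sw_rb
 * below); the reallocation happens in iio_request_update_sw_rb() once no
 * user holds the use count. The parameter values are hypothetical:
 *
 *	iio_set_bpd_sw_rb(r, 4);	// 4 bytes per scan
 *	iio_set_length_sw_rb(r, 256);	// 256 scans of history
 *	if (iio_request_update_sw_rb(r) == -EAGAIN)
 *		;	// a reader is active - caller should retry later
 */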

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);

	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;

	iio_ring_buffer_init(buf, indio_dev);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
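
/* Example (illustrative only): typical lifecycle in a driver's ring setup,
 * with hypothetical parameter values. The ring is configured up front; its
 * backing store is allocated later by iio_request_update_sw_rb():
 *
 *	struct iio_ring_buffer *r = iio_sw_rb_allocate(indio_dev);
 *
 *	if (!r)
 *		return -ENOMEM;
 *	iio_set_bpd_sw_rb(r, 2);	// e.g. one 16-bit sample per scan
 *	iio_set_length_sw_rb(r, 128);	// 128 scans of history
 *	// ... register the ring, run, then on teardown:
 *	iio_sw_rb_free(r);		// drops the device reference
 */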

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");