1 #include <linux/interrupt.h>
3 #include <linux/gpio.h>
4 #include <linux/workqueue.h>
5 #include <linux/mutex.h>
6 #include <linux/device.h>
7 #include <linux/kernel.h>
8 #include <linux/spi/spi.h>
9 #include <linux/sysfs.h>
10 #include <linux/list.h>
11 #include <linux/slab.h>
15 #include "../ring_sw.h"
16 #include "../kfifo_buf.h"
18 #include "../trigger.h"
19 #include "lis3l02dq.h"
22 * combine_8_to_16() utility function to munge to u8s into u16
24 static inline u16 combine_8_to_16(u8 lower, u8 upper)
28 return _lower | (_upper << 8);
32 * lis3l02dq_scan_el_set_state() set whether a scan contains a given channel
33 * @scan_el: associtate iio scan element attribute
34 * @indio_dev: the device structure
35 * @bool: desired state
37 * mlock already held when this is called.
39 static int lis3l02dq_scan_el_set_state(struct iio_scan_el *scan_el,
40 struct iio_dev *indio_dev,
46 ret = lis3l02dq_spi_read_reg_8(&indio_dev->dev,
47 LIS3L02DQ_REG_CTRL_1_ADDR,
51 switch (scan_el->label) {
52 case LIS3L02DQ_REG_OUT_X_L_ADDR:
53 mask = LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
55 case LIS3L02DQ_REG_OUT_Y_L_ADDR:
56 mask = LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
58 case LIS3L02DQ_REG_OUT_Z_L_ADDR:
59 mask = LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
66 if (!(mask & t) == state) {
71 ret = lis3l02dq_spi_write_reg_8(&indio_dev->dev,
72 LIS3L02DQ_REG_CTRL_1_ADDR,
/* Per-axis scan element controls. The third macro argument (the "label")
 * is the register address of that axis' low output byte, which
 * lis3l02dq_scan_el_set_state() uses to pick the matching enable bit. */
static IIO_SCAN_EL_C(accel_x, 0,
		     LIS3L02DQ_REG_OUT_X_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_y, 1,
		     LIS3L02DQ_REG_OUT_Y_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
static IIO_SCAN_EL_C(accel_z, 2,
		     LIS3L02DQ_REG_OUT_Z_L_ADDR,
		     &lis3l02dq_scan_el_set_state);
/* Acceleration samples: 12 significant bits, signed, in a 16 bit word */
static IIO_CONST_ATTR_SCAN_EL_TYPE(accel, s, 12, 16);
/* Timestamp occupies scan index 3 and is a signed 64 bit value */
static IIO_SCAN_EL_TIMESTAMP(3);
static IIO_CONST_ATTR_SCAN_EL_TYPE(timestamp, s, 64, 64);
92 static struct attribute *lis3l02dq_scan_el_attrs[] = {
93 &iio_scan_el_accel_x.dev_attr.attr,
94 &iio_const_attr_accel_x_index.dev_attr.attr,
95 &iio_scan_el_accel_y.dev_attr.attr,
96 &iio_const_attr_accel_y_index.dev_attr.attr,
97 &iio_scan_el_accel_z.dev_attr.attr,
98 &iio_const_attr_accel_z_index.dev_attr.attr,
99 &iio_const_attr_accel_type.dev_attr.attr,
100 &iio_scan_el_timestamp.dev_attr.attr,
101 &iio_const_attr_timestamp_index.dev_attr.attr,
102 &iio_const_attr_timestamp_type.dev_attr.attr,
106 static struct attribute_group lis3l02dq_scan_el_group = {
107 .attrs = lis3l02dq_scan_el_attrs,
108 .name = "scan_elements",
112 * lis3l02dq_poll_func_th() top half interrupt handler called by trigger
113 * @private_data: iio_dev
115 static void lis3l02dq_poll_func_th(struct iio_dev *indio_dev, s64 time)
117 struct iio_sw_ring_helper_state *h
118 = iio_dev_get_devdata(indio_dev);
119 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
120 /* in this case we need to slightly extend the helper function */
121 iio_sw_poll_func_th(indio_dev, time);
123 /* Indicate that this interrupt is being handled */
124 /* Technically this is trigger related, but without this
125 * handler running there is currently now way for the interrupt
132 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
134 static int lis3l02dq_data_rdy_trig_poll(struct iio_dev *indio_dev,
139 struct iio_sw_ring_helper_state *h
140 = iio_dev_get_devdata(indio_dev);
141 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
143 iio_trigger_poll(st->trig, timestamp);
148 /* This is an event as it is a response to a physical interrupt */
149 IIO_EVENT_SH(data_rdy_trig, &lis3l02dq_data_rdy_trig_poll);
152 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
154 ssize_t lis3l02dq_read_accel_from_ring(struct device *dev,
155 struct device_attribute *attr,
158 struct iio_scan_el *el = NULL;
159 int ret, len = 0, i = 0;
160 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
161 struct iio_dev *dev_info = dev_get_drvdata(dev);
162 struct iio_ring_buffer *ring = dev_info->ring;
163 struct attribute_group *scan_el_attrs = ring->scan_el_attrs;
166 while (scan_el_attrs->attrs[i]) {
167 el = to_iio_scan_el((struct device_attribute *)
168 (scan_el_attrs->attrs[i]));
169 /* label is in fact the address */
170 if (el->label == this_attr->address)
174 if (!scan_el_attrs->attrs[i]) {
178 /* If this element is in the scan mask */
179 ret = iio_scan_mask_query(ring, el->number);
183 data = kmalloc(ring->access.get_bytes_per_datum(ring),
187 ret = ring->access.read_last(ring,
190 goto error_free_data;
195 len = iio_scan_mask_count_to_right(ring, el->number);
198 goto error_free_data;
200 len = sprintf(buf, "ring %d\n", data[len]);
204 return ret ? ret : len;
208 static const u8 read_all_tx_array[] = {
209 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
210 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
211 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
212 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
213 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
214 LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
218 * lis3l02dq_read_all() Reads all channels currently selected
219 * @st: device specific state
220 * @rx_array: (dma capable) recieve array, must be at least
221 * 4*number of channels
223 static int lis3l02dq_read_all(struct lis3l02dq_state *st, u8 *rx_array)
225 struct iio_ring_buffer *ring = st->help.indio_dev->ring;
226 struct spi_transfer *xfers;
227 struct spi_message msg;
230 xfers = kzalloc((ring->scan_count) * 2
231 * sizeof(*xfers), GFP_KERNEL);
235 mutex_lock(&st->buf_lock);
237 for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++) {
238 if (ring->scan_mask & (1 << i)) {
240 xfers[j].tx_buf = st->tx + 2*j;
241 st->tx[2*j] = read_all_tx_array[i*4];
244 xfers[j].rx_buf = rx_array + j*2;
245 xfers[j].bits_per_word = 8;
247 xfers[j].cs_change = 1;
251 xfers[j].tx_buf = st->tx + 2*j;
252 st->tx[2*j] = read_all_tx_array[i*4 + 2];
255 xfers[j].rx_buf = rx_array + j*2;
256 xfers[j].bits_per_word = 8;
258 xfers[j].cs_change = 1;
262 /* After these are transmitted, the rx_buff should have
263 * values in alternate bytes
265 spi_message_init(&msg);
266 for (j = 0; j < ring->scan_count * 2; j++)
267 spi_message_add_tail(&xfers[j], &msg);
269 ret = spi_sync(st->us, &msg);
270 mutex_unlock(&st->buf_lock);
276 static void lis3l02dq_trigger_bh_to_ring(struct work_struct *work_s)
278 struct iio_sw_ring_helper_state *h
279 = container_of(work_s, struct iio_sw_ring_helper_state,
280 work_trigger_to_ring);
281 struct lis3l02dq_state *st = lis3l02dq_h_to_s(h);
284 iio_sw_trigger_bh_to_ring(work_s);
287 static int lis3l02dq_get_ring_element(struct iio_sw_ring_helper_state *h,
292 s16 *data = (s16 *)buf;
294 rx_array = kzalloc(4 * (h->indio_dev->ring->scan_count), GFP_KERNEL);
295 if (rx_array == NULL)
297 ret = lis3l02dq_read_all(lis3l02dq_h_to_s(h), rx_array);
300 for (i = 0; i < h->indio_dev->ring->scan_count; i++)
301 data[i] = combine_8_to_16(rx_array[i*4+1],
305 return i*sizeof(data[0]);
308 /* Caller responsible for locking as necessary. */
310 __lis3l02dq_write_data_ready_config(struct device *dev,
311 struct iio_event_handler_list *list,
317 struct iio_dev *indio_dev = dev_get_drvdata(dev);
319 /* Get the current event mask register */
320 ret = lis3l02dq_spi_read_reg_8(dev,
321 LIS3L02DQ_REG_CTRL_2_ADDR,
325 /* Find out if data ready is already on */
327 = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
329 /* Disable requested */
330 if (!state && currentlyset) {
332 valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
333 /* The double write is to overcome a hardware bug?*/
334 ret = lis3l02dq_spi_write_reg_8(dev,
335 LIS3L02DQ_REG_CTRL_2_ADDR,
339 ret = lis3l02dq_spi_write_reg_8(dev,
340 LIS3L02DQ_REG_CTRL_2_ADDR,
345 iio_remove_event_from_list(list,
346 &indio_dev->interrupts[0]
349 /* Enable requested */
350 } else if (state && !currentlyset) {
351 /* if not set, enable requested */
352 valold |= LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
353 iio_add_event_to_list(list, &indio_dev->interrupts[0]->ev_list);
354 ret = lis3l02dq_spi_write_reg_8(dev,
355 LIS3L02DQ_REG_CTRL_2_ADDR,
367 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
369 * If disabling the interrupt also does a final read to ensure it is clear.
370 * This is only important in some cases where the scan enable elements are
371 * switched before the ring is reenabled.
373 static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
376 struct lis3l02dq_state *st = trig->private_data;
379 __lis3l02dq_write_data_ready_config(&st->help.indio_dev->dev,
380 &iio_event_data_rdy_trig,
382 if (state == false) {
383 /* possible quirk with handler currently worked around
384 by ensuring the work queue is empty */
385 flush_scheduled_work();
386 /* Clear any outstanding ready events */
387 ret = lis3l02dq_read_all(st, NULL);
389 lis3l02dq_spi_read_reg_8(&st->help.indio_dev->dev,
390 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
395 static IIO_TRIGGER_NAME_ATTR;
397 static struct attribute *lis3l02dq_trigger_attrs[] = {
402 static const struct attribute_group lis3l02dq_trigger_attr_group = {
403 .attrs = lis3l02dq_trigger_attrs,
407 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
408 * @trig: the datardy trigger
410 * As the trigger may occur on any data element being updated it is
411 * really rather likely to occur during the read from the previous
412 * trigger event. The only way to discover if this has occured on
413 * boards not supporting level interrupts is to take a look at the line.
414 * If it is indicating another interrupt and we don't seem to have a
415 * handler looking at it, then we need to notify the core that we need
416 * to tell the triggering core to try reading all these again.
418 static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
420 struct lis3l02dq_state *st = trig->private_data;
421 enable_irq(st->us->irq);
422 /* If gpio still high (or high again) */
423 if (gpio_get_value(irq_to_gpio(st->us->irq)))
424 if (st->inter == 0) {
425 /* already interrupt handler dealing with it */
426 disable_irq_nosync(st->us->irq);
427 if (st->inter == 1) {
428 /* interrupt handler snuck in between test
430 enable_irq(st->us->irq);
435 /* irq reenabled so success! */
439 int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
442 struct lis3l02dq_state *state = indio_dev->dev_data;
444 state->trig = iio_allocate_trigger();
448 state->trig->name = kasprintf(GFP_KERNEL,
451 if (!state->trig->name) {
453 goto error_free_trig;
456 state->trig->dev.parent = &state->us->dev;
457 state->trig->owner = THIS_MODULE;
458 state->trig->private_data = state;
459 state->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
460 state->trig->try_reenable = &lis3l02dq_trig_try_reen;
461 state->trig->control_attrs = &lis3l02dq_trigger_attr_group;
462 ret = iio_trigger_register(state->trig);
464 goto error_free_trig_name;
468 error_free_trig_name:
469 kfree(state->trig->name);
471 iio_free_trigger(state->trig);
476 void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
478 struct lis3l02dq_state *state = indio_dev->dev_data;
480 iio_trigger_unregister(state->trig);
481 kfree(state->trig->name);
482 iio_free_trigger(state->trig);
485 void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
487 kfree(indio_dev->pollfunc);
488 lis3l02dq_free_buf(indio_dev->ring);
491 int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
494 struct iio_sw_ring_helper_state *h = iio_dev_get_devdata(indio_dev);
495 struct iio_ring_buffer *ring;
496 INIT_WORK(&h->work_trigger_to_ring, lis3l02dq_trigger_bh_to_ring);
497 h->get_ring_element = &lis3l02dq_get_ring_element;
499 ring = lis3l02dq_alloc_buf(indio_dev);
503 indio_dev->ring = ring;
504 /* Effectively select the ring buffer implementation */
505 lis3l02dq_register_buf_funcs(&ring->access);
507 ring->scan_el_attrs = &lis3l02dq_scan_el_group;
508 ring->scan_timestamp = true;
509 ring->preenable = &iio_sw_ring_preenable;
510 ring->postenable = &iio_triggered_ring_postenable;
511 ring->predisable = &iio_triggered_ring_predisable;
512 ring->owner = THIS_MODULE;
514 /* Set default scan mode */
515 iio_scan_mask_set(ring, iio_scan_el_accel_x.number);
516 iio_scan_mask_set(ring, iio_scan_el_accel_y.number);
517 iio_scan_mask_set(ring, iio_scan_el_accel_z.number);
519 ret = iio_alloc_pollfunc(indio_dev, NULL, &lis3l02dq_poll_func_th);
521 goto error_iio_sw_rb_free;
522 indio_dev->modes |= INDIO_RING_TRIGGERED;
525 error_iio_sw_rb_free:
526 lis3l02dq_free_buf(indio_dev->ring);