source: GPL/branches/uniaud32-next/lib32/regmap.c@ 652

Last change on this file since 652 was 652, checked in by Paul Smedley, 5 years ago

Update regmap & regcache to 5.10.10 kernel code

File size: 79.6 KB
1// SPDX-License-Identifier: GPL-2.0
2//
3// Register map access API
4//
5// Copyright 2011 Wolfson Microelectronics plc
6//
7// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9/* from 5.10.10 */
10
11#include <linux/device.h>
12#include <linux/slab.h>
13#include <linux/export.h>
14#include <linux/mutex.h>
15#include <linux/err.h>
16//#include <linux/property.h>
17#include <linux/rbtree.h>
18#include <linux/sched.h>
19#include <linux/delay.h>
20#include <linux/log2.h>
21//#include <linux/hwspinlock.h>
22#include <asm/unaligned.h>
23#include <linux/module.h>
24#include <linux/workqueue.h>
25#include <linux/byteorder/little_endian.h>
26#include <linux/printk.h>
27
28/* hwspinlock mode argument */
29#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
30#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
31#define HWLOCK_RAW 0x03
32
33#define CREATE_TRACE_POINTS
34//#include "trace.h"
35
36#include "internal.h"
37
38/*
39 * Sometimes for failures during very early init the trace
40 * infrastructure isn't available early enough to be used. For this
41 * sort of problem, defining LOG_DEVICE will add printks for basic
42 * register I/O on a specific device.
43 */
44#undef LOG_DEVICE
45
46#ifdef LOG_DEVICE
47static inline bool regmap_should_log(struct regmap *map)
48{
49 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
50}
51#else
52static inline bool regmap_should_log(struct regmap *map) { return false; }
53#endif
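/*
 * A minimal sketch (hypothetical device name): to log register I/O for
 * a codec whose dev_name() is "0-001a", replace the #undef above with:
 *
 *	#define LOG_DEVICE "0-001a"
 */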
54
55
56static int _regmap_update_bits(struct regmap *map, unsigned int reg,
57 unsigned int mask, unsigned int val,
58 bool *change, bool force_write);
59
60static int _regmap_bus_reg_read(void *context, unsigned int reg,
61 unsigned int *val);
62static int _regmap_bus_read(void *context, unsigned int reg,
63 unsigned int *val);
64static int _regmap_bus_formatted_write(void *context, unsigned int reg,
65 unsigned int val);
66static int _regmap_bus_reg_write(void *context, unsigned int reg,
67 unsigned int val);
68static int _regmap_bus_raw_write(void *context, unsigned int reg,
69 unsigned int val);
70
71bool regmap_reg_in_ranges(unsigned int reg,
72 const struct regmap_range *ranges,
73 unsigned int nranges)
74{
75 const struct regmap_range *r;
76 int i;
77
78 for (i = 0, r = ranges; i < nranges; i++, r++)
79 if (regmap_reg_in_range(reg, r))
80 return true;
81 return false;
82}
83EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
84
85bool regmap_check_range_table(struct regmap *map, unsigned int reg,
86 const struct regmap_access_table *table)
87{
88 /* Check "no ranges" first */
89 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
90 return false;
91
92 /* In case zero "yes ranges" are supplied, any reg is OK */
93 if (!table->n_yes_ranges)
94 return true;
95
96 return regmap_reg_in_ranges(reg, table->yes_ranges,
97 table->n_yes_ranges);
98}
99EXPORT_SYMBOL_GPL(regmap_check_range_table);
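/*
 * A minimal sketch of an access table (hypothetical "foo" device that
 * exposes registers 0x00-0x0f except 0x08):
 *
 *	static const struct regmap_range foo_yes_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *	};
 *	static const struct regmap_range foo_no_ranges[] = {
 *		regmap_reg_range(0x08, 0x08),
 *	};
 *	static const struct regmap_access_table foo_table = {
 *		.yes_ranges = foo_yes_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_yes_ranges),
 *		.no_ranges = foo_no_ranges,
 *		.n_no_ranges = ARRAY_SIZE(foo_no_ranges),
 *	};
 */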
100
101bool regmap_writeable(struct regmap *map, unsigned int reg)
102{
103 if (map->max_register && reg > map->max_register)
104 return false;
105
106 if (map->writeable_reg)
107 return map->writeable_reg(map->dev, reg);
108
109 if (map->wr_table)
110 return regmap_check_range_table(map, reg, map->wr_table);
111
112 return true;
113}
114
115bool regmap_cached(struct regmap *map, unsigned int reg)
116{
117 int ret;
118 unsigned int val;
119
120 if (map->cache_type == REGCACHE_NONE)
121 return false;
122
123 if (!map->cache_ops)
124 return false;
125
126 if (map->max_register && reg > map->max_register)
127 return false;
128
129 map->lock(map->lock_arg);
130 ret = regcache_read(map, reg, &val);
131 map->unlock(map->lock_arg);
132 if (ret)
133 return false;
134
135 return true;
136}
137
138bool regmap_readable(struct regmap *map, unsigned int reg)
139{
140 if (!map->reg_read)
141 return false;
142
143 if (map->max_register && reg > map->max_register)
144 return false;
145
146 if (map->format.format_write)
147 return false;
148
149 if (map->readable_reg)
150 return map->readable_reg(map->dev, reg);
151
152 if (map->rd_table)
153 return regmap_check_range_table(map, reg, map->rd_table);
154
155 return true;
156}
157
158bool regmap_volatile(struct regmap *map, unsigned int reg)
159{
160 if (!map->format.format_write && !regmap_readable(map, reg))
161 return false;
162
163 if (map->volatile_reg)
164 return map->volatile_reg(map->dev, reg);
165
166 if (map->volatile_table)
167 return regmap_check_range_table(map, reg, map->volatile_table);
168
169 if (map->cache_ops)
170 return false;
171 else
172 return true;
173}
174
175bool regmap_precious(struct regmap *map, unsigned int reg)
176{
177 if (!regmap_readable(map, reg))
178 return false;
179
180 if (map->precious_reg)
181 return map->precious_reg(map->dev, reg);
182
183 if (map->precious_table)
184 return regmap_check_range_table(map, reg, map->precious_table);
185
186 return false;
187}
188
189bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
190{
191 if (map->writeable_noinc_reg)
192 return map->writeable_noinc_reg(map->dev, reg);
193
194 if (map->wr_noinc_table)
195 return regmap_check_range_table(map, reg, map->wr_noinc_table);
196
197 return true;
198}
199
200bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
201{
202 if (map->readable_noinc_reg)
203 return map->readable_noinc_reg(map->dev, reg);
204
205 if (map->rd_noinc_table)
206 return regmap_check_range_table(map, reg, map->rd_noinc_table);
207
208 return true;
209}
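/*
 * Note: each regmap_*able() predicate above resolves in the same order:
 * per-register driver callback first, then access table, then a default
 * (writeable/readable default to true, precious to false, and volatile
 * to true only when the map has no cache).
 */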
210
211static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
212 size_t num)
213{
214 unsigned int i;
215
216 for (i = 0; i < num; i++)
217 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
218 return false;
219
220 return true;
221}
222
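/*
 * Pack a 12-bit register and a 20-bit value into four bytes, big-endian
 * style: rrrrrrrr | rrrrvvvv | vvvvvvvv | vvvvvvvv.
 */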
223static void regmap_format_12_20_write(struct regmap *map,
224 unsigned int reg, unsigned int val)
225{
226 u8 *out = map->work_buf;
227
228 out[0] = reg >> 4;
229 out[1] = (reg << 4) | (val >> 16);
230 out[2] = val >> 8;
231 out[3] = val;
232}
233
234
235static void regmap_format_2_6_write(struct regmap *map,
236 unsigned int reg, unsigned int val)
237{
238 u8 *out = map->work_buf;
239
240 *out = (reg << 6) | val;
241}
242
243static void regmap_format_4_12_write(struct regmap *map,
244 unsigned int reg, unsigned int val)
245{
246 __be16 *out = map->work_buf;
247 *out = cpu_to_be16((reg << 12) | val);
248}
249
250static void regmap_format_7_9_write(struct regmap *map,
251 unsigned int reg, unsigned int val)
252{
253 __be16 *out = map->work_buf;
254 *out = cpu_to_be16((reg << 9) | val);
255}
256
257static void regmap_format_10_14_write(struct regmap *map,
258 unsigned int reg, unsigned int val)
259{
260 u8 *out = map->work_buf;
261
262 out[2] = val;
263 out[1] = (val >> 8) | (reg << 6);
264 out[0] = reg >> 2;
265}
266
267static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
268{
269 u8 *b = buf;
270
271 b[0] = val << shift;
272}
273
274static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
275{
276 put_unaligned_be16(val << shift, buf);
277}
278
279static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
280{
281 put_unaligned_le16(val << shift, buf);
282}
283
284static void regmap_format_16_native(void *buf, unsigned int val,
285 unsigned int shift)
286{
287 u16 v = val << shift;
288
289 memcpy(buf, &v, sizeof(v));
290}
291
292static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
293{
294 u8 *b = buf;
295
296 val <<= shift;
297
298 b[0] = val >> 16;
299 b[1] = val >> 8;
300 b[2] = val;
301}
302
303static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
304{
305 put_unaligned_be32(val << shift, buf);
306}
307
308static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
309{
310 put_unaligned_le32(val << shift, buf);
311}
312
313static void regmap_format_32_native(void *buf, unsigned int val,
314 unsigned int shift)
315{
316 u32 v = val << shift;
317
318 memcpy(buf, &v, sizeof(v));
319}
320
321#ifdef CONFIG_64BIT
322static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
323{
324 put_unaligned_be64((u64) val << shift, buf);
325}
326
327static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
328{
329 put_unaligned_le64((u64) val << shift, buf);
330}
331
332static void regmap_format_64_native(void *buf, unsigned int val,
333 unsigned int shift)
334{
335 u64 v = (u64) val << shift;
336
337 memcpy(buf, &v, sizeof(v));
338}
339#endif
340
341static void regmap_parse_inplace_noop(void *buf)
342{
343}
344
345static unsigned int regmap_parse_8(const void *buf)
346{
347 const u8 *b = buf;
348
349 return b[0];
350}
351
352static unsigned int regmap_parse_16_be(const void *buf)
353{
354 return get_unaligned_be16(buf);
355}
356
357static unsigned int regmap_parse_16_le(const void *buf)
358{
359 return get_unaligned_le16(buf);
360}
361
362static void regmap_parse_16_be_inplace(void *buf)
363{
364 u16 v = get_unaligned_be16(buf);
365
366 memcpy(buf, &v, sizeof(v));
367}
368
369static void regmap_parse_16_le_inplace(void *buf)
370{
371 u16 v = get_unaligned_le16(buf);
372
373 memcpy(buf, &v, sizeof(v));
374}
375
376static unsigned int regmap_parse_16_native(const void *buf)
377{
378 u16 v;
379
380 memcpy(&v, buf, sizeof(v));
381 return v;
382}
383
384static unsigned int regmap_parse_24(const void *buf)
385{
386 const u8 *b = buf;
387 unsigned int ret = b[2];
388 ret |= ((unsigned int)b[1]) << 8;
389 ret |= ((unsigned int)b[0]) << 16;
390
391 return ret;
392}
393
394static unsigned int regmap_parse_32_be(const void *buf)
395{
396 return get_unaligned_be32(buf);
397}
398
399static unsigned int regmap_parse_32_le(const void *buf)
400{
401 return get_unaligned_le32(buf);
402}
403
404static void regmap_parse_32_be_inplace(void *buf)
405{
406 u32 v = get_unaligned_be32(buf);
407
408 memcpy(buf, &v, sizeof(v));
409}
410
411static void regmap_parse_32_le_inplace(void *buf)
412{
413 u32 v = get_unaligned_le32(buf);
414
415 memcpy(buf, &v, sizeof(v));
416}
417
418static unsigned int regmap_parse_32_native(const void *buf)
419{
420 u32 v;
421
422 memcpy(&v, buf, sizeof(v));
423 return v;
424}
425
426#ifdef CONFIG_64BIT
427static unsigned int regmap_parse_64_be(const void *buf)
428{
429 return get_unaligned_be64(buf);
430}
431
432static unsigned int regmap_parse_64_le(const void *buf)
433{
434 return get_unaligned_le64(buf);
435}
436
437static void regmap_parse_64_be_inplace(void *buf)
438{
439 u64 v = get_unaligned_be64(buf);
440
441 memcpy(buf, &v, sizeof(v));
442}
443
444static void regmap_parse_64_le_inplace(void *buf)
445{
446 u64 v = get_unaligned_le64(buf);
447
448 memcpy(buf, &v, sizeof(v));
449}
450
451static unsigned int regmap_parse_64_native(const void *buf)
452{
453 u64 v;
454
455 memcpy(&v, buf, sizeof(v));
456 return v;
457}
458#endif
459
460static void regmap_lock_hwlock(void *__map)
461{
462 struct regmap *map = __map;
463
464// hwspin_lock_timeout(map->hwlock, UINT_MAX);
465}
466
467static void regmap_lock_hwlock_irq(void *__map)
468{
469 struct regmap *map = __map;
470
471// hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
472}
473
474static void regmap_lock_hwlock_irqsave(void *__map)
475{
476 struct regmap *map = __map;
477
478// hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
479// &map->spinlock_flags);
480}
481
482static void regmap_unlock_hwlock(void *__map)
483{
484 struct regmap *map = __map;
485
486// hwspin_unlock(map->hwlock);
487}
488
489static void regmap_unlock_hwlock_irq(void *__map)
490{
491 struct regmap *map = __map;
492
493// hwspin_unlock_irq(map->hwlock);
494}
495
496static void regmap_unlock_hwlock_irqrestore(void *__map)
497{
498 struct regmap *map = __map;
499
500// hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
501}
502
503static void regmap_lock_unlock_none(void *__map)
504{
505
506}
507
508static void regmap_lock_mutex(void *__map)
509{
510 struct regmap *map = __map;
511 mutex_lock(&map->mutex);
512}
513
514static void regmap_unlock_mutex(void *__map)
515{
516 struct regmap *map = __map;
517 mutex_unlock(&map->mutex);
518}
519
520static void regmap_lock_spinlock(void *__map)
521__acquires(&map->spinlock)
522{
523 struct regmap *map = __map;
524 unsigned long flags;
525
526 spin_lock_irqsave(&map->spinlock, flags);
527 map->spinlock_flags = flags;
528}
529
530static void regmap_unlock_spinlock(void *__map)
531__releases(&map->spinlock)
532{
533 struct regmap *map = __map;
534 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
535}
536
537static void dev_get_regmap_release(struct device *dev, void *res)
538{
539 /*
540 * We don't actually have anything to do here; the goal here
541 * is not to manage the regmap but to provide a simple way to
542 * get the regmap back given a struct device.
543 */
544}
545
546static bool _regmap_range_add(struct regmap *map,
547 struct regmap_range_node *data)
548{
549 struct rb_root *root = &map->range_tree;
550 struct rb_node **new = &(root->rb_node), *parent = NULL;
551
552 while (*new) {
553 struct regmap_range_node *this =
554 rb_entry(*new, struct regmap_range_node, node);
555
556 parent = *new;
557 if (data->range_max < this->range_min)
558 new = &((*new)->rb_left);
559 else if (data->range_min > this->range_max)
560 new = &((*new)->rb_right);
561 else
562 return false;
563 }
564
565 rb_link_node(&data->node, parent, new);
566 rb_insert_color(&data->node, root);
567
568 return true;
569}
570
571static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
572 unsigned int reg)
573{
574 struct rb_node *node = map->range_tree.rb_node;
575
576 while (node) {
577 struct regmap_range_node *this =
578 rb_entry(node, struct regmap_range_node, node);
579
580 if (reg < this->range_min)
581 node = node->rb_left;
582 else if (reg > this->range_max)
583 node = node->rb_right;
584 else
585 return this;
586 }
587
588 return NULL;
589}
590
591static void regmap_range_exit(struct regmap *map)
592{
593 struct rb_node *next;
594 struct regmap_range_node *range_node;
595
596 next = rb_first(&map->range_tree);
597 while (next) {
598 range_node = rb_entry(next, struct regmap_range_node, node);
599 next = rb_next(&range_node->node);
600 rb_erase(&range_node->node, &map->range_tree);
601 kfree(range_node);
602 }
603
604 kfree(map->selector_work_buf);
605}
606
607static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
608{
609 if (config->name) {
610#ifndef TARGET_OS2
611 const char *name = kstrdup_const(config->name, GFP_KERNEL);
612#else
613 const char *name = config->name;
614#endif
615
616 if (!name)
617 return -ENOMEM;
618
619#ifndef TARGET_OS2
620 kfree_const(map->name);
621#else
622 kfree(map->name);
623#endif
624 map->name = name;
625 }
626
627 return 0;
628}
629
630int regmap_attach_dev(struct device *dev, struct regmap *map,
631 const struct regmap_config *config)
632{
633 struct regmap **m;
634 int ret;
635
636 map->dev = dev;
637
638 ret = regmap_set_name(map, config);
639 if (ret)
640 return ret;
641
642 regmap_debugfs_init(map);
643
644 /* Add a devres resource for dev_get_regmap() */
645 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
646 if (!m) {
647 regmap_debugfs_exit(map);
648 return -ENOMEM;
649 }
650 *m = map;
651 devres_add(dev, m);
652
653 return 0;
654}
655EXPORT_SYMBOL_GPL(regmap_attach_dev);
656
657static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
658 const struct regmap_config *config)
659{
660 enum regmap_endian endian;
661
662 /* Retrieve the endianness specification from the regmap config */
663 endian = config->reg_format_endian;
664
665 /* If the regmap config specified a non-default value, use that */
666 if (endian != REGMAP_ENDIAN_DEFAULT)
667 return endian;
668
669 /* Retrieve the endianness specification from the bus config */
670 if (bus && bus->reg_format_endian_default)
671 endian = bus->reg_format_endian_default;
672
673 /* If the bus specified a non-default value, use that */
674 if (endian != REGMAP_ENDIAN_DEFAULT)
675 return endian;
676
677 /* Use this if no other value was found */
678 return REGMAP_ENDIAN_BIG;
679}
680
681enum regmap_endian regmap_get_val_endian(struct device *dev,
682 const struct regmap_bus *bus,
683 const struct regmap_config *config)
684{
685#ifndef TARGET_OS2
686 struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
687#else
688 struct fwnode_handle *fwnode = NULL;
689#endif
690 enum regmap_endian endian;
691
692 /* Retrieve the endianness specification from the regmap config */
693 endian = config->val_format_endian;
694
695 /* If the regmap config specified a non-default value, use that */
696 if (endian != REGMAP_ENDIAN_DEFAULT)
697 return endian;
698
699#ifndef TARGET_OS2
700 /* If the firmware node exists, try to get endianness from it */
701 if (fwnode_property_read_bool(fwnode, "big-endian"))
702 endian = REGMAP_ENDIAN_BIG;
703 else if (fwnode_property_read_bool(fwnode, "little-endian"))
704 endian = REGMAP_ENDIAN_LITTLE;
705 else if (fwnode_property_read_bool(fwnode, "native-endian"))
706 endian = REGMAP_ENDIAN_NATIVE;
707#endif
708 /* If the endianness was specified in fwnode, use that */
709 if (endian != REGMAP_ENDIAN_DEFAULT)
710 return endian;
711
712 /* Retrieve the endianness specification from the bus config */
713 if (bus && bus->val_format_endian_default)
714 endian = bus->val_format_endian_default;
715
716 /* If the bus specified a non-default value, use that */
717 if (endian != REGMAP_ENDIAN_DEFAULT)
718 return endian;
719
720 /* Use this if no other value was found */
721 return REGMAP_ENDIAN_BIG;
722}
723EXPORT_SYMBOL_GPL(regmap_get_val_endian);
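/*
 * A minimal sketch (hypothetical config): the regmap_config setting
 * takes precedence over both the fwnode properties and the bus default
 * probed above:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
 *	};
 */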
724
725struct regmap *__regmap_init(struct device *dev,
726 const struct regmap_bus *bus,
727 void *bus_context,
728 const struct regmap_config *config,
729 struct lock_class_key *lock_key,
730 const char *lock_name)
731{
732 struct regmap *map;
733 int ret = -EINVAL;
734 enum regmap_endian reg_endian, val_endian;
735 int i, j;
736#ifdef TARGET_OS2
737 // 2020-11-17 SHL FIXME patched struct rb_root
738 struct rb_root _RB_ROOT = { NULL, };
739#endif
740
741 if (!config)
742 goto err;
743
744 map = kzalloc(sizeof(*map), GFP_KERNEL);
745 if (map == NULL) {
746 ret = -ENOMEM;
747 goto err;
748 }
749
750 ret = regmap_set_name(map, config);
751 if (ret)
752 goto err_map;
753
754 ret = -EINVAL; /* Later error paths rely on this */
755
756 if (config->disable_locking) {
757 map->lock = map->unlock = regmap_lock_unlock_none;
758 map->can_sleep = config->can_sleep;
759 regmap_debugfs_disable(map);
760 } else if (config->lock && config->unlock) {
761 map->lock = config->lock;
762 map->unlock = config->unlock;
763 map->lock_arg = config->lock_arg;
764 map->can_sleep = config->can_sleep;
765 } else if (config->use_hwlock) {
766#ifndef TARGET_OS2
767 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
768 if (!map->hwlock) {
769 ret = -ENXIO;
770 goto err_name;
771 }
772#endif
773 switch (config->hwlock_mode) {
774 case HWLOCK_IRQSTATE:
775 map->lock = regmap_lock_hwlock_irqsave;
776 map->unlock = regmap_unlock_hwlock_irqrestore;
777 break;
778 case HWLOCK_IRQ:
779 map->lock = regmap_lock_hwlock_irq;
780 map->unlock = regmap_unlock_hwlock_irq;
781 break;
782 default:
783 map->lock = regmap_lock_hwlock;
784 map->unlock = regmap_unlock_hwlock;
785 break;
786 }
787
788 map->lock_arg = map;
789 } else {
790 if ((bus && bus->fast_io) ||
791 config->fast_io) {
792 spin_lock_init(&map->spinlock);
793 map->lock = regmap_lock_spinlock;
794 map->unlock = regmap_unlock_spinlock;
795 lockdep_set_class_and_name(&map->spinlock,
796 lock_key, lock_name);
797 } else {
798 mutex_init(&map->mutex);
799 map->lock = regmap_lock_mutex;
800 map->unlock = regmap_unlock_mutex;
801 map->can_sleep = true;
802 lockdep_set_class_and_name(&map->mutex,
803 lock_key, lock_name);
804 }
805 map->lock_arg = map;
806 }
807
808 /*
809 * When we write in fast-paths with regmap_bulk_write(), don't allocate
810 * scratch buffers with sleeping allocations.
811 */
812 if ((bus && bus->fast_io) || config->fast_io)
813 map->alloc_flags = GFP_ATOMIC;
814 else
815 map->alloc_flags = GFP_KERNEL;
816
817 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
818 map->format.pad_bytes = config->pad_bits / 8;
819 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
820 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
821 config->val_bits + config->pad_bits, 8);
822 map->reg_shift = config->pad_bits % 8;
823 if (config->reg_stride)
824 map->reg_stride = config->reg_stride;
825 else
826 map->reg_stride = 1;
827 if (is_power_of_2(map->reg_stride))
828 map->reg_stride_order = ilog2(map->reg_stride);
829 else
830 map->reg_stride_order = -1;
831 map->use_single_read = config->use_single_read || !bus || !bus->read;
832 map->use_single_write = config->use_single_write || !bus || !bus->write;
833 map->can_multi_write = config->can_multi_write && bus && bus->write;
834 if (bus) {
835 map->max_raw_read = bus->max_raw_read;
836 map->max_raw_write = bus->max_raw_write;
837 }
838 map->dev = dev;
839 map->bus = bus;
840 map->bus_context = bus_context;
841 map->max_register = config->max_register;
842 map->wr_table = config->wr_table;
843 map->rd_table = config->rd_table;
844 map->volatile_table = config->volatile_table;
845 map->precious_table = config->precious_table;
846 map->wr_noinc_table = config->wr_noinc_table;
847 map->rd_noinc_table = config->rd_noinc_table;
848 map->writeable_reg = config->writeable_reg;
849 map->readable_reg = config->readable_reg;
850 map->volatile_reg = config->volatile_reg;
851 map->precious_reg = config->precious_reg;
852 map->writeable_noinc_reg = config->writeable_noinc_reg;
853 map->readable_noinc_reg = config->readable_noinc_reg;
854 map->cache_type = config->cache_type;
855
856 spin_lock_init(&map->async_lock);
857 INIT_LIST_HEAD(&map->async_list);
858 INIT_LIST_HEAD(&map->async_free);
859 init_waitqueue_head(&map->async_waitq);
860
861 if (config->read_flag_mask ||
862 config->write_flag_mask ||
863 config->zero_flag_mask) {
864 map->read_flag_mask = config->read_flag_mask;
865 map->write_flag_mask = config->write_flag_mask;
866 } else if (bus) {
867 map->read_flag_mask = bus->read_flag_mask;
868 }
869
870 if (!bus) {
871 map->reg_read = config->reg_read;
872 map->reg_write = config->reg_write;
873
874 map->defer_caching = false;
875 goto skip_format_initialization;
876 } else if (!bus->read || !bus->write) {
877 map->reg_read = _regmap_bus_reg_read;
878 map->reg_write = _regmap_bus_reg_write;
879 map->reg_update_bits = bus->reg_update_bits;
880
881 map->defer_caching = false;
882 goto skip_format_initialization;
883 } else {
884 map->reg_read = _regmap_bus_read;
885 map->reg_update_bits = bus->reg_update_bits;
886 }
887
888 reg_endian = regmap_get_reg_endian(bus, config);
889 val_endian = regmap_get_val_endian(dev, bus, config);
890
891 switch (config->reg_bits + map->reg_shift) {
892 case 2:
893 switch (config->val_bits) {
894 case 6:
895 map->format.format_write = regmap_format_2_6_write;
896 break;
897 default:
898 goto err_hwlock;
899 }
900 break;
901
902 case 4:
903 switch (config->val_bits) {
904 case 12:
905 map->format.format_write = regmap_format_4_12_write;
906 break;
907 default:
908 goto err_hwlock;
909 }
910 break;
911
912 case 7:
913 switch (config->val_bits) {
914 case 9:
915 map->format.format_write = regmap_format_7_9_write;
916 break;
917 default:
918 goto err_hwlock;
919 }
920 break;
921
922 case 10:
923 switch (config->val_bits) {
924 case 14:
925 map->format.format_write = regmap_format_10_14_write;
926 break;
927 default:
928 goto err_hwlock;
929 }
930 break;
931
932 case 12:
933 switch (config->val_bits) {
934 case 20:
935 map->format.format_write = regmap_format_12_20_write;
936 break;
937 default:
938 goto err_hwlock;
939 }
940 break;
941
942 case 8:
943 map->format.format_reg = regmap_format_8;
944 break;
945
946 case 16:
947 switch (reg_endian) {
948 case REGMAP_ENDIAN_BIG:
949 map->format.format_reg = regmap_format_16_be;
950 break;
951 case REGMAP_ENDIAN_LITTLE:
952 map->format.format_reg = regmap_format_16_le;
953 break;
954 case REGMAP_ENDIAN_NATIVE:
955 map->format.format_reg = regmap_format_16_native;
956 break;
957 default:
958 goto err_hwlock;
959 }
960 break;
961
962 case 24:
963 if (reg_endian != REGMAP_ENDIAN_BIG)
964 goto err_hwlock;
965 map->format.format_reg = regmap_format_24;
966 break;
967
968 case 32:
969 switch (reg_endian) {
970 case REGMAP_ENDIAN_BIG:
971 map->format.format_reg = regmap_format_32_be;
972 break;
973 case REGMAP_ENDIAN_LITTLE:
974 map->format.format_reg = regmap_format_32_le;
975 break;
976 case REGMAP_ENDIAN_NATIVE:
977 map->format.format_reg = regmap_format_32_native;
978 break;
979 default:
980 goto err_hwlock;
981 }
982 break;
983
984#ifdef CONFIG_64BIT
985 case 64:
986 switch (reg_endian) {
987 case REGMAP_ENDIAN_BIG:
988 map->format.format_reg = regmap_format_64_be;
989 break;
990 case REGMAP_ENDIAN_LITTLE:
991 map->format.format_reg = regmap_format_64_le;
992 break;
993 case REGMAP_ENDIAN_NATIVE:
994 map->format.format_reg = regmap_format_64_native;
995 break;
996 default:
997 goto err_hwlock;
998 }
999 break;
1000#endif
1001
1002 default:
1003 goto err_hwlock;
1004 }
1005
1006 if (val_endian == REGMAP_ENDIAN_NATIVE)
1007 map->format.parse_inplace = regmap_parse_inplace_noop;
1008
1009 switch (config->val_bits) {
1010 case 8:
1011 map->format.format_val = regmap_format_8;
1012 map->format.parse_val = regmap_parse_8;
1013 map->format.parse_inplace = regmap_parse_inplace_noop;
1014 break;
1015 case 16:
1016 switch (val_endian) {
1017 case REGMAP_ENDIAN_BIG:
1018 map->format.format_val = regmap_format_16_be;
1019 map->format.parse_val = regmap_parse_16_be;
1020 map->format.parse_inplace = regmap_parse_16_be_inplace;
1021 break;
1022 case REGMAP_ENDIAN_LITTLE:
1023 map->format.format_val = regmap_format_16_le;
1024 map->format.parse_val = regmap_parse_16_le;
1025 map->format.parse_inplace = regmap_parse_16_le_inplace;
1026 break;
1027 case REGMAP_ENDIAN_NATIVE:
1028 map->format.format_val = regmap_format_16_native;
1029 map->format.parse_val = regmap_parse_16_native;
1030 break;
1031 default:
1032 goto err_hwlock;
1033 }
1034 break;
1035 case 24:
1036 if (val_endian != REGMAP_ENDIAN_BIG)
1037 goto err_hwlock;
1038 map->format.format_val = regmap_format_24;
1039 map->format.parse_val = regmap_parse_24;
1040 break;
1041 case 32:
1042 switch (val_endian) {
1043 case REGMAP_ENDIAN_BIG:
1044 map->format.format_val = regmap_format_32_be;
1045 map->format.parse_val = regmap_parse_32_be;
1046 map->format.parse_inplace = regmap_parse_32_be_inplace;
1047 break;
1048 case REGMAP_ENDIAN_LITTLE:
1049 map->format.format_val = regmap_format_32_le;
1050 map->format.parse_val = regmap_parse_32_le;
1051 map->format.parse_inplace = regmap_parse_32_le_inplace;
1052 break;
1053 case REGMAP_ENDIAN_NATIVE:
1054 map->format.format_val = regmap_format_32_native;
1055 map->format.parse_val = regmap_parse_32_native;
1056 break;
1057 default:
1058 goto err_hwlock;
1059 }
1060 break;
1061#ifdef CONFIG_64BIT
1062 case 64:
1063 switch (val_endian) {
1064 case REGMAP_ENDIAN_BIG:
1065 map->format.format_val = regmap_format_64_be;
1066 map->format.parse_val = regmap_parse_64_be;
1067 map->format.parse_inplace = regmap_parse_64_be_inplace;
1068 break;
1069 case REGMAP_ENDIAN_LITTLE:
1070 map->format.format_val = regmap_format_64_le;
1071 map->format.parse_val = regmap_parse_64_le;
1072 map->format.parse_inplace = regmap_parse_64_le_inplace;
1073 break;
1074 case REGMAP_ENDIAN_NATIVE:
1075 map->format.format_val = regmap_format_64_native;
1076 map->format.parse_val = regmap_parse_64_native;
1077 break;
1078 default:
1079 goto err_hwlock;
1080 }
1081 break;
1082#endif
1083 }
1084
1085 if (map->format.format_write) {
1086 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1087 (val_endian != REGMAP_ENDIAN_BIG))
1088 goto err_hwlock;
1089 map->use_single_write = true;
1090 }
1091
1092 if (!map->format.format_write &&
1093 !(map->format.format_reg && map->format.format_val))
1094 goto err_hwlock;
1095
1096 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1097 if (map->work_buf == NULL) {
1098 ret = -ENOMEM;
1099 goto err_hwlock;
1100 }
1101
1102 if (map->format.format_write) {
1103 map->defer_caching = false;
1104 map->reg_write = _regmap_bus_formatted_write;
1105 } else if (map->format.format_val) {
1106 map->defer_caching = true;
1107 map->reg_write = _regmap_bus_raw_write;
1108 }
1109
1110skip_format_initialization:
1111
1112#ifndef TARGET_OS2
1113 map->range_tree = RB_ROOT;
1114#else
1115 map->range_tree = _RB_ROOT;
1116 map->range_tree.rb_node = NULL;
1117 memset(&map->range_tree, 0, sizeof(struct rb_root));
1118#endif
1119 for (i = 0; i < config->num_ranges; i++) {
1120 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1121 struct regmap_range_node *new;
1122
1123 /* Sanity check */
1124 if (range_cfg->range_max < range_cfg->range_min) {
1125 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1126 range_cfg->range_max, range_cfg->range_min);
1127 goto err_range;
1128 }
1129
1130 if (range_cfg->range_max > map->max_register) {
1131 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1132 range_cfg->range_max, map->max_register);
1133 goto err_range;
1134 }
1135
1136 if (range_cfg->selector_reg > map->max_register) {
1137 dev_err(map->dev,
1138 "Invalid range %d: selector out of map\n", i);
1139 goto err_range;
1140 }
1141
1142 if (range_cfg->window_len == 0) {
1143 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1144 i);
1145 goto err_range;
1146 }
1147
1148 /* Make sure that this register range has no selector
1149 or data window within its boundary */
1150 for (j = 0; j < config->num_ranges; j++) {
1151 unsigned sel_reg = config->ranges[j].selector_reg;
1152 unsigned win_min = config->ranges[j].window_start;
1153 unsigned win_max = win_min +
1154 config->ranges[j].window_len - 1;
1155
1156 /* Allow data window inside its own virtual range */
1157 if (j == i)
1158 continue;
1159
1160 if (range_cfg->range_min <= sel_reg &&
1161 sel_reg <= range_cfg->range_max) {
1162 dev_err(map->dev,
1163 "Range %d: selector for %d in window\n",
1164 i, j);
1165 goto err_range;
1166 }
1167
1168 if (!(win_max < range_cfg->range_min ||
1169 win_min > range_cfg->range_max)) {
1170 dev_err(map->dev,
1171 "Range %d: window for %d in window\n",
1172 i, j);
1173 goto err_range;
1174 }
1175 }
1176
1177 new = kzalloc(sizeof(*new), GFP_KERNEL);
1178 if (new == NULL) {
1179 ret = -ENOMEM;
1180 goto err_range;
1181 }
1182
1183 new->map = map;
1184 new->name = range_cfg->name;
1185 new->range_min = range_cfg->range_min;
1186 new->range_max = range_cfg->range_max;
1187 new->selector_reg = range_cfg->selector_reg;
1188 new->selector_mask = range_cfg->selector_mask;
1189 new->selector_shift = range_cfg->selector_shift;
1190 new->window_start = range_cfg->window_start;
1191 new->window_len = range_cfg->window_len;
1192
1193 if (!_regmap_range_add(map, new)) {
1194 dev_err(map->dev, "Failed to add range %d\n", i);
1195 kfree(new);
1196 goto err_range;
1197 }
1198
1199 if (map->selector_work_buf == NULL) {
1200 map->selector_work_buf =
1201 kzalloc(map->format.buf_size, GFP_KERNEL);
1202 if (map->selector_work_buf == NULL) {
1203 ret = -ENOMEM;
1204 goto err_range;
1205 }
1206 }
1207 }
1208
1209 ret = regcache_init(map, config);
1210 if (ret != 0)
1211 goto err_range;
1212
1213 if (dev) {
1214 ret = regmap_attach_dev(dev, map, config);
1215 if (ret != 0)
1216 goto err_regcache;
1217 } else {
1218 regmap_debugfs_init(map);
1219 }
1220
1221 return map;
1222
1223err_regcache:
1224 regcache_exit(map);
1225err_range:
1226 regmap_range_exit(map);
1227 kfree(map->work_buf);
1228err_hwlock:
1229#ifndef TARGET_OS2
1230 if (map->hwlock)
1231 hwspin_lock_free(map->hwlock);
1232#endif
1233err_name:
1234#ifndef TARGET_OS2
1235 kfree_const(map->name);
1236#else
1237 kfree(map->name);
1238#endif
1239err_map:
1240 kfree(map);
1241err:
1242 return ERR_PTR(ret);
1243}
1244EXPORT_SYMBOL_GPL(__regmap_init);
1245
1246#ifndef TARGET_OS2
1247static void devm_regmap_release(struct device *dev, void *res)
1248{
1249 regmap_exit(*(struct regmap **)res);
1250}
1251
1252struct regmap *__devm_regmap_init(struct device *dev,
1253 const struct regmap_bus *bus,
1254 void *bus_context,
1255 const struct regmap_config *config,
1256 struct lock_class_key *lock_key,
1257 const char *lock_name)
1258{
1259 struct regmap **ptr, *regmap;
1260
1261 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1262 if (!ptr)
1263 return ERR_PTR(-ENOMEM);
1264
1265 regmap = __regmap_init(dev, bus, bus_context, config,
1266 lock_key, lock_name);
1267 if (!IS_ERR(regmap)) {
1268 *ptr = regmap;
1269 devres_add(dev, ptr);
1270 } else {
1271 devres_free(ptr);
1272 }
1273
1274 return regmap;
1275}
1276EXPORT_SYMBOL_GPL(__devm_regmap_init);
1277#endif
1278
1279static void regmap_field_init(struct regmap_field *rm_field,
1280 struct regmap *regmap, struct reg_field reg_field)
1281{
1282 rm_field->regmap = regmap;
1283 rm_field->reg = reg_field.reg;
1284 rm_field->shift = reg_field.lsb;
1285 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1286 rm_field->id_size = reg_field.id_size;
1287 rm_field->id_offset = reg_field.id_offset;
1288}
1289
1290#ifndef TARGET_OS2
1291/**
1292 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1293 *
1294 * @dev: Device that will be interacted with
1295 * @regmap: regmap bank in which this register field is located.
1296 * @reg_field: Register field within the bank.
1297 *
1298 * The return value will be an ERR_PTR() on error or a valid pointer
1299 * to a struct regmap_field. The regmap_field will be automatically freed
1300 * by the device management code.
1301 */
1302struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1303 struct regmap *regmap, struct reg_field reg_field)
1304{
1305 struct regmap_field *rm_field = devm_kzalloc(dev,
1306 sizeof(*rm_field), GFP_KERNEL);
1307 if (!rm_field)
1308 return ERR_PTR(-ENOMEM);
1309
1310 regmap_field_init(rm_field, regmap, reg_field);
1311
1312 return rm_field;
1313
1314}
1315EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1316#endif
1317
1318/**
1319 * regmap_field_bulk_alloc() - Allocate and initialise multiple register fields.
1320 *
1321 * @regmap: regmap bank in which this register field is located.
1322 * @rm_field: regmap register fields within the bank.
1323 * @reg_field: Register fields within the bank.
1324 * @num_fields: Number of register fields.
1325 *
1326 * The return value will be -ENOMEM on error or zero for success.
1327 * Newly allocated regmap_fields should be freed by calling
1328 * regmap_field_bulk_free()
1329 */
1330int regmap_field_bulk_alloc(struct regmap *regmap,
1331 struct regmap_field **rm_field,
1332 struct reg_field *reg_field,
1333 int num_fields)
1334{
1335 struct regmap_field *rf;
1336 int i;
1337
1338 rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
1339 if (!rf)
1340 return -ENOMEM;
1341
1342 for (i = 0; i < num_fields; i++) {
1343 regmap_field_init(&rf[i], regmap, reg_field[i]);
1344 rm_field[i] = &rf[i];
1345 }
1346
1347 return 0;
1348}
1349EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1350
1351#ifndef TARGET_OS2
1352/**
1353 * devm_regmap_field_bulk_alloc() - Allocate and initialise multiple register
1354 * fields.
1355 *
1356 * @dev: Device that will be interacted with
1357 * @regmap: regmap bank in which this register field is located.
1358 * @rm_field: regmap register fields within the bank.
1359 * @reg_field: Register fields within the bank.
1360 * @num_fields: Number of register fields.
1361 *
1362 * The return value will be -ENOMEM on error or zero for success.
1363 * Newly allocated regmap_fields will be automatically freed by the
1364 * device management code.
1365 */
1366int devm_regmap_field_bulk_alloc(struct device *dev,
1367 struct regmap *regmap,
1368 struct regmap_field **rm_field,
1369 struct reg_field *reg_field,
1370 int num_fields)
1371{
1372 struct regmap_field *rf;
1373 int i;
1374
1375 rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1376 if (!rf)
1377 return -ENOMEM;
1378
1379 for (i = 0; i < num_fields; i++) {
1380 regmap_field_init(&rf[i], regmap, reg_field[i]);
1381 rm_field[i] = &rf[i];
1382 }
1383
1384 return 0;
1385}
1386EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
1387#endif
1388
1389/**
1390 * regmap_field_bulk_free() - Free register fields allocated using
1391 * regmap_field_bulk_alloc().
1392 *
1393 * @field: regmap fields which should be freed.
1394 */
1395void regmap_field_bulk_free(struct regmap_field *field)
1396{
1397 kfree(field);
1398}
1399EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1400
1401#ifndef TARGET_OS2
1402/**
1403 * devm_regmap_field_bulk_free() - Free register fields allocated using
1404 * devm_regmap_field_bulk_alloc().
1405 *
1406 * @dev: Device that will be interacted with
1407 * @field: regmap field which should be freed.
1408 *
1409 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
1410 * drivers need not call this function, as the memory allocated via devm
1411 * will be freed as per device-driver life-cycle.
1412 */
1413void devm_regmap_field_bulk_free(struct device *dev,
1414 struct regmap_field *field)
1415{
1416 devm_kfree(dev, field);
1417}
1418EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1419
1420/**
1421 * devm_regmap_field_free() - Free a register field allocated using
1422 * devm_regmap_field_alloc.
1423 *
1424 * @dev: Device that will be interacted with
1425 * @field: regmap field which should be freed.
1426 *
1427 * Free register field allocated using devm_regmap_field_alloc(). Usually
1428 * drivers need not call this function, as the memory allocated via devm
1429 * will be freed as per device-driver life-cycle.
1430 */
1431void devm_regmap_field_free(struct device *dev,
1432 struct regmap_field *field)
1433{
1434 devm_kfree(dev, field);
1435}
1436EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1437#endif
1438
1439/**
1440 * regmap_field_alloc() - Allocate and initialise a register field.
1441 *
1442 * @regmap: regmap bank in which this register field is located.
1443 * @reg_field: Register field within the bank.
1444 *
1445 * The return value will be an ERR_PTR() on error or a valid pointer
1446 * to a struct regmap_field. The regmap_field should be freed by the
1447 * user once they have finished with it, using regmap_field_free().
1448 */
1449struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1450 struct reg_field reg_field)
1451{
1452 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1453
1454 if (!rm_field)
1455 return ERR_PTR(-ENOMEM);
1456
1457 regmap_field_init(rm_field, regmap, reg_field);
1458
1459 return rm_field;
1460}
1461EXPORT_SYMBOL_GPL(regmap_field_alloc);
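/*
 * A minimal sketch (hypothetical field occupying bits 7..4 of register
 * 0x0c):
 *
 *	static const struct reg_field foo_field = REG_FIELD(0x0c, 4, 7);
 *	struct regmap_field *f = regmap_field_alloc(map, foo_field);
 *
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 */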
1462
1463/**
1464 * regmap_field_free() - Free register field allocated using
1465 * regmap_field_alloc.
1466 *
1467 * @field: regmap field which should be freed.
1468 */
1469void regmap_field_free(struct regmap_field *field)
1470{
1471 kfree(field);
1472}
1473EXPORT_SYMBOL_GPL(regmap_field_free);
1474
1475/**
1476 * regmap_reinit_cache() - Reinitialise the current register cache
1477 *
1478 * @map: Register map to operate on.
1479 * @config: New configuration. Only the cache data will be used.
1480 *
1481 * Discard any existing register cache for the map and initialize a
1482 * new cache. This can be used to restore the cache to defaults or to
1483 * update the cache configuration to reflect runtime discovery of the
1484 * hardware.
1485 *
1486 * No explicit locking is done here, the user needs to ensure that
1487 * this function will not race with other calls to regmap.
1488 */
1489int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1490{
1491 int ret;
1492
1493 regcache_exit(map);
1494 regmap_debugfs_exit(map);
1495
1496 map->max_register = config->max_register;
1497 map->writeable_reg = config->writeable_reg;
1498 map->readable_reg = config->readable_reg;
1499 map->volatile_reg = config->volatile_reg;
1500 map->precious_reg = config->precious_reg;
1501 map->writeable_noinc_reg = config->writeable_noinc_reg;
1502 map->readable_noinc_reg = config->readable_noinc_reg;
1503 map->cache_type = config->cache_type;
1504
1505 ret = regmap_set_name(map, config);
1506 if (ret)
1507 return ret;
1508
1509 regmap_debugfs_init(map);
1510
1511 map->cache_bypass = false;
1512 map->cache_only = false;
1513
1514 return regcache_init(map, config);
1515}
1516EXPORT_SYMBOL_GPL(regmap_reinit_cache);
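/*
 * Usage sketch (hypothetical config name): after discovering the device
 * revision at runtime, switch the cache over to that revision's defaults:
 *
 *	ret = regmap_reinit_cache(map, &foo_rev2_config);
 *	if (ret)
 *		return ret;
 */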
1517
1518/**
1519 * regmap_exit() - Free a previously allocated register map
1520 *
1521 * @map: Register map to operate on.
1522 */
1523void regmap_exit(struct regmap *map)
1524{
1525 struct regmap_async *async;
1526
1527 regcache_exit(map);
1528 regmap_debugfs_exit(map);
1529 regmap_range_exit(map);
1530 if (map->bus && map->bus->free_context)
1531 map->bus->free_context(map->bus_context);
1532 kfree(map->work_buf);
1533 while (!list_empty(&map->async_free)) {
1534 async = list_first_entry_or_null(&map->async_free,
1535 struct regmap_async,
1536 list);
1537 list_del(&async->list);
1538 kfree(async->work_buf);
1539 kfree(async);
1540 }
1541#ifndef TARGET_OS2
1542 if (map->hwlock)
1543 hwspin_lock_free(map->hwlock);
1544#endif
1545 if (map->lock == regmap_lock_mutex)
1546 mutex_destroy(&map->mutex);
1547#ifndef TARGET_OS2
1548 kfree_const(map->name);
1549#else
1550 kfree(map->name);
1551#endif
1552 kfree(map->patch);
1553 kfree(map);
1554}
1555EXPORT_SYMBOL_GPL(regmap_exit);
1556
1557static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1558{
1559 struct regmap **r = res;
1560 if (!r || !*r) {
1561 WARN_ON(!r || !*r);
1562 return 0;
1563 }
1564
1565 /* If the user didn't specify a name, match any */
1566 if (data)
1567 return !strcmp((*r)->name, data);
1568 else
1569 return 1;
1570}
1571
1572/**
1573 * dev_get_regmap() - Obtain the regmap (if any) for a device
1574 *
1575 * @dev: Device to retrieve the map for
1576 * @name: Optional name for the register map, usually NULL.
1577 *
1578 * Returns the regmap for the device if one is present, or NULL. If
1579 * name is specified then it must match the name specified when
1580 * registering the device, if it is NULL then the first regmap found
1581 * will be used. Devices with multiple register maps are very rare,
1582 * generic code should normally not need to specify a name.
1583 */
1584struct regmap *dev_get_regmap(struct device *dev, const char *name)
1585{
1586 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1587 dev_get_regmap_match, (void *)name);
1588
1589 if (!r)
1590 return NULL;
1591 return *r;
1592}
1593EXPORT_SYMBOL_GPL(dev_get_regmap);
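/*
 * Usage sketch (hypothetical MFD child driver whose parent registered
 * the regmap):
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */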
1594
1595/**
1596 * regmap_get_device() - Obtain the device from a regmap
1597 *
1598 * @map: Register map to operate on.
1599 *
1600 * Returns the underlying device that the regmap has been created for.
1601 */
1602struct device *regmap_get_device(struct regmap *map)
1603{
1604 return map->dev;
1605}
1606EXPORT_SYMBOL_GPL(regmap_get_device);
1607
1608static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1609 struct regmap_range_node *range,
1610 unsigned int val_num)
1611{
1612 void *orig_work_buf;
1613 unsigned int win_offset;
1614 unsigned int win_page;
1615 bool page_chg;
1616 int ret;
1617
1618 win_offset = (*reg - range->range_min) % range->window_len;
1619 win_page = (*reg - range->range_min) / range->window_len;
1620
1621 if (val_num > 1) {
1622 /* Bulk write shouldn't cross range boundary */
1623 if (*reg + val_num - 1 > range->range_max)
1624 return -EINVAL;
1625
1626 /* ... or single page boundary */
1627 if (val_num > range->window_len - win_offset)
1628 return -EINVAL;
1629 }
1630
1631 /* It is possible to have the selector register inside the data window.
1632 In that case, the selector register is located on every page and
1633 needs no page switching when accessed alone. */
1634 if (val_num > 1 ||
1635 range->window_start + win_offset != range->selector_reg) {
1636 /* Use separate work_buf during page switching */
1637 orig_work_buf = map->work_buf;
1638 map->work_buf = map->selector_work_buf;
1639
1640 ret = _regmap_update_bits(map, range->selector_reg,
1641 range->selector_mask,
1642 win_page << range->selector_shift,
1643 &page_chg, false);
1644
1645 map->work_buf = orig_work_buf;
1646
1647 if (ret != 0)
1648 return ret;
1649 }
1650
1651 *reg = range->window_start + win_offset;
1652
1653 return 0;
1654}
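/*
 * A minimal sketch of an indirect range (hypothetical device: registers
 * 0x100-0x4ff reached through a 0x100-register window at 0x10, with the
 * page selected via bits 1:0 of register 0x0f):
 *
 *	static const struct regmap_range_cfg foo_range = {
 *		.range_min = 0x100,
 *		.range_max = 0x4ff,
 *		.selector_reg = 0x0f,
 *		.selector_mask = 0x3,
 *		.selector_shift = 0,
 *		.window_start = 0x10,
 *		.window_len = 0x100,
 *	};
 */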
1655
1656static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1657 unsigned long mask)
1658{
1659 u8 *buf;
1660 int i;
1661
1662 if (!mask || !map->work_buf)
1663 return;
1664
1665 buf = map->work_buf;
1666
1667 for (i = 0; i < max_bytes; i++)
1668 buf[i] |= (mask >> (8 * i)) & 0xff;
1669}
1670
1671static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1672 const void *val, size_t val_len, bool noinc)
1673{
1674 struct regmap_range_node *range;
1675 unsigned long flags;
1676 void *work_val = map->work_buf + map->format.reg_bytes +
1677 map->format.pad_bytes;
1678 void *buf;
1679 int ret = -ENOTSUPP;
1680 size_t len;
1681 int i;
1682
1683 WARN_ON(!map->bus);
1684
1685 /* Check for unwritable or noinc registers in range
1686 * before we start
1687 */
1688 if (!regmap_writeable_noinc(map, reg)) {
1689 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1690 unsigned int element =
1691 reg + regmap_get_offset(map, i);
1692 if (!regmap_writeable(map, element) ||
1693 regmap_writeable_noinc(map, element))
1694 return -EINVAL;
1695 }
1696 }
1697
1698 if (!map->cache_bypass && map->format.parse_val) {
1699 unsigned int ival;
1700 int val_bytes = map->format.val_bytes;
1701 for (i = 0; i < val_len / val_bytes; i++) {
1702 ival = map->format.parse_val(val + (i * val_bytes));
1703 ret = regcache_write(map,
1704 reg + regmap_get_offset(map, i),
1705 ival);
1706 if (ret) {
1707 dev_err(map->dev,
1708 "Error in caching of register: %x ret: %d\n",
1709 reg + i, ret);
1710 return ret;
1711 }
1712 }
1713 if (map->cache_only) {
1714 map->cache_dirty = true;
1715 return 0;
1716 }
1717 }
1718
1719 range = _regmap_range_lookup(map, reg);
1720 if (range) {
1721 int val_num = val_len / map->format.val_bytes;
1722 int win_offset = (reg - range->range_min) % range->window_len;
1723 int win_residue = range->window_len - win_offset;
1724
1725 /* If the write goes beyond the end of the window split it */
1726 while (val_num > win_residue) {
1727 dev_dbg(map->dev, "Writing window %d/%zu\n",
1728 win_residue, val_len / map->format.val_bytes);
1729 ret = _regmap_raw_write_impl(map, reg, val,
1730 win_residue *
1731 map->format.val_bytes, noinc);
1732 if (ret != 0)
1733 return ret;
1734
1735 reg += win_residue;
1736 val_num -= win_residue;
1737 val += win_residue * map->format.val_bytes;
1738 val_len -= win_residue * map->format.val_bytes;
1739
1740 win_offset = (reg - range->range_min) %
1741 range->window_len;
1742 win_residue = range->window_len - win_offset;
1743 }
1744
1745 ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
1746 if (ret != 0)
1747 return ret;
1748 }
1749
1750 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1751 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1752 map->write_flag_mask);
1753
1754 /*
1755 * Essentially all I/O mechanisms will be faster with a single
1756 * buffer to write. Since register syncs often generate raw
1757 * writes of single registers, optimise that case.
1758 */
1759 if (val != work_val && val_len == map->format.val_bytes) {
1760 memcpy(work_val, val, map->format.val_bytes);
1761 val = work_val;
1762 }
1763
1764 if (map->async && map->bus->async_write) {
1765 struct regmap_async *async;
1766
1767 spin_lock_irqsave(&map->async_lock, flags);
1768 async = list_first_entry_or_null(&map->async_free,
1769 struct regmap_async,
1770 list);
1771 if (async)
1772 list_del(&async->list);
1773 spin_unlock_irqrestore(&map->async_lock, flags);
1774
1775 if (!async) {
1776 async = map->bus->async_alloc();
1777 if (!async)
1778 return -ENOMEM;
1779
1780 async->work_buf = kzalloc(map->format.buf_size,
1781 GFP_KERNEL | GFP_DMA);
1782 if (!async->work_buf) {
1783 kfree(async);
1784 return -ENOMEM;
1785 }
1786 }
1787
1788 async->map = map;
1789
1790 /* If the caller supplied the value we can use it safely. */
1791 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1792 map->format.reg_bytes + map->format.val_bytes);
1793
1794 spin_lock_irqsave(&map->async_lock, flags);
1795 list_add_tail(&async->list, &map->async_list);
1796 spin_unlock_irqrestore(&map->async_lock, flags);
1797
1798 if (val != work_val)
1799 ret = map->bus->async_write(map->bus_context,
1800 async->work_buf,
1801 map->format.reg_bytes +
1802 map->format.pad_bytes,
1803 val, val_len, async);
1804 else
1805 ret = map->bus->async_write(map->bus_context,
1806 async->work_buf,
1807 map->format.reg_bytes +
1808 map->format.pad_bytes +
1809 val_len, NULL, 0, async);
1810
1811 if (ret != 0) {
1812 dev_err(map->dev, "Failed to schedule write: %d\n",
1813 ret);
1814
1815 spin_lock_irqsave(&map->async_lock, flags);
1816 list_move(&async->list, &map->async_free);
1817 spin_unlock_irqrestore(&map->async_lock, flags);
1818 }
1819
1820 return ret;
1821 }
1822
1823 /* If we're doing a single register write, we can probably just
1824 * send the work_buf directly, otherwise try to do a gather
1825 * write.
1826 */
1827 if (val == work_val)
1828 ret = map->bus->write(map->bus_context, map->work_buf,
1829 map->format.reg_bytes +
1830 map->format.pad_bytes +
1831 val_len);
1832 else if (map->bus->gather_write)
1833 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1834 map->format.reg_bytes +
1835 map->format.pad_bytes,
1836 val, val_len);
1837 else
1838 ret = -ENOTSUPP;
1839
1840 /* If that didn't work, fall back on linearising by hand. */
1841 if (ret == -ENOTSUPP) {
1842 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1843 buf = kzalloc(len, GFP_KERNEL);
1844 if (!buf)
1845 return -ENOMEM;
1846
1847 memcpy(buf, map->work_buf, map->format.reg_bytes);
1848 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1849 val, val_len);
1850 ret = map->bus->write(map->bus_context, buf, len);
1851
1852 kfree(buf);
1853 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1854 /* regcache_drop_region() takes lock that we already have,
1855 * thus call map->cache_ops->drop() directly
1856 */
1857 if (map->cache_ops && map->cache_ops->drop)
1858 map->cache_ops->drop(map, reg, reg + 1);
1859 }
1860
1861 return ret;
1862}
1863
1864/**
1865 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1866 *
1867 * @map: Map to check.
1868 */
1869bool regmap_can_raw_write(struct regmap *map)
1870{
1871 return map->bus && map->bus->write && map->format.format_val &&
1872 map->format.format_reg;
1873}
1874EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1875
1876/**
1877 * regmap_get_raw_read_max - Get the maximum size we can read
1878 *
1879 * @map: Map to check.
1880 */
1881size_t regmap_get_raw_read_max(struct regmap *map)
1882{
1883 return map->max_raw_read;
1884}
1885EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1886
1887/**
1888 * regmap_get_raw_write_max - Get the maximum size we can write
1889 *
1890 * @map: Map to check.
1891 */
1892size_t regmap_get_raw_write_max(struct regmap *map)
1893{
1894 return map->max_raw_write;
1895}
1896EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1897
1898static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1899 unsigned int val)
1900{
1901 int ret;
1902 struct regmap_range_node *range;
1903 struct regmap *map = context;
1904
1905 WARN_ON(!map->bus || !map->format.format_write);
1906
1907 range = _regmap_range_lookup(map, reg);
1908 if (range) {
1909 ret = _regmap_select_page(map, &reg, range, 1);
1910 if (ret != 0)
1911 return ret;
1912 }
1913
1914 map->format.format_write(map, reg, val);
1915
1916 ret = map->bus->write(map->bus_context, map->work_buf,
1917 map->format.buf_size);
1918
1919 return ret;
1920}
1921
1922static int _regmap_bus_reg_write(void *context, unsigned int reg,
1923 unsigned int val)
1924{
1925 struct regmap *map = context;
1926
1927 return map->bus->reg_write(map->bus_context, reg, val);
1928}
1929
1930static int _regmap_bus_raw_write(void *context, unsigned int reg,
1931 unsigned int val)
1932{
1933 struct regmap *map = context;
1934
1935 WARN_ON(!map->bus || !map->format.format_val);
1936
1937 map->format.format_val(map->work_buf + map->format.reg_bytes
1938 + map->format.pad_bytes, val, 0);
1939 return _regmap_raw_write_impl(map, reg,
1940 map->work_buf +
1941 map->format.reg_bytes +
1942 map->format.pad_bytes,
1943 map->format.val_bytes,
1944 false);
1945}
1946
1947static inline void *_regmap_map_get_context(struct regmap *map)
1948{
1949 return (map->bus) ? map : map->bus_context;
1950}
1951
1952int _regmap_write(struct regmap *map, unsigned int reg,
1953 unsigned int val)
1954{
1955 int ret;
1956 void *context = _regmap_map_get_context(map);
1957
1958 if (!regmap_writeable(map, reg))
1959 return -EIO;
1960
1961 if (!map->cache_bypass && !map->defer_caching) {
1962 ret = regcache_write(map, reg, val);
1963 if (ret != 0)
1964 return ret;
1965 if (map->cache_only) {
1966 map->cache_dirty = true;
1967 return 0;
1968 }
1969 }
1970
1971 if (regmap_should_log(map))
1972 dev_info(map->dev, "%x <= %x\n", reg, val);
1973
1974 return map->reg_write(context, reg, val);
1975}
1976
1977#ifdef TARGET_OS2
1978#define IS_ALIGNED(x, a) (((x) & ((unsigned int)(a) - 1)) == 0)
1979#endif
1980
1981/**
1982 * regmap_write() - Write a value to a single register
1983 *
1984 * @map: Register map to write to
1985 * @reg: Register to write to
1986 * @val: Value to be written
1987 *
1988 * A value of zero will be returned on success, a negative errno will
1989 * be returned in error cases.
1990 */
1991int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1992{
1993 int ret;
1994
1995 if (!IS_ALIGNED(reg, map->reg_stride))
1996 return -EINVAL;
1997
1998 map->lock(map->lock_arg);
1999
2000 ret = _regmap_write(map, reg, val);
2001
2002 map->unlock(map->lock_arg);
2003
2004 return ret;
2005}
2006EXPORT_SYMBOL_GPL(regmap_write);
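/*
 * Usage sketch (hypothetical codec with an 8-bit volume register at
 * 0x02):
 *
 *	ret = regmap_write(map, 0x02, 0x3f);
 *	if (ret)
 *		dev_err(dev, "failed to set volume: %d\n", ret);
 */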
2007
2008/**
2009 * regmap_write_async() - Write a value to a single register asynchronously
2010 *
2011 * @map: Register map to write to
2012 * @reg: Register to write to
2013 * @val: Value to be written
2014 *
2015 * A value of zero will be returned on success, a negative errno will
2016 * be returned in error cases.
2017 */
2018int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
2019{
2020 int ret;
2021
2022 if (!IS_ALIGNED(reg, map->reg_stride))
2023 return -EINVAL;
2024
2025 map->lock(map->lock_arg);
2026
2027 map->async = true;
2028
2029 ret = _regmap_write(map, reg, val);
2030
2031 map->async = false;
2032
2033 map->unlock(map->lock_arg);
2034
2035 return ret;
2036}
2037EXPORT_SYMBOL_GPL(regmap_write_async);
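/*
 * Usage sketch (hypothetical register addresses): queue a couple of
 * writes and wait for them to land with regmap_async_complete():
 *
 *	regmap_write_async(map, 0x10, 0x01);
 *	regmap_write_async(map, 0x11, 0x02);
 *	ret = regmap_async_complete(map);
 */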

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > map->max_raw_write)
		chunk_regs = map->max_raw_write / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many bytes as possible with chunk_size */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
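
/*
 * Minimal sketch of a raw block write such as a firmware download;
 * "FW_LOAD_REG", "fw_data" and "fw_len" are hypothetical. The buffer
 * must already be laid out in the device's wire format and fw_len
 * must be a multiple of map->format.val_bytes.
 *
 *	ret = regmap_raw_write(map, FW_LOAD_REG, fw_data, fw_len);
 */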

/**
 * regmap_noinc_write() - Write data to a register without incrementing the
 *                        register number
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Pointer to data buffer
 * @val_len: Length of the data pointed to by val, in bytes.
 *
 * The regmap API usually assumes that bulk bus write operations will write a
 * range of registers. Some devices have certain registers for which a write
 * operation can write to an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple writes as required to write val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_write(struct regmap *map, unsigned int reg,
		       const void *val, size_t val_len)
{
	size_t write_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->write)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_write && map->max_raw_write < val_len)
			write_len = map->max_raw_write;
		else
			write_len = val_len;
		ret = _regmap_raw_write(map, reg, val, write_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + write_len;
		val_len -= write_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_write);
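
/*
 * Sketch of streaming a buffer into a device FIFO through a single
 * (hypothetical) FIFO_REG address. The register must be declared
 * volatile and writeable_noinc in the regmap configuration, otherwise
 * this returns -EINVAL.
 *
 *	ret = regmap_noinc_write(map, FIFO_REG, buf, buf_len);
 */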

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on the register field with change,
 * async, force option.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
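
/*
 * Sketch of driving a field: most callers go through the
 * regmap_field_write() and regmap_field_update_bits() helpers, which
 * wrap regmap_field_update_bits_base(). The field layout below
 * (register 0x30, bits 4..7) is hypothetical.
 *
 *	static const struct reg_field gain = REG_FIELD(0x30, 4, 7);
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, map, gain);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	ret = regmap_field_write(f, 0x5);
 */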

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to write to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
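
/*
 * Sketch of a bulk write of four 16-bit values starting at a
 * hypothetical COEF_BASE register. The values are passed in native
 * CPU byte order; regmap formats them for the bus.
 *
 *	u16 coefs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, COEF_BASE, coefs, ARRAY_SIZE(coefs));
 */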

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.
			 * This can't occur with page breaks as we
			 * never write on the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
#ifndef TARGET_OS2
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
#endif
					udelay(regs[i].delay_us);
			}

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
#ifndef TARGET_OS2
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
#endif
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
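
/*
 * Sketch of a scattered multi-register write via reg_sequence; the
 * registers, values and the 250 us post-write delay are hypothetical.
 * The third initialiser of a reg_sequence entry is its delay_us field.
 *
 *	static const struct reg_sequence init_seq[] = {
 *		{ 0x02, 0x11 },
 *		{ 0x40, 0x01, 250 },
 *		{ 0x03, 0x8f },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
 */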

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register, value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of registers is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len, false);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
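
/*
 * Example usage of regmap_read(); the status register address is
 * hypothetical.
 *
 *	unsigned int val;
 *
 *	ret = regmap_read(map, 0x0c, &val);
 *	if (ret == 0)
 *		dev_dbg(dev, "status = %x\n", val);
 */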

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *                       register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers. Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
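
/*
 * Sketch of draining a device FIFO through a single (hypothetical)
 * FIFO_REG address. As with regmap_noinc_write(), the register must
 * be volatile and marked readable_noinc in the regmap configuration.
 *
 *	ret = regmap_noinc_read(map, FIFO_REG, buf, buf_len);
 */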

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
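
/*
 * Sketch of reading a block of four registers back into native-order
 * values; COEF_BASE is hypothetical.
 *
 *	u16 coefs[4];
 *
 *	ret = regmap_bulk_read(map, COEF_BASE, coefs, ARRAY_SIZE(coefs));
 */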

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether the write should be asynchronous
 * @force: Boolean indicating whether to force the write even if the
 *         value is unchanged
 *
 * Perform a read/modify/write cycle on a register map with change, async, force
 * options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
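
/*
 * Callers normally use the regmap_update_bits() wrapper, which passes
 * change/async/force as NULL/false/false. Sketch (register and bit
 * are hypothetical): set bit 3 of register 0x20 while leaving the
 * other bits untouched.
 *
 *	ret = regmap_update_bits(map, 0x20, BIT(3), BIT(3));
 */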

/**
 * regmap_test_bits() - Check if all specified bits are set in a register.
 *
 * @map: Register map to operate on
 * @reg: Register to read from
 * @bits: Bits to test
 *
 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
 * bits are set and a negative error number if the underlying regmap_read()
 * fails.
 */
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret)
		return ret;

	return (val & bits) == bits;
}
EXPORT_SYMBOL_GPL(regmap_test_bits);
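
/*
 * Sketch of regmap_test_bits(); note the three-way return: 1 if all
 * tested bits are set, 0 if any is clear, negative errno on a failed
 * read. The IRQ_STATUS register name is hypothetical.
 *
 *	ret = regmap_test_bits(map, IRQ_STATUS, BIT(0) | BIT(1));
 *	if (ret < 0)
 *		return ret;
 *	ready = (ret == 1);
 */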

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

#ifndef TARGET_OS2
	wait_event(map->async_waitq, regmap_async_is_done(map));
#endif
	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

#ifndef TARGET_OS2
	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;
#else
	if (num_regs <= 0) {
		pr_warn("invalid registers number (%d)", num_regs);
		return 0;
	}
#endif
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
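
/*
 * Sketch of registering an errata patch at probe time; the register
 * addresses and values are hypothetical vendor magic. The sequence is
 * written immediately and re-applied whenever the cache is synced.
 *
 *	static const struct reg_sequence errata[] = {
 *		{ 0x7a, 0x0001 },
 *		{ 0x7b, 0x0105 },
 *	};
 *
 *	ret = regmap_register_patch(map, errata, ARRAY_SIZE(errata));
 */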

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);