source: GPL/branches/uniaud32-next/lib32/regmap.c@ 718

Last change on this file since 718 was 718, checked in by Paul Smedley, 3 years ago

WIP trying to fix non-HDA Hardware

File size: 80.0 KB
Line 
1// SPDX-License-Identifier: GPL-2.0
2//
3// Register map access API
4//
5// Copyright 2011 Wolfson Microelectronics plc
6//
7// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9/* from 5.10.10 */
10
11#include <linux/device.h>
12#include <linux/slab.h>
13#include <linux/export.h>
14#include <linux/mutex.h>
15#include <linux/err.h>
16//#include <linux/property.h>
17#include <linux/rbtree.h>
18#include <linux/sched.h>
19#include <linux/delay.h>
20#include <linux/log2.h>
21//#include <linux/hwspinlock.h>
22#include <asm/unaligned.h>
23#include <linux/module.h>
24#include <linux/workqueue.h>
25#include <linux/byteorder/little_endian.h>
26#include <linux/printk.h>
27
28/* hwspinlock mode argument */
29#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
30#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
31#define HWLOCK_RAW 0x03
32
33#define CREATE_TRACE_POINTS
34//#include "trace.h"
35
36#include "internal.h"
37
38/*
39 * Sometimes for failures during very early init the trace
40 * infrastructure isn't available early enough to be used. For this
41 * sort of problem defining LOG_DEVICE will add printks for basic
42 * register I/O on a specific device.
43 */
44#undef LOG_DEVICE
45
#ifdef LOG_DEVICE
/*
 * With LOG_DEVICE defined, enable basic register I/O logging for the
 * one device whose dev_name() matches the LOG_DEVICE string.
 */
/*static*/ inline bool regmap_should_log(struct regmap *map)
{
	return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
}
#else
/* Logging disabled: constant false so calls can be optimised away. */
/*static*/ inline bool regmap_should_log(struct regmap *map) { return false; }
#endif
54
55
56/*static*/ int _regmap_update_bits(struct regmap *map, unsigned int reg,
57 unsigned int mask, unsigned int val,
58 bool *change, bool force_write);
59
60/*static*/ int _regmap_bus_reg_read(void *context, unsigned int reg,
61 unsigned int *val);
62/*static*/ int _regmap_bus_read(void *context, unsigned int reg,
63 unsigned int *val);
64/*static*/ int _regmap_bus_formatted_write(void *context, unsigned int reg,
65 unsigned int val);
66/*static*/ int _regmap_bus_reg_write(void *context, unsigned int reg,
67 unsigned int val);
68/*static*/ int _regmap_bus_raw_write(void *context, unsigned int reg,
69 unsigned int val);
70
71bool regmap_reg_in_ranges(unsigned int reg,
72 const struct regmap_range *ranges,
73 unsigned int nranges)
74{
75 const struct regmap_range *r;
76 int i;
77
78 for (i = 0, r = ranges; i < nranges; i++, r++)
79 if (regmap_reg_in_range(reg, r))
80 return true;
81 return false;
82}
83EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
84
85bool regmap_check_range_table(struct regmap *map, unsigned int reg,
86 const struct regmap_access_table *table)
87{
88 /* Check "no ranges" first */
89 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
90 return false;
91
92 /* In case zero "yes ranges" are supplied, any reg is OK */
93 if (!table->n_yes_ranges)
94 return true;
95
96 return regmap_reg_in_ranges(reg, table->yes_ranges,
97 table->n_yes_ranges);
98}
99EXPORT_SYMBOL_GPL(regmap_check_range_table);
100
101bool regmap_writeable(struct regmap *map, unsigned int reg)
102{
103 if (map->max_register && reg > map->max_register)
104 return false;
105
106 if (map->writeable_reg)
107 return map->writeable_reg(map->dev, reg);
108
109 if (map->wr_table)
110 return regmap_check_range_table(map, reg, map->wr_table);
111
112 return true;
113}
114
115bool regmap_cached(struct regmap *map, unsigned int reg)
116{
117 int ret;
118 unsigned int val;
119
120 if (map->cache_type == REGCACHE_NONE)
121 return false;
122
123 if (!map->cache_ops)
124 return false;
125
126 if (map->max_register && reg > map->max_register)
127 return false;
128
129 map->lock(map->lock_arg);
130 ret = regcache_read(map, reg, &val);
131 map->unlock(map->lock_arg);
132 if (ret)
133 return false;
134
135 return true;
136}
137
138bool regmap_readable(struct regmap *map, unsigned int reg)
139{
140 if (!map->reg_read)
141 return false;
142
143 if (map->max_register && reg > map->max_register)
144 return false;
145
146 if (map->format.format_write)
147 return false;
148
149 if (map->readable_reg)
150 return map->readable_reg(map->dev, reg);
151
152 if (map->rd_table)
153 return regmap_check_range_table(map, reg, map->rd_table);
154
155 return true;
156}
157
158bool regmap_volatile(struct regmap *map, unsigned int reg)
159{
160 if (!map->format.format_write && !regmap_readable(map, reg))
161 return false;
162
163 if (map->volatile_reg)
164 return map->volatile_reg(map->dev, reg);
165
166 if (map->volatile_table)
167 return regmap_check_range_table(map, reg, map->volatile_table);
168
169 if (map->cache_ops)
170 return false;
171 else
172 return true;
173}
174
175bool regmap_precious(struct regmap *map, unsigned int reg)
176{
177 if (!regmap_readable(map, reg))
178 return false;
179
180 if (map->precious_reg)
181 return map->precious_reg(map->dev, reg);
182
183 if (map->precious_table)
184 return regmap_check_range_table(map, reg, map->precious_table);
185
186 return false;
187}
188
189bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
190{
191 if (map->writeable_noinc_reg)
192 return map->writeable_noinc_reg(map->dev, reg);
193
194 if (map->wr_noinc_table)
195 return regmap_check_range_table(map, reg, map->wr_noinc_table);
196
197 return true;
198}
199
200bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
201{
202 if (map->readable_noinc_reg)
203 return map->readable_noinc_reg(map->dev, reg);
204
205 if (map->rd_noinc_table)
206 return regmap_check_range_table(map, reg, map->rd_noinc_table);
207
208 return true;
209}
210
211/*static*/ bool regmap_volatile_range(struct regmap *map, unsigned int reg,
212 size_t num)
213{
214 unsigned int i;
215
216 for (i = 0; i < num; i++)
217 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
218 return false;
219
220 return true;
221}
222
/*
 * Register/value marshalling helpers.
 *
 * The regmap_format_<R>_<V>_write() functions pack an R-bit register
 * address and a V-bit value into map->work_buf for buses with packed
 * layouts.  The regmap_format_<N>{,_be,_le,_native}() functions write a
 * single N-bit quantity (shifted left by @shift) into a caller-supplied
 * buffer in the named endianness.
 */
/*static*/ void regmap_format_12_20_write(struct regmap *map,
					  unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	/* 12-bit reg + 20-bit val packed big-endian into 4 bytes */
	out[0] = reg >> 4;
	out[1] = (reg << 4) | (val >> 16);
	out[2] = val >> 8;
	out[3] = val;
}


/*static*/ void regmap_format_2_6_write(struct regmap *map,
					unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	/* 2-bit reg in the top bits, 6-bit value below */
	*out = (reg << 6) | val;
}

/*static*/ void regmap_format_4_12_write(struct regmap *map,
					 unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

/*static*/ void regmap_format_7_9_write(struct regmap *map,
					unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

/*static*/ void regmap_format_10_14_write(struct regmap *map,
					  unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	/* 10-bit reg + 14-bit value, big-endian across 3 bytes */
	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

/*static*/ void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

/*static*/ void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

/*static*/ void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

/*static*/ void regmap_format_16_native(void *buf, unsigned int val,
					unsigned int shift)
{
	u16 v = val << shift;

	/* memcpy: buf may be unaligned for a direct u16 store */
	memcpy(buf, &v, sizeof(v));
}

/*static*/ void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	/* 24-bit values are always stored big-endian */
	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

/*static*/ void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

/*static*/ void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

/*static*/ void regmap_format_32_native(void *buf, unsigned int val,
					unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
/*static*/ void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

/*static*/ void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

/*static*/ void regmap_format_64_native(void *buf, unsigned int val,
					unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif
340
/*
 * Value unmarshalling helpers.
 *
 * regmap_parse_<N>_*() extract an N-bit value from a raw bus buffer;
 * the *_inplace() variants convert a buffer to CPU endianness in place
 * for bulk reads.
 */
/*static*/ void regmap_parse_inplace_noop(void *buf)
{
	/* Native-endian data needs no in-place conversion. */
}

/*static*/ unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

/*static*/ unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

/*static*/ unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

/*static*/ void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	/* memcpy back: buf may be unaligned for a direct u16 store */
	memcpy(buf, &v, sizeof(v));
}

/*static*/ void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

/*static*/ unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

/*static*/ unsigned int regmap_parse_24(const void *buf)
{
	/* 24-bit values are always stored big-endian */
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

/*static*/ unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

/*static*/ unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

/*static*/ void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

/*static*/ void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

/*static*/ unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
/*
 * NOTE(review): these return unsigned int, so a 64-bit value is
 * truncated to 32 bits on return - confirm whether any 64-bit bus user
 * exists on this platform before relying on the upper half.
 */
/*static*/ unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

/*static*/ unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

/*static*/ void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

/*static*/ void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

/*static*/ unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif
459
/*
 * Hardware-spinlock based map->lock/map->unlock callbacks.  On OS/2
 * (TARGET_OS2) the hwspinlock framework is unavailable, so every one of
 * these compiles to an empty stub.
 */
/*static*/ void regmap_lock_hwlock(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
#endif
}

/*static*/ void regmap_lock_hwlock_irq(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
#endif
}

/*static*/ void regmap_lock_hwlock_irqsave(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	/* IRQ flags are stashed in the map for the matching unlock */
	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
#endif
}

/*static*/ void regmap_unlock_hwlock(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
#endif
}

/*static*/ void regmap_unlock_hwlock_irq(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
#endif
}

/*static*/ void regmap_unlock_hwlock_irqrestore(void *__map)
{
#ifndef TARGET_OS2
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
#endif
}

/*static*/ void regmap_lock_unlock_none(void *__map)
{
	/* config->disable_locking: intentionally a no-op */
}
519
/* Mutex-based map->lock/map->unlock callbacks (sleeping contexts). */
/*static*/ void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

/*static*/ void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

/*
 * Spinlock-based callbacks for fast_io maps.  The saved IRQ flags live
 * in the map itself so the unlock side can restore them.
 */
/*static*/ void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

/*static*/ void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
548
/* devres release callback for the dev_get_regmap() back-pointer. */
/*static*/ void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
557
/*
 * Insert a virtual register range into the map's rbtree, keyed by
 * [range_min, range_max].  Returns false if the new range overlaps an
 * existing one (the node is not inserted in that case).
 */
/*static*/ bool _regmap_range_add(struct regmap *map,
				  struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Standard rbtree descent: left if strictly below, right if above */
	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;	/* overlap: reject */
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}
582
583/*static*/ struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
584 unsigned int reg)
585{
586 struct rb_node *node = map->range_tree.rb_node;
587
588 while (node) {
589 struct regmap_range_node *this =
590 rb_entry(node, struct regmap_range_node, node);
591
592 if (reg < this->range_min)
593 node = node->rb_left;
594 else if (reg > this->range_max)
595 node = node->rb_right;
596 else
597 return this;
598 }
599
600 return NULL;
601}
602
/*
 * Tear down all virtual range nodes and the shared selector scratch
 * buffer.  The next pointer is fetched before rb_erase() so iteration
 * stays valid while nodes are removed.
 */
/*static*/ void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}
618
/*
 * Store the user-supplied name on the map.  On Linux the string is
 * duplicated with kstrdup_const(); on OS/2 the config's string is used
 * directly.  Returns 0 or -ENOMEM.
 *
 * NOTE(review): in the TARGET_OS2 branch map->name ends up pointing at
 * config->name without a copy, yet it is later passed to kfree() (below
 * and in the __regmap_init() error path).  Confirm config->name is
 * always heap-allocated on OS/2; otherwise this frees memory the map
 * does not own.
 */
/*static*/ int regmap_set_name(struct regmap *map, const struct regmap_config *config)
{
	if (config->name) {
#ifndef TARGET_OS2
		const char *name = kstrdup_const(config->name, GFP_KERNEL);
#else
		const char *name = config->name;
#endif

		if (!name)
			return -ENOMEM;

		/* Release any previously-set name before replacing it. */
#ifndef TARGET_OS2
		kfree_const(map->name);
#else
		kfree(map->name);
#endif
		map->name = name;
	}

	return 0;
}
641
/**
 * regmap_attach_dev() - attach a regmap to a struct device.
 *
 * @dev: device to attach to
 * @map: regmap, previously created with a NULL device
 * @config: regmap config (used here only for the name)
 *
 * Names the map, (re)creates its debugfs entries and registers a devres
 * record so dev_get_regmap() can find the map later.  Returns 0 or a
 * negative errno.
 */
int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;
	int ret;

	map->dev = dev;

	ret = regmap_set_name(map, config);
	if (ret)
		return ret;

	/* Re-init debugfs now that the map has a device name. */
	regmap_debugfs_exit(map);
	regmap_debugfs_init(map);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);
669
670/*static*/ enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
671 const struct regmap_config *config)
672{
673 enum regmap_endian endian;
674
675 /* Retrieve the endianness specification from the regmap config */
676 endian = config->reg_format_endian;
677
678 /* If the regmap config specified a non-default value, use that */
679 if (endian != REGMAP_ENDIAN_DEFAULT)
680 return endian;
681
682 /* Retrieve the endianness specification from the bus config */
683 if (bus && bus->reg_format_endian_default)
684 endian = bus->reg_format_endian_default;
685
686 /* If the bus specified a non-default value, use that */
687 if (endian != REGMAP_ENDIAN_DEFAULT)
688 return endian;
689
690 /* Use this if no other value was found */
691 return REGMAP_ENDIAN_BIG;
692}
693
/*
 * Pick the value endianness: regmap config first, then (on Linux) the
 * device's firmware node properties, then the bus default, and finally
 * big-endian.  The fwnode step is compiled out on OS/2.
 */
enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
#ifndef TARGET_OS2
	struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
#endif
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

#ifndef TARGET_OS2
	/* If the firmware node exist try to get endianness from it */
	if (fwnode_property_read_bool(fwnode, "big-endian"))
		endian = REGMAP_ENDIAN_BIG;
	else if (fwnode_property_read_bool(fwnode, "little-endian"))
		endian = REGMAP_ENDIAN_LITTLE;
	else if (fwnode_property_read_bool(fwnode, "native-endian"))
		endian = REGMAP_ENDIAN_NATIVE;
#endif
	/* If the endianness was specified in fwnode, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
735
/**
 * __regmap_init() - create and initialise a register map.
 *
 * @dev: optional device the map belongs to (may be NULL)
 * @bus: optional bus operations (NULL means config->reg_read/reg_write
 *       are used directly)
 * @bus_context: opaque context handed to the bus callbacks
 * @config: describes the register layout, access tables and caching
 * @lock_key/@lock_name: lockdep identification for the internal lock
 *
 * Returns a valid regmap pointer or ERR_PTR() on failure.  Cleanup on
 * error is via the chained goto labels at the bottom.
 */
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;
#ifdef TARGET_OS2
	// 2020-11-17 SHL FIXME patched struct rb_root
	struct rb_root _RB_ROOT = { NULL, };
#endif

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	ret = regmap_set_name(map, config);
	if (ret)
		goto err_map;

	ret = -EINVAL; /* Later error paths rely on this */

	/*
	 * Select the locking strategy: none, caller-supplied callbacks,
	 * hardware spinlock, or (default) spinlock for fast_io buses and
	 * mutex otherwise.
	 */
	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		map->can_sleep = config->can_sleep;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
		map->can_sleep = config->can_sleep;
	} else if (config->use_hwlock) {
#ifndef TARGET_OS2
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}
#endif
		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			map->can_sleep = true;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	/* Derive byte sizes and stride from the bit-level config. */
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	map->use_single_write = config->use_single_write || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	/* Copy the access tables and per-register callbacks verbatim. */
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->wr_noinc_table = config->wr_noinc_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->writeable_noinc_reg = config->writeable_noinc_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	/*
	 * Choose the low-level read/write implementations.  Maps without
	 * a bus, or with reg-level bus ops only, skip the wire-format
	 * setup entirely.
	 */
	if (!bus) {
		map->reg_read = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;
		map->reg_update_bits = bus->reg_update_bits;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	/* Pick a register-address formatter for the configured width. */
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 12:
		switch (config->val_bits) {
		case 20:
			map->format.format_write = regmap_format_12_20_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	/* Pick value formatter/parser for the configured value width. */
	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	/*
	 * NOTE(review): the three OS/2 statements below are redundant with
	 * each other - the memset alone zeroes the root.  Harmless, left
	 * as the WIP author wrote it.
	 */
#ifndef TARGET_OS2
	map->range_tree = RB_ROOT;
#else
	map->range_tree = _RB_ROOT;
	map->range_tree.rb_node = NULL;
	memset(&map->range_tree, 0, sizeof(struct rb_root));
#endif
	/* Validate and register each configured virtual (paged) range. */
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure, that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
				config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		/* One shared scratch buffer serves all selector writes. */
		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map);
	}

	return map;

	/*
	 * Error unwinding, in reverse order of acquisition.
	 * NOTE(review): on TARGET_OS2 the kfree of map->name is compiled
	 * out along with the hwlock cleanup, so the name set by
	 * regmap_set_name() is not released on these paths - confirm
	 * whether that is intentional given the borrowed-pointer handling
	 * in regmap_set_name().
	 */
err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
#ifndef TARGET_OS2
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
#endif
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
1252
1253#ifndef TARGET_OS2
/* devres destructor: tear down the regmap stored in the devres payload. */
/*static*/ void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1258
1259struct regmap *__devm_regmap_init(struct device *dev,
1260 const struct regmap_bus *bus,
1261 void *bus_context,
1262 const struct regmap_config *config,
1263 struct lock_class_key *lock_key,
1264 const char *lock_name)
1265{
1266 struct regmap **ptr, *regmap;
1267
1268 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1269 if (!ptr)
1270 return ERR_PTR(-ENOMEM);
1271
1272 regmap = __regmap_init(dev, bus, bus_context, config,
1273 lock_key, lock_name);
1274 if (!IS_ERR(regmap)) {
1275 *ptr = regmap;
1276 devres_add(dev, ptr);
1277 } else {
1278 devres_free(ptr);
1279 }
1280
1281 return regmap;
1282}
1283EXPORT_SYMBOL_GPL(__devm_regmap_init);
1284#endif
1285
1286/*static*/ void regmap_field_init(struct regmap_field *rm_field,
1287 struct regmap *regmap, struct reg_field reg_field)
1288{
1289 rm_field->regmap = regmap;
1290 rm_field->reg = reg_field.reg;
1291 rm_field->shift = reg_field.lsb;
1292 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1293 rm_field->id_size = reg_field.id_size;
1294 rm_field->id_offset = reg_field.id_offset;
1295}
1296
1297#ifndef TARGET_OS2
1298/**
1299 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1300 *
1301 * @dev: Device that will be interacted with
1302 * @regmap: regmap bank in which this register field is located.
1303 * @reg_field: Register field with in the bank.
1304 *
1305 * The return value will be an ERR_PTR() on error or a valid pointer
1306 * to a struct regmap_field. The regmap_field will be automatically freed
1307 * by the device management code.
1308 */
1309struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1310 struct regmap *regmap, struct reg_field reg_field)
1311{
1312 struct regmap_field *rm_field = devm_kzalloc(dev,
1313 sizeof(*rm_field), GFP_KERNEL);
1314 if (!rm_field)
1315 return ERR_PTR(-ENOMEM);
1316
1317 regmap_field_init(rm_field, regmap, reg_field);
1318
1319 return rm_field;
1320
1321}
1322EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1323#endif
1324
1325/**
1326 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1327 *
1328 * @regmap: regmap bank in which this register field is located.
1329 * @rm_field: regmap register fields within the bank.
1330 * @reg_field: Register fields within the bank.
1331 * @num_fields: Number of register fields.
1332 *
1333 * The return value will be an -ENOMEM on error or zero for success.
1334 * Newly allocated regmap_fields should be freed by calling
1335 * regmap_field_bulk_free()
1336 */
1337int regmap_field_bulk_alloc(struct regmap *regmap,
1338 struct regmap_field **rm_field,
1339 struct reg_field *reg_field,
1340 int num_fields)
1341{
1342 struct regmap_field *rf;
1343 int i;
1344
1345 rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
1346 if (!rf)
1347 return -ENOMEM;
1348
1349 for (i = 0; i < num_fields; i++) {
1350 regmap_field_init(&rf[i], regmap, reg_field[i]);
1351 rm_field[i] = &rf[i];
1352 }
1353
1354 return 0;
1355}
1356EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1357
1358#ifndef TARGET_OS2
1359/**
1360 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
1361 * fields.
1362 *
1363 * @dev: Device that will be interacted with
1364 * @regmap: regmap bank in which this register field is located.
1365 * @rm_field: regmap register fields within the bank.
1366 * @reg_field: Register fields within the bank.
1367 * @num_fields: Number of register fields.
1368 *
1369 * The return value will be an -ENOMEM on error or zero for success.
1370 * Newly allocated regmap_fields will be automatically freed by the
1371 * device management code.
1372 */
1373int devm_regmap_field_bulk_alloc(struct device *dev,
1374 struct regmap *regmap,
1375 struct regmap_field **rm_field,
1376 struct reg_field *reg_field,
1377 int num_fields)
1378{
1379 struct regmap_field *rf;
1380 int i;
1381
1382 rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1383 if (!rf)
1384 return -ENOMEM;
1385
1386 for (i = 0; i < num_fields; i++) {
1387 regmap_field_init(&rf[i], regmap, reg_field[i]);
1388 rm_field[i] = &rf[i];
1389 }
1390
1391 return 0;
1392}
1393EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
1394#endif
1395
1396/**
1397 * regmap_field_bulk_free() - Free register field allocated using
1398 * regmap_field_bulk_alloc.
1399 *
1400 * @field: regmap fields which should be freed.
1401 */
1402void regmap_field_bulk_free(struct regmap_field *field)
1403{
1404 kfree(field);
1405}
1406EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1407
1408#ifndef TARGET_OS2
1409/**
1410 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
1411 * devm_regmap_field_bulk_alloc.
1412 *
1413 * @dev: Device that will be interacted with
1414 * @field: regmap field which should be freed.
1415 *
1416 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
1417 * drivers need not call this function, as the memory allocated via devm
1418 * will be freed as per device-driver life-cycle.
1419 */
1420void devm_regmap_field_bulk_free(struct device *dev,
1421 struct regmap_field *field)
1422{
1423 devm_kfree(dev, field);
1424}
1425EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1426
1427/**
1428 * devm_regmap_field_free() - Free a register field allocated using
1429 * devm_regmap_field_alloc.
1430 *
1431 * @dev: Device that will be interacted with
1432 * @field: regmap field which should be freed.
1433 *
1434 * Free register field allocated using devm_regmap_field_alloc(). Usually
1435 * drivers need not call this function, as the memory allocated via devm
1436 * will be freed as per device-driver life-cyle.
1437 */
1438void devm_regmap_field_free(struct device *dev,
1439 struct regmap_field *field)
1440{
1441 devm_kfree(dev, field);
1442}
1443EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1444#endif
1445
1446/**
1447 * regmap_field_alloc() - Allocate and initialise a register field.
1448 *
1449 * @regmap: regmap bank in which this register field is located.
1450 * @reg_field: Register field with in the bank.
1451 *
1452 * The return value will be an ERR_PTR() on error or a valid pointer
1453 * to a struct regmap_field. The regmap_field should be freed by the
1454 * user once its finished working with it using regmap_field_free().
1455 */
1456struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1457 struct reg_field reg_field)
1458{
1459 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1460
1461 if (!rm_field)
1462 return ERR_PTR(-ENOMEM);
1463
1464 regmap_field_init(rm_field, regmap, reg_field);
1465
1466 return rm_field;
1467}
1468EXPORT_SYMBOL_GPL(regmap_field_alloc);
1469
1470/**
1471 * regmap_field_free() - Free register field allocated using
1472 * regmap_field_alloc.
1473 *
1474 * @field: regmap field which should be freed.
1475 */
1476void regmap_field_free(struct regmap_field *field)
1477{
1478 kfree(field);
1479}
1480EXPORT_SYMBOL_GPL(regmap_field_free);
1481
1482/**
1483 * regmap_reinit_cache() - Reinitialise the current register cache
1484 *
1485 * @map: Register map to operate on.
1486 * @config: New configuration. Only the cache data will be used.
1487 *
1488 * Discard any existing register cache for the map and initialize a
1489 * new cache. This can be used to restore the cache to defaults or to
1490 * update the cache configuration to reflect runtime discovery of the
1491 * hardware.
1492 *
1493 * No explicit locking is done here, the user needs to ensure that
1494 * this function will not race with other calls to regmap.
1495 */
1496int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1497{
1498 int ret;
1499
1500 regcache_exit(map);
1501 regmap_debugfs_exit(map);
1502
1503 map->max_register = config->max_register;
1504 map->writeable_reg = config->writeable_reg;
1505 map->readable_reg = config->readable_reg;
1506 map->volatile_reg = config->volatile_reg;
1507 map->precious_reg = config->precious_reg;
1508 map->writeable_noinc_reg = config->writeable_noinc_reg;
1509 map->readable_noinc_reg = config->readable_noinc_reg;
1510 map->cache_type = config->cache_type;
1511
1512 ret = regmap_set_name(map, config);
1513 if (ret)
1514 return ret;
1515
1516 regmap_debugfs_init(map);
1517
1518 map->cache_bypass = false;
1519 map->cache_only = false;
1520
1521 return regcache_init(map, config);
1522}
1523EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1524
1525/**
1526 * regmap_exit() - Free a previously allocated register map
1527 *
1528 * @map: Register map to operate on.
1529 */
1530void regmap_exit(struct regmap *map)
1531{
1532 struct regmap_async *async;
1533
1534 regcache_exit(map);
1535 regmap_debugfs_exit(map);
1536 regmap_range_exit(map);
1537 if (map->bus && map->bus->free_context)
1538 map->bus->free_context(map->bus_context);
1539 kfree(map->work_buf);
1540 while (!list_empty(&map->async_free)) {
1541 async = list_first_entry_or_null(&map->async_free,
1542 struct regmap_async,
1543 list);
1544 list_del(&async->list);
1545 kfree(async->work_buf);
1546 kfree(async);
1547 }
1548#ifndef TARGET_OS2
1549 if (map->hwlock)
1550 hwspin_lock_free(map->hwlock);
1551#endif
1552 if (map->lock == regmap_lock_mutex)
1553 mutex_destroy(&map->mutex);
1554#ifndef TARGET_OS2
1555 kfree_const(map->name);
1556#else
1557 kfree(map->name);
1558#endif
1559 kfree(map->patch);
1560 kfree(map);
1561}
1562EXPORT_SYMBOL_GPL(regmap_exit);
1563
1564/*static*/ int dev_get_regmap_match(struct device *dev, void *res, void *data)
1565{
1566 struct regmap **r = res;
1567 if (!r || !*r) {
1568 WARN_ON(!r || !*r);
1569 return 0;
1570 }
1571
1572 /* If the user didn't specify a name match any */
1573 if (data)
1574 return !strcmp((*r)->name, data);
1575 else
1576 return 1;
1577}
1578
1579/**
1580 * dev_get_regmap() - Obtain the regmap (if any) for a device
1581 *
1582 * @dev: Device to retrieve the map for
1583 * @name: Optional name for the register map, usually NULL.
1584 *
1585 * Returns the regmap for the device if one is present, or NULL. If
1586 * name is specified then it must match the name specified when
1587 * registering the device, if it is NULL then the first regmap found
1588 * will be used. Devices with multiple register maps are very rare,
1589 * generic code should normally not need to specify a name.
1590 */
1591struct regmap *dev_get_regmap(struct device *dev, const char *name)
1592{
1593 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1594 dev_get_regmap_match, (void *)name);
1595
1596 if (!r)
1597 return NULL;
1598 return *r;
1599}
1600EXPORT_SYMBOL_GPL(dev_get_regmap);
1601
1602/**
1603 * regmap_get_device() - Obtain the device from a regmap
1604 *
1605 * @map: Register map to operate on.
1606 *
1607 * Returns the underlying device that the regmap has been created for.
1608 */
1609struct device *regmap_get_device(struct regmap *map)
1610{
1611 return map->dev;
1612}
1613EXPORT_SYMBOL_GPL(regmap_get_device);
1614
/*
 * Translate *reg, an address inside an indirect (paged) range, into the
 * matching address in the data window, writing the page-selector register
 * first if needed.
 *
 * @map: Register map being accessed.
 * @reg: In/out register address; rewritten to the data-window address.
 * @range: The virtual range containing *reg.
 * @val_num: Number of registers in the access (bulk size; 1 for single).
 *
 * Returns 0 on success or a negative errno (-EINVAL for accesses that
 * cross the range or page boundary, or whatever the selector write fails
 * with).
 */
/*static*/ int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Position of *reg within its page, and which page that is. */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the caller's work buffer before reporting. */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Hand back the address inside the data window. */
	*reg = range->window_start + win_offset;

	return 0;
}
1662
1663/*static*/ void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1664 unsigned long mask)
1665{
1666 u8 *buf;
1667 int i;
1668
1669 if (!mask || !map->work_buf)
1670 return;
1671
1672 buf = map->work_buf;
1673
1674 for (i = 0; i < max_bytes; i++)
1675 buf[i] |= (mask >> (8 * i)) & 0xff;
1676}
1677
/*
 * Core raw-write path: format the register address into the work buffer,
 * optionally mirror values into the cache, handle paged ranges, then push
 * the data out over the bus (async, single-buffer, gather, or linearised
 * fallback, in that order of preference).
 *
 * @map: Register map to write to.
 * @reg: First register to write.
 * @val: Buffer of pre-formatted register values.
 * @val_len: Length of @val in bytes (multiple of format.val_bytes).
 * @noinc: The target is a non-incrementing (FIFO-style) register.
 *
 * Returns 0 on success or a negative errno.
 */
/*static*/ int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool noinc)
{
	struct regmap_range_node *range;
	unsigned long flags;
	/* Value area of the work buffer: after the register + padding. */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable or noinc registers in range
	 * before we start
	 */
	if (!regmap_writeable_noinc(map, reg)) {
		for (i = 0; i < val_len / map->format.val_bytes; i++) {
			unsigned int element =
				reg + regmap_get_offset(map, i);
			if (!regmap_writeable(map, element) ||
				regmap_writeable_noinc(map, element))
				return -EINVAL;
		}
	}

	/* Mirror each value into the register cache before touching HW. */
	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		/* Cache-only maps never reach the hardware. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			/* Recurse for the part that fits this page. */
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes, noinc);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly page-translated) address into work_buf. */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		/* Reuse a descriptor from the free list if one exists. */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the unused descriptor to the free list. */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	return ret;
}
1870
1871/**
1872 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1873 *
1874 * @map: Map to check.
1875 */
1876bool regmap_can_raw_write(struct regmap *map)
1877{
1878 return map->bus && map->bus->write && map->format.format_val &&
1879 map->format.format_reg;
1880}
1881EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1882
1883/**
1884 * regmap_get_raw_read_max - Get the maximum size we can read
1885 *
1886 * @map: Map to check.
1887 */
1888size_t regmap_get_raw_read_max(struct regmap *map)
1889{
1890 return map->max_raw_read;
1891}
1892EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1893
1894/**
1895 * regmap_get_raw_write_max - Get the maximum size we can read
1896 *
1897 * @map: Map to check.
1898 */
1899size_t regmap_get_raw_write_max(struct regmap *map)
1900{
1901 return map->max_raw_write;
1902}
1903EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1904
1905/*static*/ int _regmap_bus_formatted_write(void *context, unsigned int reg,
1906 unsigned int val)
1907{
1908 int ret;
1909 struct regmap_range_node *range;
1910 struct regmap *map = context;
1911
1912 WARN_ON(!map->bus || !map->format.format_write);
1913
1914 range = _regmap_range_lookup(map, reg);
1915 if (range) {
1916 ret = _regmap_select_page(map, &reg, range, 1);
1917 if (ret != 0)
1918 return ret;
1919 }
1920
1921 map->format.format_write(map, reg, val);
1922
1923 ret = map->bus->write(map->bus_context, map->work_buf,
1924 map->format.buf_size);
1925
1926 return ret;
1927}
1928
1929/*static*/ int _regmap_bus_reg_write(void *context, unsigned int reg,
1930 unsigned int val)
1931{
1932 struct regmap *map = context;
1933
1934 return map->bus->reg_write(map->bus_context, reg, val);
1935}
1936
1937/*static*/ int _regmap_bus_raw_write(void *context, unsigned int reg,
1938 unsigned int val)
1939{
1940 struct regmap *map = context;
1941
1942 WARN_ON(!map->bus || !map->format.format_val);
1943
1944 map->format.format_val(map->work_buf + map->format.reg_bytes
1945 + map->format.pad_bytes, val, 0);
1946 return _regmap_raw_write_impl(map, reg,
1947 map->work_buf +
1948 map->format.reg_bytes +
1949 map->format.pad_bytes,
1950 map->format.val_bytes,
1951 false);
1952}
1953
1954/*static*/ inline void *_regmap_map_get_context(struct regmap *map)
1955{
1956 return (map->bus) ? map : map->bus_context;
1957}
1958
1959int _regmap_write(struct regmap *map, unsigned int reg,
1960 unsigned int val)
1961{
1962 int ret;
1963 void *context = _regmap_map_get_context(map);
1964
1965 if (!regmap_writeable(map, reg))
1966 return -EIO;
1967
1968 if (!map->cache_bypass && !map->defer_caching) {
1969 ret = regcache_write(map, reg, val);
1970 if (ret != 0)
1971 return ret;
1972 if (map->cache_only) {
1973 map->cache_dirty = true;
1974 return 0;
1975 }
1976 }
1977
1978 if (regmap_should_log(map))
1979 dev_info(map->dev, "%x <= %x\n", reg, val);
1980
1981 return map->reg_write(context, reg, val);
1982}
1983
#ifdef TARGET_OS2
/* Minimal stand-in for the kernel's IS_ALIGNED(): true when x is a
 * multiple of a. Only valid when 'a' is a power of two (mask trick). */
#define IS_ALIGNED(x, a) (((x) & ((unsigned int)(a) - 1)) == 0)
#endif
1987
1988/**
1989 * regmap_write() - Write a value to a single register
1990 *
1991 * @map: Register map to write to
1992 * @reg: Register to write to
1993 * @val: Value to be written
1994 *
1995 * A value of zero will be returned on success, a negative errno will
1996 * be returned in error cases.
1997 */
1998int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1999{
2000 int ret;
2001
2002 if (!IS_ALIGNED(reg, map->reg_stride))
2003 return -EINVAL;
2004
2005 map->lock(map->lock_arg);
2006
2007 ret = _regmap_write(map, reg, val);
2008
2009 map->unlock(map->lock_arg);
2010
2011 return ret;
2012}
2013EXPORT_SYMBOL_GPL(regmap_write);
2014
2015/**
2016 * regmap_write_async() - Write a value to a single register asynchronously
2017 *
2018 * @map: Register map to write to
2019 * @reg: Register to write to
2020 * @val: Value to be written
2021 *
2022 * A value of zero will be returned on success, a negative errno will
2023 * be returned in error cases.
2024 */
2025int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
2026{
2027 int ret;
2028
2029 if (!IS_ALIGNED(reg, map->reg_stride))
2030 return -EINVAL;
2031
2032 map->lock(map->lock_arg);
2033
2034 map->async = true;
2035
2036 ret = _regmap_write(map, reg, val);
2037
2038 map->async = false;
2039
2040 map->unlock(map->lock_arg);
2041
2042 return ret;
2043}
2044EXPORT_SYMBOL_GPL(regmap_write_async);
2045
2046int _regmap_raw_write(struct regmap *map, unsigned int reg,
2047 const void *val, size_t val_len, bool noinc)
2048{
2049 size_t val_bytes = map->format.val_bytes;
2050 size_t val_count = val_len / val_bytes;
2051 size_t chunk_count, chunk_bytes;
2052 size_t chunk_regs = val_count;
2053 int ret, i;
2054
2055 if (!val_count)
2056 return -EINVAL;
2057
2058 if (map->use_single_write)
2059 chunk_regs = 1;
2060 else if (map->max_raw_write && val_len > map->max_raw_write)
2061 chunk_regs = map->max_raw_write / val_bytes;
2062
2063 chunk_count = val_count / chunk_regs;
2064 chunk_bytes = chunk_regs * val_bytes;
2065
2066 /* Write as many bytes as possible with chunk_size */
2067 for (i = 0; i < chunk_count; i++) {
2068 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2069 if (ret)
2070 return ret;
2071
2072 reg += regmap_get_offset(map, chunk_regs);
2073 val += chunk_bytes;
2074 val_len -= chunk_bytes;
2075 }
2076
2077 /* Write remaining bytes */
2078 if (val_len)
2079 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2080
2081 return ret;
2082}
2083
2084/**
2085 * regmap_raw_write() - Write raw values to one or more registers
2086 *
2087 * @map: Register map to write to
2088 * @reg: Initial register to write to
2089 * @val: Block of data to be written, laid out for direct transmission to the
2090 * device
2091 * @val_len: Length of data pointed to by val.
2092 *
2093 * This function is intended to be used for things like firmware
2094 * download where a large block of data needs to be transferred to the
2095 * device. No formatting will be done on the data provided.
2096 *
2097 * A value of zero will be returned on success, a negative errno will
2098 * be returned in error cases.
2099 */
2100int regmap_raw_write(struct regmap *map, unsigned int reg,
2101 const void *val, size_t val_len)
2102{
2103 int ret;
2104
2105 if (!regmap_can_raw_write(map))
2106 return -EINVAL;
2107 if (val_len % map->format.val_bytes)
2108 return -EINVAL;
2109
2110 map->lock(map->lock_arg);
2111
2112 ret = _regmap_raw_write(map, reg, val, val_len, false);
2113
2114 map->unlock(map->lock_arg);
2115
2116 return ret;
2117}
2118EXPORT_SYMBOL_GPL(regmap_raw_write);
2119
2120/**
2121 * regmap_noinc_write(): Write data from a register without incrementing the
2122 * register number
2123 *
2124 * @map: Register map to write to
2125 * @reg: Register to write to
2126 * @val: Pointer to data buffer
2127 * @val_len: Length of output buffer in bytes.
2128 *
2129 * The regmap API usually assumes that bulk bus write operations will write a
2130 * range of registers. Some devices have certain registers for which a write
2131 * operation can write to an internal FIFO.
2132 *
2133 * The target register must be volatile but registers after it can be
2134 * completely unrelated cacheable registers.
2135 *
2136 * This will attempt multiple writes as required to write val_len bytes.
2137 *
2138 * A value of zero will be returned on success, a negative errno will be
2139 * returned in error cases.
2140 */
2141int regmap_noinc_write(struct regmap *map, unsigned int reg,
2142 const void *val, size_t val_len)
2143{
2144 size_t write_len;
2145 int ret;
2146
2147 if (!map->bus)
2148 return -EINVAL;
2149 if (!map->bus->write)
2150 return -ENOTSUPP;
2151 if (val_len % map->format.val_bytes)
2152 return -EINVAL;
2153 if (!IS_ALIGNED(reg, map->reg_stride))
2154 return -EINVAL;
2155 if (val_len == 0)
2156 return -EINVAL;
2157
2158 map->lock(map->lock_arg);
2159
2160 if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2161 ret = -EINVAL;
2162 goto out_unlock;
2163 }
2164
2165 while (val_len) {
2166 if (map->max_raw_write && map->max_raw_write < val_len)
2167 write_len = map->max_raw_write;
2168 else
2169 write_len = val_len;
2170 ret = _regmap_raw_write(map, reg, val, write_len, true);
2171 if (ret)
2172 goto out_unlock;
2173 val = ((u8 *)val) + write_len;
2174 val_len -= write_len;
2175 }
2176
2177out_unlock:
2178 map->unlock(map->lock_arg);
2179 return ret;
2180}
2181EXPORT_SYMBOL_GPL(regmap_noinc_write);
2182
2183/**
2184 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
2185 * register field.
2186 *
2187 * @field: Register field to write to
2188 * @mask: Bitmask to change
2189 * @val: Value to be written
2190 * @change: Boolean indicating if a write was done
2191 * @async: Boolean indicating asynchronously
2192 * @force: Boolean indicating use force update
2193 *
2194 * Perform a read/modify/write cycle on the register field with change,
2195 * async, force option.
2196 *
2197 * A value of zero will be returned on success, a negative errno will
2198 * be returned in error cases.
2199 */
2200int regmap_field_update_bits_base(struct regmap_field *field,
2201 unsigned int mask, unsigned int val,
2202 bool *change, bool async, bool force)
2203{
2204 mask = (mask << field->shift) & field->mask;
2205
2206 return regmap_update_bits_base(field->regmap, field->reg,
2207 mask, val << field->shift,
2208 change, async, force);
2209}
2210EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
2211
2212/**
2213 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2214 * register field with port ID
2215 *
2216 * @field: Register field to write to
2217 * @id: port ID
2218 * @mask: Bitmask to change
2219 * @val: Value to be written
2220 * @change: Boolean indicating if a write was done
2221 * @async: Boolean indicating asynchronously
2222 * @force: Boolean indicating use force update
2223 *
2224 * A value of zero will be returned on success, a negative errno will
2225 * be returned in error cases.
2226 */
2227int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2228 unsigned int mask, unsigned int val,
2229 bool *change, bool async, bool force)
2230{
2231 if (id >= field->id_size)
2232 return -EINVAL;
2233
2234 mask = (mask << field->shift) & field->mask;
2235
2236 return regmap_update_bits_base(field->regmap,
2237 field->reg + (field->id_offset * id),
2238 mask, val << field->shift,
2239 change, async, force);
2240}
2241EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2242
2243/**
2244 * regmap_bulk_write() - Write multiple registers to the device
2245 *
2246 * @map: Register map to write to
2247 * @reg: First register to be write from
2248 * @val: Block of data to be written, in native register size for device
2249 * @val_count: Number of registers to write
2250 *
2251 * This function is intended to be used for writing a large block of
2252 * data to the device either in single transfer or multiple transfer.
2253 *
2254 * A value of zero will be returned on success, a negative errno will
2255 * be returned in error cases.
2256 */
2257int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2258 size_t val_count)
2259{
2260 int ret = 0, i;
2261 size_t val_bytes = map->format.val_bytes;
2262
2263 if (!IS_ALIGNED(reg, map->reg_stride))
2264 return -EINVAL;
2265
2266 /*
2267 * Some devices don't support bulk write, for them we have a series of
2268 * single write operations.
2269 */
2270 if (!map->bus || !map->format.parse_inplace) {
2271 map->lock(map->lock_arg);
2272 for (i = 0; i < val_count; i++) {
2273 unsigned int ival;
2274
2275 switch (val_bytes) {
2276 case 1:
2277 ival = *(u8 *)(val + (i * val_bytes));
2278 break;
2279 case 2:
2280 ival = *(u16 *)(val + (i * val_bytes));
2281 break;
2282 case 4:
2283 ival = *(u32 *)(val + (i * val_bytes));
2284 break;
2285#ifdef CONFIG_64BIT
2286 case 8:
2287 ival = *(u64 *)(val + (i * val_bytes));
2288 break;
2289#endif
2290 default:
2291 ret = -EINVAL;
2292 goto out;
2293 }
2294
2295 ret = _regmap_write(map,
2296 reg + regmap_get_offset(map, i),
2297 ival);
2298 if (ret != 0)
2299 goto out;
2300 }
2301out:
2302 map->unlock(map->lock_arg);
2303 } else {
2304 void *wval;
2305
2306 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2307 if (!wval)
2308 return -ENOMEM;
2309
2310 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2311 map->format.parse_inplace(wval + i);
2312
2313 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2314
2315 kfree(wval);
2316 }
2317 return ret;
2318}
2319EXPORT_SYMBOL_GPL(regmap_bulk_write);
2320
2321/*
2322 * _regmap_raw_multi_reg_write()
2323 *
2324 * the (register,newvalue) pairs in regs have not been formatted, but
2325 * they are all in the same page and have been changed to being page
2326 * relative. The page register has been written if that was necessary.
2327 */
2328/*static*/ int _regmap_raw_multi_reg_write(struct regmap *map,
2329 const struct reg_sequence *regs,
2330 size_t num_regs)
2331{
2332 int ret;
2333 void *buf;
2334 int i;
2335 u8 *u8;
2336 size_t val_bytes = map->format.val_bytes;
2337 size_t reg_bytes = map->format.reg_bytes;
2338 size_t pad_bytes = map->format.pad_bytes;
2339 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2340 size_t len = pair_size * num_regs;
2341
2342 if (!len)
2343 return -EINVAL;
2344
2345 buf = kzalloc(len, GFP_KERNEL);
2346 if (!buf)
2347 return -ENOMEM;
2348
2349 /* We have to linearise by hand. */
2350
2351 u8 = buf;
2352
2353 for (i = 0; i < num_regs; i++) {
2354 unsigned int reg = regs[i].reg;
2355 unsigned int val = regs[i].def;
2356 map->format.format_reg(u8, reg, map->reg_shift);
2357 u8 += reg_bytes + pad_bytes;
2358 map->format.format_val(u8, val, 0);
2359 u8 += val_bytes;
2360 }
2361 u8 = buf;
2362 *u8 |= map->write_flag_mask;
2363
2364 ret = map->bus->write(map->bus_context, buf, len);
2365
2366 kfree(buf);
2367
2368#ifndef TARGET_OS2
2369 for (i = 0; i < num_regs; i++) {
2370 int reg = regs[i].reg;
2371 trace_regmap_hw_write_done(map, reg, 1);
2372 }
2373#endif
2374 return ret;
2375}
2376
2377/*static*/ unsigned int _regmap_register_page(struct regmap *map,
2378 unsigned int reg,
2379 struct regmap_range_node *range)
2380{
2381 unsigned int win_page = (reg - range->range_min) / range->window_len;
2382
2383 return win_page;
2384}
2385
/* Write a register sequence that may span window pages and/or contain
 * per-write delays, preserving the caller's ordering throughout.
 */
/*static*/ int _regmap_range_multi_paged_reg_write(struct regmap *map,
						   struct reg_sequence *regs,
						   size_t num_regs)
{
	int ret;
	int i, n;		/* i: scan index; n: pending writes in the current run */
	struct reg_sequence *base;	/* start of the run not yet flushed */
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* First register establishes the reference page. */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			/* Flush the run accumulated so far. */
			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
#ifndef TARGET_OS2
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
#endif
					udelay(regs[i].delay_us);
			}

			/* Start a fresh run at the current position. */
			base += n;
			n = 0;

			if (page_change) {
				/* Switch pages; n == 0 so &base[n].reg is the
				 * first register of the new run.
				 */
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Flush whatever is left after the final chop point. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2468
/* Core multi-register write: falls back to single writes when the map
 * cannot multi-write, otherwise validates, updates the cache, and either
 * dispatches to the paged path or emits one raw multi-register transfer.
 * Caller must hold the map lock.
 */
/*static*/ int _regmap_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int i;
	int ret;

	/* No multi-write support: degrade to one _regmap_write per entry,
	 * honouring any per-entry delay.
	 */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us) {
#ifndef TARGET_OS2
				if (map->can_sleep)
					fsleep(regs[i].delay_us);
				else
#endif
					udelay(regs[i].delay_us);
			}
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate writeability and alignment up front so we fail before
	 * any register has been touched.
	 */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Mirror all values into the cache first; in cache-only mode the
	 * hardware write is skipped and the cache marked dirty.
	 */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			/* Paged path mutates the sequence, so work on a copy
			 * of the caller's const array.
			 */
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No pages or delays involved: single linearised transfer. */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2549
2550/**
2551 * regmap_multi_reg_write() - Write multiple registers to the device
2552 *
2553 * @map: Register map to write to
2554 * @regs: Array of structures containing register,value to be written
2555 * @num_regs: Number of registers to write
2556 *
2557 * Write multiple registers to the device where the set of register, value
2558 * pairs are supplied in any order, possibly not all in a single range.
2559 *
2560 * The 'normal' block write mode will send ultimately send data on the
2561 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2562 * addressed. However, this alternative block multi write mode will send
2563 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2564 * must of course support the mode.
2565 *
2566 * A value of zero will be returned on success, a negative errno will be
2567 * returned in error cases.
2568 */
2569int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2570 int num_regs)
2571{
2572 int ret;
2573
2574 map->lock(map->lock_arg);
2575
2576 ret = _regmap_multi_reg_write(map, regs, num_regs);
2577
2578 map->unlock(map->lock_arg);
2579
2580 return ret;
2581}
2582EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2583
2584/**
2585 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2586 * device but not the cache
2587 *
2588 * @map: Register map to write to
2589 * @regs: Array of structures containing register,value to be written
2590 * @num_regs: Number of registers to write
2591 *
2592 * Write multiple registers to the device but not the cache where the set
2593 * of register are supplied in any order.
2594 *
2595 * This function is intended to be used for writing a large block of data
2596 * atomically to the device in single transfer for those I2C client devices
2597 * that implement this alternative block write mode.
2598 *
2599 * A value of zero will be returned on success, a negative errno will
2600 * be returned in error cases.
2601 */
2602int regmap_multi_reg_write_bypassed(struct regmap *map,
2603 const struct reg_sequence *regs,
2604 int num_regs)
2605{
2606 int ret;
2607 bool bypass;
2608
2609 map->lock(map->lock_arg);
2610
2611 bypass = map->cache_bypass;
2612 map->cache_bypass = true;
2613
2614 ret = _regmap_multi_reg_write(map, regs, num_regs);
2615
2616 map->cache_bypass = bypass;
2617
2618 map->unlock(map->lock_arg);
2619
2620 return ret;
2621}
2622EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2623
2624/**
2625 * regmap_raw_write_async() - Write raw values to one or more registers
2626 * asynchronously
2627 *
2628 * @map: Register map to write to
2629 * @reg: Initial register to write to
2630 * @val: Block of data to be written, laid out for direct transmission to the
2631 * device. Must be valid until regmap_async_complete() is called.
2632 * @val_len: Length of data pointed to by val.
2633 *
2634 * This function is intended to be used for things like firmware
2635 * download where a large block of data needs to be transferred to the
2636 * device. No formatting will be done on the data provided.
2637 *
2638 * If supported by the underlying bus the write will be scheduled
2639 * asynchronously, helping maximise I/O speed on higher speed buses
2640 * like SPI. regmap_async_complete() can be called to ensure that all
2641 * asynchrnous writes have been completed.
2642 *
2643 * A value of zero will be returned on success, a negative errno will
2644 * be returned in error cases.
2645 */
2646int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2647 const void *val, size_t val_len)
2648{
2649 int ret;
2650
2651 if (val_len % map->format.val_bytes)
2652 return -EINVAL;
2653 if (!IS_ALIGNED(reg, map->reg_stride))
2654 return -EINVAL;
2655
2656 map->lock(map->lock_arg);
2657
2658 map->async = true;
2659
2660 ret = _regmap_raw_write(map, reg, val, val_len, false);
2661
2662 map->async = false;
2663
2664 map->unlock(map->lock_arg);
2665
2666 return ret;
2667}
2668EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2669
/* Perform one raw bus read of @val_len bytes starting at @reg, handling
 * page selection and the read flag mask. Caller must hold the map lock.
 */
/*static*/ int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* Select the right window page first if the register lives in an
	 * indirect range; noinc reads count as a single register.
	 */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Format the register address into the work buffer and OR in the
	 * bus read flag mask before issuing the transfer.
	 */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	return ret;
}
2699
2700/*static*/ int _regmap_bus_reg_read(void *context, unsigned int reg,
2701 unsigned int *val)
2702{
2703 struct regmap *map = context;
2704
2705 return map->bus->reg_read(map->bus_context, reg, val);
2706}
2707
2708/*static*/ int _regmap_bus_read(void *context, unsigned int reg,
2709 unsigned int *val)
2710{
2711 int ret;
2712 struct regmap *map = context;
2713 void *work_val = map->work_buf + map->format.reg_bytes +
2714 map->format.pad_bytes;
2715
2716 if (!map->format.parse_val)
2717 return -EINVAL;
2718
2719 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2720 if (ret == 0)
2721 *val = map->format.parse_val(work_val);
2722
2723 return ret;
2724}
2725
/* Read a single register, preferring the cache; on a hardware read,
 * refresh the cache with the value. Caller must hold the map lock.
 */
/*static*/ int _regmap_read(struct regmap *map, unsigned int reg,
		        unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Cache hit satisfies the read without touching the hardware. */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache miss while in cache-only mode cannot go to the device. */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
		if (regmap_should_log(map))
			dev_info(map->dev, "%x => %x\n", reg, *val);

		/* Populate the cache with the freshly read value. */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2755
2756/**
2757 * regmap_read() - Read a value from a single register
2758 *
2759 * @map: Register map to read from
2760 * @reg: Register to be read from
2761 * @val: Pointer to store read value
2762 *
2763 * A value of zero will be returned on success, a negative errno will
2764 * be returned in error cases.
2765 */
2766int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2767{
2768 int ret;
2769
2770 if (!IS_ALIGNED(reg, map->reg_stride))
2771 return -EINVAL;
2772
2773 map->lock(map->lock_arg);
2774
2775 ret = _regmap_read(map, reg, val);
2776
2777 map->unlock(map->lock_arg);
2778
2779 return ret;
2780}
2781EXPORT_SYMBOL_GPL(regmap_read);
2782
2783/**
2784 * regmap_raw_read() - Read raw data from the device
2785 *
2786 * @map: Register map to read from
2787 * @reg: First register to be read from
2788 * @val: Pointer to store read value
2789 * @val_len: Size of data to read
2790 *
2791 * A value of zero will be returned on success, a negative errno will
2792 * be returned in error cases.
2793 */
2794int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2795 size_t val_len)
2796{
2797 size_t val_bytes = map->format.val_bytes;
2798 size_t val_count = val_len / val_bytes;
2799 unsigned int v;
2800 int ret, i;
2801
2802 if (!map->bus)
2803 return -EINVAL;
2804 if (val_len % map->format.val_bytes)
2805 return -EINVAL;
2806 if (!IS_ALIGNED(reg, map->reg_stride))
2807 return -EINVAL;
2808 if (val_count == 0)
2809 return -EINVAL;
2810
2811 map->lock(map->lock_arg);
2812
2813 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2814 map->cache_type == REGCACHE_NONE) {
2815 size_t chunk_count, chunk_bytes;
2816 size_t chunk_regs = val_count;
2817
2818 if (!map->bus->read) {
2819 ret = -ENOTSUPP;
2820 goto out;
2821 }
2822
2823 if (map->use_single_read)
2824 chunk_regs = 1;
2825 else if (map->max_raw_read && val_len > map->max_raw_read)
2826 chunk_regs = map->max_raw_read / val_bytes;
2827
2828 chunk_count = val_count / chunk_regs;
2829 chunk_bytes = chunk_regs * val_bytes;
2830
2831 /* Read bytes that fit into whole chunks */
2832 for (i = 0; i < chunk_count; i++) {
2833 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
2834 if (ret != 0)
2835 goto out;
2836
2837 reg += regmap_get_offset(map, chunk_regs);
2838 val += chunk_bytes;
2839 val_len -= chunk_bytes;
2840 }
2841
2842 /* Read remaining bytes */
2843 if (val_len) {
2844 ret = _regmap_raw_read(map, reg, val, val_len, false);
2845 if (ret != 0)
2846 goto out;
2847 }
2848 } else {
2849 /* Otherwise go word by word for the cache; should be low
2850 * cost as we expect to hit the cache.
2851 */
2852 for (i = 0; i < val_count; i++) {
2853 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2854 &v);
2855 if (ret != 0)
2856 goto out;
2857
2858 map->format.format_val(val + (i * val_bytes), v, 0);
2859 }
2860 }
2861
2862 out:
2863 map->unlock(map->lock_arg);
2864
2865 return ret;
2866}
2867EXPORT_SYMBOL_GPL(regmap_raw_read);
2868
2869/**
2870 * regmap_noinc_read(): Read data from a register without incrementing the
2871 * register number
2872 *
2873 * @map: Register map to read from
2874 * @reg: Register to read from
2875 * @val: Pointer to data buffer
2876 * @val_len: Length of output buffer in bytes.
2877 *
2878 * The regmap API usually assumes that bulk bus read operations will read a
2879 * range of registers. Some devices have certain registers for which a read
2880 * operation read will read from an internal FIFO.
2881 *
2882 * The target register must be volatile but registers after it can be
2883 * completely unrelated cacheable registers.
2884 *
2885 * This will attempt multiple reads as required to read val_len bytes.
2886 *
2887 * A value of zero will be returned on success, a negative errno will be
2888 * returned in error cases.
2889 */
2890int regmap_noinc_read(struct regmap *map, unsigned int reg,
2891 void *val, size_t val_len)
2892{
2893 size_t read_len;
2894 int ret;
2895
2896 if (!map->bus)
2897 return -EINVAL;
2898 if (!map->bus->read)
2899 return -ENOTSUPP;
2900 if (val_len % map->format.val_bytes)
2901 return -EINVAL;
2902 if (!IS_ALIGNED(reg, map->reg_stride))
2903 return -EINVAL;
2904 if (val_len == 0)
2905 return -EINVAL;
2906
2907 map->lock(map->lock_arg);
2908
2909 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2910 ret = -EINVAL;
2911 goto out_unlock;
2912 }
2913
2914 while (val_len) {
2915 if (map->max_raw_read && map->max_raw_read < val_len)
2916 read_len = map->max_raw_read;
2917 else
2918 read_len = val_len;
2919 ret = _regmap_raw_read(map, reg, val, read_len, true);
2920 if (ret)
2921 goto out_unlock;
2922 val = ((u8 *)val) + read_len;
2923 val_len -= read_len;
2924 }
2925
2926out_unlock:
2927 map->unlock(map->lock_arg);
2928 return ret;
2929}
2930EXPORT_SYMBOL_GPL(regmap_noinc_read);
2931
2932/**
2933 * regmap_field_read(): Read a value to a single register field
2934 *
2935 * @field: Register field to read from
2936 * @val: Pointer to store read value
2937 *
2938 * A value of zero will be returned on success, a negative errno will
2939 * be returned in error cases.
2940 */
2941int regmap_field_read(struct regmap_field *field, unsigned int *val)
2942{
2943 int ret;
2944 unsigned int reg_val;
2945 ret = regmap_read(field->regmap, field->reg, &reg_val);
2946 if (ret != 0)
2947 return ret;
2948
2949 reg_val &= field->mask;
2950 reg_val >>= field->shift;
2951 *val = reg_val;
2952
2953 return ret;
2954}
2955EXPORT_SYMBOL_GPL(regmap_field_read);
2956
2957/**
2958 * regmap_fields_read() - Read a value to a single register field with port ID
2959 *
2960 * @field: Register field to read from
2961 * @id: port ID
2962 * @val: Pointer to store read value
2963 *
2964 * A value of zero will be returned on success, a negative errno will
2965 * be returned in error cases.
2966 */
2967int regmap_fields_read(struct regmap_field *field, unsigned int id,
2968 unsigned int *val)
2969{
2970 int ret;
2971 unsigned int reg_val;
2972
2973 if (id >= field->id_size)
2974 return -EINVAL;
2975
2976 ret = regmap_read(field->regmap,
2977 field->reg + (field->id_offset * id),
2978 &reg_val);
2979 if (ret != 0)
2980 return ret;
2981
2982 reg_val &= field->mask;
2983 reg_val >>= field->shift;
2984 *val = reg_val;
2985
2986 return ret;
2987}
2988EXPORT_SYMBOL_GPL(regmap_fields_read);
2989
2990/**
2991 * regmap_bulk_read() - Read multiple registers from the device
2992 *
2993 * @map: Register map to read from
2994 * @reg: First register to be read from
2995 * @val: Pointer to store read value, in native register size for device
2996 * @val_count: Number of registers to read
2997 *
2998 * A value of zero will be returned on success, a negative errno will
2999 * be returned in error cases.
3000 */
3001int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
3002 size_t val_count)
3003{
3004 int ret, i;
3005 size_t val_bytes = map->format.val_bytes;
3006 bool vol = regmap_volatile_range(map, reg, val_count);
3007
3008 if (!IS_ALIGNED(reg, map->reg_stride))
3009 return -EINVAL;
3010 if (val_count == 0)
3011 return -EINVAL;
3012
3013 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
3014 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
3015 if (ret != 0)
3016 return ret;
3017
3018 for (i = 0; i < val_count * val_bytes; i += val_bytes)
3019 map->format.parse_inplace(val + i);
3020 } else {
3021#ifdef CONFIG_64BIT
3022 u64 *u64 = val;
3023#endif
3024 u32 *u32 = val;
3025 u16 *u16 = val;
3026 u8 *u8 = val;
3027
3028 map->lock(map->lock_arg);
3029
3030 for (i = 0; i < val_count; i++) {
3031 unsigned int ival;
3032
3033 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
3034 &ival);
3035 if (ret != 0)
3036 goto out;
3037
3038 switch (map->format.val_bytes) {
3039#ifdef CONFIG_64BIT
3040 case 8:
3041 u64[i] = ival;
3042 break;
3043#endif
3044 case 4:
3045 u32[i] = ival;
3046 break;
3047 case 2:
3048 u16[i] = ival;
3049 break;
3050 case 1:
3051 u8[i] = ival;
3052 break;
3053 default:
3054 ret = -EINVAL;
3055 goto out;
3056 }
3057 }
3058
3059out:
3060 map->unlock(map->lock_arg);
3061 }
3062
3063 return ret;
3064}
3065EXPORT_SYMBOL_GPL(regmap_bulk_read);
3066
3067/*static*/ int _regmap_update_bits(struct regmap *map, unsigned int reg,
3068 unsigned int mask, unsigned int val,
3069 bool *change, bool force_write)
3070{
3071 int ret;
3072 unsigned int tmp, orig;
3073
3074 if (change)
3075 *change = false;
3076
3077 if (regmap_volatile(map, reg) && map->reg_update_bits) {
3078 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3079 if (ret == 0 && change)
3080 *change = true;
3081 } else {
3082 ret = _regmap_read(map, reg, &orig);
3083 if (ret != 0)
3084 return ret;
3085
3086 tmp = orig & ~mask;
3087 tmp |= val & mask;
3088
3089 if (force_write || (tmp != orig)) {
3090 ret = _regmap_write(map, reg, tmp);
3091 if (ret == 0 && change)
3092 *change = true;
3093 }
3094 }
3095
3096 return ret;
3097}
3098
3099/**
3100 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3101 *
3102 * @map: Register map to update
3103 * @reg: Register to update
3104 * @mask: Bitmask to change
3105 * @val: New value for bitmask
3106 * @change: Boolean indicating if a write was done
3107 * @async: Boolean indicating asynchronously
3108 * @force: Boolean indicating use force update
3109 *
3110 * Perform a read/modify/write cycle on a register map with change, async, force
3111 * options.
3112 *
3113 * If async is true:
3114 *
3115 * With most buses the read must be done synchronously so this is most useful
3116 * for devices with a cache which do not need to interact with the hardware to
3117 * determine the current register value.
3118 *
3119 * Returns zero for success, a negative number on error.
3120 */
3121int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3122 unsigned int mask, unsigned int val,
3123 bool *change, bool async, bool force)
3124{
3125 int ret;
3126
3127 map->lock(map->lock_arg);
3128
3129 map->async = async;
3130
3131 ret = _regmap_update_bits(map, reg, mask, val, change, force);
3132
3133 map->async = false;
3134
3135 map->unlock(map->lock_arg);
3136
3137 return ret;
3138}
3139EXPORT_SYMBOL_GPL(regmap_update_bits_base);
3140
3141/**
3142 * regmap_test_bits() - Check if all specified bits are set in a register.
3143 *
3144 * @map: Register map to operate on
3145 * @reg: Register to read from
3146 * @bits: Bits to test
3147 *
3148 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3149 * bits are set and a negative error number if the underlying regmap_read()
3150 * fails.
3151 */
3152int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3153{
3154 unsigned int val, ret;
3155
3156 ret = regmap_read(map, reg, &val);
3157 if (ret)
3158 return ret;
3159
3160 return (val & bits) == bits;
3161}
3162EXPORT_SYMBOL_GPL(regmap_test_bits);
3163
/* Completion callback for asynchronous transfers: move the finished async
 * descriptor to the free list, record any error, and wake waiters once the
 * pending list drains empty.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	spin_lock(&map->async_lock);
	/* Recycle the descriptor onto the free list. */
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	/* Keep the first/last reported error for regmap_async_complete(). */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	/* Wake outside the lock; wake only when nothing is left pending. */
	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3182
3183#ifndef TARGET_OS2
3184/*static*/ int regmap_async_is_done(struct regmap *map)
3185{
3186 unsigned long flags;
3187 int ret;
3188
3189 spin_lock_irqsave(&map->async_lock, flags);
3190 ret = list_empty(&map->async_list);
3191 spin_unlock_irqrestore(&map->async_lock, flags);
3192
3193 return ret;
3194}
3195#endif
3196
3197/**
3198 * regmap_async_complete - Ensure all asynchronous I/O has completed.
3199 *
3200 * @map: Map to operate on.
3201 *
3202 * Blocks until any pending asynchronous I/O has completed. Returns
3203 * an error code for any failed I/O operations.
3204 */
3205int regmap_async_complete(struct regmap *map)
3206{
3207 unsigned long flags;
3208 int ret;
3209
3210 /* Nothing to do with no async support */
3211 if (!map->bus || !map->bus->async_write)
3212 return 0;
3213
3214#ifndef TARGET_OS2
3215 wait_event(map->async_waitq, regmap_async_is_done(map));
3216#endif
3217 spin_lock_irqsave(&map->async_lock, flags);
3218 ret = map->async_ret;
3219 map->async_ret = 0;
3220 spin_unlock_irqrestore(&map->async_lock, flags);
3221
3222 return ret;
3223}
3224EXPORT_SYMBOL_GPL(regmap_async_complete);
3225
3226/**
3227 * regmap_register_patch - Register and apply register updates to be applied
3228 * on device initialistion
3229 *
3230 * @map: Register map to apply updates to.
3231 * @regs: Values to update.
3232 * @num_regs: Number of entries in regs.
3233 *
3234 * Register a set of register updates to be applied to the device
3235 * whenever the device registers are synchronised with the cache and
3236 * apply them immediately. Typically this is used to apply
3237 * corrections to be applied to the device defaults on startup, such
3238 * as the updates some vendors provide to undocumented registers.
3239 *
3240 * The caller must ensure that this function cannot be called
3241 * concurrently with either itself or regcache_sync().
3242 */
3243int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3244 int num_regs)
3245{
3246 struct reg_sequence *p;
3247 int ret;
3248 bool bypass;
3249
3250#ifndef TARGET_OS2
3251 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
3252 num_regs))
3253 return 0;
3254#else
3255 if (num_regs <= 0) {
3256 pr_warn("invalid registers number (%d)", num_regs);
3257 return 0;
3258 }
3259#endif
3260 p = krealloc(map->patch,
3261 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3262 GFP_KERNEL);
3263 if (p) {
3264 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3265 map->patch = p;
3266 map->patch_regs += num_regs;
3267 } else {
3268 return -ENOMEM;
3269 }
3270
3271 map->lock(map->lock_arg);
3272
3273 bypass = map->cache_bypass;
3274
3275 map->cache_bypass = true;
3276 map->async = true;
3277
3278 ret = _regmap_multi_reg_write(map, regs, num_regs);
3279
3280 map->async = false;
3281 map->cache_bypass = bypass;
3282
3283 map->unlock(map->lock_arg);
3284
3285 regmap_async_complete(map);
3286
3287 return ret;
3288}
3289EXPORT_SYMBOL_GPL(regmap_register_patch);
3290
3291/**
3292 * regmap_get_val_bytes() - Report the size of a register value
3293 *
3294 * @map: Register map to operate on.
3295 *
3296 * Report the size of a register value, mainly intended to for use by
3297 * generic infrastructure built on top of regmap.
3298 */
3299int regmap_get_val_bytes(struct regmap *map)
3300{
3301 if (map->format.format_write)
3302 return -EINVAL;
3303
3304 return map->format.val_bytes;
3305}
3306EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3307
3308/**
3309 * regmap_get_max_register() - Report the max register value
3310 *
3311 * @map: Register map to operate on.
3312 *
3313 * Report the max register value, mainly intended to for use by
3314 * generic infrastructure built on top of regmap.
3315 */
3316int regmap_get_max_register(struct regmap *map)
3317{
3318 return map->max_register ? map->max_register : -EINVAL;
3319}
3320EXPORT_SYMBOL_GPL(regmap_get_max_register);
3321
3322/**
3323 * regmap_get_reg_stride() - Report the register address stride
3324 *
3325 * @map: Register map to operate on.
3326 *
3327 * Report the register address stride, mainly intended to for use by
3328 * generic infrastructure built on top of regmap.
3329 */
3330int regmap_get_reg_stride(struct regmap *map)
3331{
3332 return map->reg_stride;
3333}
3334EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3335
3336int regmap_parse_val(struct regmap *map, const void *buf,
3337 unsigned int *val)
3338{
3339 if (!map->format.parse_val)
3340 return -EINVAL;
3341
3342 *val = map->format.parse_val(buf);
3343
3344 return 0;
3345}
3346EXPORT_SYMBOL_GPL(regmap_parse_val);
3347
/* One-time init hook: set up the regmap debugfs tree at postcore time. */
/*static*/ int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
Note: See TracBrowser for help on using the repository browser.