source: GPL/branches/uniaud32-next/lib32/regmap.c@ 621

Last change on this file since 621 was 621, checked in by Paul Smedley, 5 years ago

Remove some logging messages

File size: 70.0 KB
Line 
1/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12/* from 4.14.202 */
13
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/export.h>
17#include <linux/mutex.h>
18#include <linux/err.h>
19#include <linux/of.h>
20#include <linux/rbtree.h>
21#include <linux/sched.h>
22#include <linux/delay.h>
23#include <linux/log2.h>
24#include <linux/module.h>
25#include <linux/workqueue.h>
26#include <linux/byteorder/little_endian.h>
27#include <linux/printk.h>
28
29#define CREATE_TRACE_POINTS
30//#include "trace.h"
31
32#include "internal.h"
33
34/*
35 * Sometimes for failures during very early init the trace
36 * infrastructure isn't available early enough to be used. For this
37 * sort of problem defining LOG_DEVICE will add printks for basic
38 * register I/O on a specific device.
39 */
40#undef LOG_DEVICE
41
/*
 * Forward declarations for helpers defined later in this file.
 * __regmap_init() installs the _regmap_bus_* functions as the map's
 * reg_read/reg_write backends depending on the bus capabilities.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);
56
57bool regmap_reg_in_ranges(unsigned int reg,
58 const struct regmap_range *ranges,
59 unsigned int nranges)
60{
61 const struct regmap_range *r;
62 int i;
63
64 for (i = 0, r = ranges; i < nranges; i++, r++)
65 if (regmap_reg_in_range(reg, r))
66 return true;
67 return false;
68}
69EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
70
71bool regmap_check_range_table(struct regmap *map, unsigned int reg,
72 const struct regmap_access_table *table)
73{
74 /* Check "no ranges" first */
75 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
76 return false;
77
78 /* In case zero "yes ranges" are supplied, any reg is OK */
79 if (!table->n_yes_ranges)
80 return true;
81
82 return regmap_reg_in_ranges(reg, table->yes_ranges,
83 table->n_yes_ranges);
84}
85EXPORT_SYMBOL_GPL(regmap_check_range_table);
86
87bool regmap_writeable(struct regmap *map, unsigned int reg)
88{
89 if (map->max_register && reg > map->max_register)
90 return false;
91
92 if (map->writeable_reg)
93 return map->writeable_reg(map->dev, reg);
94
95 if (map->wr_table)
96 return regmap_check_range_table(map, reg, map->wr_table);
97
98 return true;
99}
100
101bool regmap_cached(struct regmap *map, unsigned int reg)
102{
103 int ret;
104 unsigned int val;
105
106 if (map->cache_type == REGCACHE_NONE)
107 return false;
108
109 if (!map->cache_ops)
110 return false;
111
112 if (map->max_register && reg > map->max_register)
113 return false;
114
115 map->lock(map->lock_arg);
116 ret = regcache_read(map, reg, &val);
117 map->unlock(map->lock_arg);
118 if (ret)
119 return false;
120
121 return true;
122}
123
124bool regmap_readable(struct regmap *map, unsigned int reg)
125{
126 if (!map->reg_read)
127 return false;
128
129 if (map->max_register && reg > map->max_register)
130 return false;
131
132 if (map->format.format_write)
133 return false;
134
135 if (map->readable_reg)
136 return map->readable_reg(map->dev, reg);
137
138 if (map->rd_table)
139 return regmap_check_range_table(map, reg, map->rd_table);
140
141 return true;
142}
143
144bool regmap_volatile(struct regmap *map, unsigned int reg)
145{
146 if (!map->format.format_write && !regmap_readable(map, reg))
147 return false;
148
149 if (map->volatile_reg)
150 return map->volatile_reg(map->dev, reg);
151
152 if (map->volatile_table)
153 return regmap_check_range_table(map, reg, map->volatile_table);
154
155 if (map->cache_ops)
156 return false;
157 else
158 return true;
159}
160
161bool regmap_precious(struct regmap *map, unsigned int reg)
162{
163 if (!regmap_readable(map, reg))
164 return false;
165
166 if (map->precious_reg)
167 return map->precious_reg(map->dev, reg);
168
169 if (map->precious_table)
170 return regmap_check_range_table(map, reg, map->precious_table);
171
172 return false;
173}
174
175static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
176 size_t num)
177{
178 unsigned int i;
179
180 for (i = 0; i < num; i++)
181 if (!regmap_volatile(map, reg + i))
182 return false;
183
184 return true;
185}
186
187static void regmap_format_2_6_write(struct regmap *map,
188 unsigned int reg, unsigned int val)
189{
190 u8 *out = map->work_buf;
191
192 *out = (reg << 6) | val;
193}
194
195static void regmap_format_4_12_write(struct regmap *map,
196 unsigned int reg, unsigned int val)
197{
198 __be16 *out = map->work_buf;
199 *out = cpu_to_be16((reg << 12) | val);
200}
201
202static void regmap_format_7_9_write(struct regmap *map,
203 unsigned int reg, unsigned int val)
204{
205 __be16 *out = map->work_buf;
206 *out = cpu_to_be16((reg << 9) | val);
207}
208
209static void regmap_format_10_14_write(struct regmap *map,
210 unsigned int reg, unsigned int val)
211{
212 u8 *out = map->work_buf;
213
214 out[2] = val;
215 out[1] = (val >> 8) | (reg << 6);
216 out[0] = reg >> 2;
217}
218
219static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
220{
221 u8 *b = buf;
222
223 b[0] = val << shift;
224}
225
226static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
227{
228 __be16 *b = buf;
229
230 b[0] = cpu_to_be16(val << shift);
231}
232
233static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
234{
235 __le16 *b = buf;
236
237 b[0] = cpu_to_le16(val << shift);
238}
239
240static void regmap_format_16_native(void *buf, unsigned int val,
241 unsigned int shift)
242{
243 *(u16 *)buf = val << shift;
244}
245
246static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
247{
248 u8 *b = buf;
249
250 val <<= shift;
251
252 b[0] = val >> 16;
253 b[1] = val >> 8;
254 b[2] = val;
255}
256
257static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
258{
259 __be32 *b = buf;
260
261 b[0] = cpu_to_be32(val << shift);
262}
263
264static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
265{
266 __le32 *b = buf;
267
268 b[0] = cpu_to_le32(val << shift);
269}
270
271static void regmap_format_32_native(void *buf, unsigned int val,
272 unsigned int shift)
273{
274 *(u32 *)buf = val << shift;
275}
276
#ifdef CONFIG_64BIT
/* 64-bit formatters: @val is widened before the shift so that bits
 * shifted above position 31 are preserved. */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *out = buf;

	out[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *out = buf;

	out[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 *out = buf;

	out[0] = (u64)val << shift;
}
#endif
298
/* In-place parse callback for formats already in CPU byte order: no-op. */
static void regmap_parse_inplace_noop(void *buf)
{
}
302
303static unsigned int regmap_parse_8(const void *buf)
304{
305 const u8 *b = buf;
306
307 return b[0];
308}
309
310static unsigned int regmap_parse_16_be(const void *buf)
311{
312 const __be16 *b = buf;
313
314 return be16_to_cpu(b[0]);
315}
316
317static unsigned int regmap_parse_16_le(const void *buf)
318{
319 const __le16 *b = buf;
320
321 return le16_to_cpu(b[0]);
322}
323
324static void regmap_parse_16_be_inplace(void *buf)
325{
326 __be16 *b = buf;
327
328 b[0] = be16_to_cpu(b[0]);
329}
330
331static void regmap_parse_16_le_inplace(void *buf)
332{
333 __le16 *b = buf;
334
335 b[0] = le16_to_cpu(b[0]);
336}
337
338static unsigned int regmap_parse_16_native(const void *buf)
339{
340 return *(u16 *)buf;
341}
342
343static unsigned int regmap_parse_24(const void *buf)
344{
345 const u8 *b = buf;
346 unsigned int ret = b[2];
347 ret |= ((unsigned int)b[1]) << 8;
348 ret |= ((unsigned int)b[0]) << 16;
349
350 return ret;
351}
352
353static unsigned int regmap_parse_32_be(const void *buf)
354{
355 const __be32 *b = buf;
356
357 return be32_to_cpu(b[0]);
358}
359
360static unsigned int regmap_parse_32_le(const void *buf)
361{
362 const __le32 *b = buf;
363
364 return le32_to_cpu(b[0]);
365}
366
367static void regmap_parse_32_be_inplace(void *buf)
368{
369 __be32 *b = buf;
370
371 b[0] = be32_to_cpu(b[0]);
372}
373
374static void regmap_parse_32_le_inplace(void *buf)
375{
376 __le32 *b = buf;
377
378 b[0] = le32_to_cpu(b[0]);
379}
380
381static unsigned int regmap_parse_32_native(const void *buf)
382{
383 return *(u32 *)buf;
384}
385
#ifdef CONFIG_64BIT
/* 64-bit parsers.  Note the out-of-place variants narrow the result to
 * unsigned int, matching the generic parse_val callback signature. */
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *in = buf;

	return be64_to_cpu(in[0]);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *in = buf;

	return le64_to_cpu(in[0]);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *p = buf;

	p[0] = be64_to_cpu(p[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *p = buf;

	p[0] = le64_to_cpu(p[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	const u64 *in = buf;

	return in[0];
}
#endif
420
421static void regmap_lock_mutex(void *__map)
422{
423 struct regmap *map = __map;
424 mutex_lock(&map->mutex);
425}
426
427static void regmap_unlock_mutex(void *__map)
428{
429 struct regmap *map = __map;
430 mutex_unlock(&map->mutex);
431}
432
433static void regmap_lock_spinlock(void *__map)
434__acquires(&map->spinlock)
435{
436 struct regmap *map = __map;
437 unsigned long flags;
438
439 spin_lock_irqsave(&map->spinlock, flags);
440 map->spinlock_flags = flags;
441}
442
443static void regmap_unlock_spinlock(void *__map)
444__releases(&map->spinlock)
445{
446 struct regmap *map = __map;
447 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
448}
449
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * Intentionally empty: the devres entry exists only so that
	 * dev_get_regmap() can look the map up again later.  The regmap
	 * itself is not owned by this resource, so there is nothing to
	 * tear down here.
	 */
}
458
459static bool _regmap_range_add(struct regmap *map,
460 struct regmap_range_node *data)
461{
462 struct rb_root *root = &map->range_tree;
463 struct rb_node **new = &(root->rb_node), *parent = NULL;
464
465 while (*new) {
466 struct regmap_range_node *this =
467 rb_entry(*new, struct regmap_range_node, node);
468
469 parent = *new;
470 if (data->range_max < this->range_min)
471 new = &((*new)->rb_left);
472 else if (data->range_min > this->range_max)
473 new = &((*new)->rb_right);
474 else
475 return false;
476 }
477
478 rb_link_node(&data->node, parent, new);
479 rb_insert_color(&data->node, root);
480
481 return true;
482}
483
484static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
485 unsigned int reg)
486{
487 struct rb_node *node = map->range_tree.rb_node;
488
489 while (node) {
490 struct regmap_range_node *this =
491 rb_entry(node, struct regmap_range_node, node);
492
493 if (reg < this->range_min)
494 node = node->rb_left;
495 else if (reg > this->range_max)
496 node = node->rb_right;
497 else
498 return this;
499 }
500
501 return NULL;
502}
503
504static void regmap_range_exit(struct regmap *map)
505{
506 struct rb_node *next;
507 struct regmap_range_node *range_node;
508
509 next = rb_first(&map->range_tree);
510 while (next) {
511 range_node = rb_entry(next, struct regmap_range_node, node);
512 next = rb_next(&range_node->node);
513 rb_erase(&range_node->node, &map->range_tree);
514 kfree(range_node);
515 }
516
517 kfree(map->selector_work_buf);
518}
519
520int regmap_attach_dev(struct device *dev, struct regmap *map,
521 const struct regmap_config *config)
522{
523 struct regmap **m;
524
525 map->dev = dev;
526
527 regmap_debugfs_init(map, config->name);
528
529 /* Add a devres resource for dev_get_regmap() */
530 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
531 if (!m) {
532 regmap_debugfs_exit(map);
533 return -ENOMEM;
534 }
535 *m = map;
536 devres_add(dev, m);
537
538 return 0;
539}
540EXPORT_SYMBOL_GPL(regmap_attach_dev);
541
542static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
543 const struct regmap_config *config)
544{
545 enum regmap_endian endian;
546
547 /* Retrieve the endianness specification from the regmap config */
548 endian = config->reg_format_endian;
549
550 /* If the regmap config specified a non-default value, use that */
551 if (endian != REGMAP_ENDIAN_DEFAULT)
552 return endian;
553
554 /* Retrieve the endianness specification from the bus config */
555 if (bus && bus->reg_format_endian_default)
556 endian = bus->reg_format_endian_default;
557
558 /* If the bus specified a non-default value, use that */
559 if (endian != REGMAP_ENDIAN_DEFAULT)
560 return endian;
561
562 /* Use this if no other value was found */
563 return REGMAP_ENDIAN_BIG;
564}
565
566enum regmap_endian regmap_get_val_endian(struct device *dev,
567 const struct regmap_bus *bus,
568 const struct regmap_config *config)
569{
570 struct device_node *np;
571 enum regmap_endian endian;
572
573 /* Retrieve the endianness specification from the regmap config */
574 endian = config->val_format_endian;
575
576 /* If the regmap config specified a non-default value, use that */
577 if (endian != REGMAP_ENDIAN_DEFAULT)
578 return endian;
579
580 /* If the dev and dev->of_node exist try to get endianness from DT */
581 if (dev && dev->of_node) {
582 np = dev->of_node;
583
584 /* Parse the device's DT node for an endianness specification */
585 if (of_property_read_bool(np, "big-endian"))
586 endian = REGMAP_ENDIAN_BIG;
587 else if (of_property_read_bool(np, "little-endian"))
588 endian = REGMAP_ENDIAN_LITTLE;
589 else if (of_property_read_bool(np, "native-endian"))
590 endian = REGMAP_ENDIAN_NATIVE;
591
592 /* If the endianness was specified in DT, use that */
593 if (endian != REGMAP_ENDIAN_DEFAULT)
594 return endian;
595 }
596
597 /* Retrieve the endianness specification from the bus config */
598 if (bus && bus->val_format_endian_default)
599 endian = bus->val_format_endian_default;
600
601 /* If the bus specified a non-default value, use that */
602 if (endian != REGMAP_ENDIAN_DEFAULT)
603 return endian;
604
605 /* Use this if no other value was found */
606 return REGMAP_ENDIAN_BIG;
607}
608EXPORT_SYMBOL_GPL(regmap_get_val_endian);
609
/*
 * Allocate and initialise a register map described by @config, optionally
 * bound to @dev and backed by @bus/@bus_context.
 *
 * Returns a valid struct regmap pointer on success or an ERR_PTR() on
 * failure.  Callers should use the regmap_init()/devm_regmap_init()
 * wrappers rather than calling this directly.
 */
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;
#ifdef TARGET_OS2
	// 2020-11-17 SHL FIXME patched struct rb_root
	struct rb_root _RB_ROOT = { NULL, };
#endif

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Locking: caller-supplied lock/unlock callbacks win; otherwise
	 * fast_io maps get a spinlock and everything else a mutex.
	 */
	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	/* Derive on-the-wire sizes (in bytes) from the bit widths */
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_read || !bus || !bus->read;
	/*
	 * NOTE(review): config->use_single_read is consulted for the write
	 * path as well; mainline reads config->use_single_write here.
	 * Confirm whether this port's regmap_config lacks a separate
	 * use_single_write flag before changing it.
	 */
	map->use_single_write = config->use_single_read || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	/* Copy the access configuration into the map */
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	/*
	 * Choose the reg_read/reg_write backends: config callbacks when
	 * there is no bus, per-register bus ops when the bus has no raw
	 * read/write, otherwise the raw/formatted paths set up below.
	 */
	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	/* Select the register-address formatter for the wire format */
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif

	default:
		goto err_map;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	/* Select the value formatter/parser for the wire format */
	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_map;
		}
		break;
#endif
	}

	/* Combined reg+val formats only support big-endian single writes */
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_write = true;
	}

	/* Must have either a combined writer or both reg and val formatters */
	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

#ifndef TARGET_OS2
	map->range_tree = RB_ROOT;
#else
	/*
	 * OS/2 workaround for the patched struct rb_root: the memset
	 * below makes the two assignments above redundant but they are
	 * kept from the original port.
	 */
	map->range_tree = _RB_ROOT;
	map->range_tree.rb_node = NULL;
	memset(&map->range_tree, 0, sizeof(struct rb_root));
#endif

	/* Validate each configured virtual range and add it to the tree */
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure, that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		/* Lazily allocate one shared selector scratch buffer */
		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);

		if (ret != 0)
			goto err_regcache;
	}

	return map;

	/* Unwind in reverse order of construction */
err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
1067
1068#ifndef TARGET_OS2
/* devres destructor: tear down the managed regmap on device unbind. */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap *map = *(struct regmap **)res;

	regmap_exit(map);
}
1073
1074struct regmap *__devm_regmap_init(struct device *dev,
1075 const struct regmap_bus *bus,
1076 void *bus_context,
1077 const struct regmap_config *config,
1078 struct lock_class_key *lock_key,
1079 const char *lock_name)
1080{
1081 struct regmap **ptr, *regmap;
1082
1083 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1084 if (!ptr)
1085 return ERR_PTR(-ENOMEM);
1086
1087 regmap = __regmap_init(dev, bus, bus_context, config,
1088 lock_key, lock_name);
1089 if (!IS_ERR(regmap)) {
1090 *ptr = regmap;
1091 devres_add(dev, ptr);
1092 } else {
1093 devres_free(ptr);
1094 }
1095
1096 return regmap;
1097}
1098EXPORT_SYMBOL_GPL(__devm_regmap_init);
1099#endif
1100
/* Populate @rm_field from @reg_field, precomputing the shifted bit mask. */
static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}
1111
1112#ifndef TARGET_OS2
1113/**
1114 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1115 *
1116 * @dev: Device that will be interacted with
1117 * @regmap: regmap bank in which this register field is located.
1118 * @reg_field: Register field with in the bank.
1119 *
1120 * The return value will be an ERR_PTR() on error or a valid pointer
1121 * to a struct regmap_field. The regmap_field will be automatically freed
1122 * by the device management code.
1123 */
1124struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1125 struct regmap *regmap, struct reg_field reg_field)
1126{
1127 struct regmap_field *rm_field = devm_kzalloc(dev,
1128 sizeof(*rm_field), GFP_KERNEL);
1129 if (!rm_field)
1130 return ERR_PTR(-ENOMEM);
1131
1132 regmap_field_init(rm_field, regmap, reg_field);
1133
1134 return rm_field;
1135
1136}
1137EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1138
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1156#endif
1157
1158/**
1159 * regmap_field_alloc() - Allocate and initialise a register field.
1160 *
1161 * @regmap: regmap bank in which this register field is located.
1162 * @reg_field: Register field with in the bank.
1163 *
1164 * The return value will be an ERR_PTR() on error or a valid pointer
1165 * to a struct regmap_field. The regmap_field should be freed by the
1166 * user once its finished working with it using regmap_field_free().
1167 */
1168struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1169 struct reg_field reg_field)
1170{
1171 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1172
1173 if (!rm_field)
1174 return ERR_PTR(-ENOMEM);
1175
1176 regmap_field_init(rm_field, regmap, reg_field);
1177
1178 return rm_field;
1179}
1180EXPORT_SYMBOL_GPL(regmap_field_alloc);
1181
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	/* kfree(NULL) is a no-op, so a NULL field is tolerated */
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1193
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	/* Tear down the old cache and its debugfs entries first */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Take over the access-related settings from the new config */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	/* The fresh cache starts with bypass/only modes cleared */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1228
1229/**
1230 * regmap_exit() - Free a previously allocated register map
1231 *
1232 * @map: Register map to operate on.
1233 */
1234void regmap_exit(struct regmap *map)
1235{
1236 struct regmap_async *async;
1237
1238 regcache_exit(map);
1239 regmap_debugfs_exit(map);
1240 regmap_range_exit(map);
1241 if (map->bus && map->bus->free_context)
1242 map->bus->free_context(map->bus_context);
1243 kfree(map->work_buf);
1244 while (!list_empty(&map->async_free)) {
1245 async = list_first_entry_or_null(&map->async_free,
1246 struct regmap_async,
1247 list);
1248 list_del(&async->list);
1249 kfree(async->work_buf);
1250 kfree(async);
1251 }
1252 kfree(map);
1253}
1254EXPORT_SYMBOL_GPL(regmap_exit);
1255
1256static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1257{
1258 struct regmap **r = res;
1259 if (!r || !*r) {
1260 WARN_ON(!r || !*r);
1261 return 0;
1262 }
1263
1264 /* If the user didn't specify a name match any */
1265 if (data)
1266 return !strcmp((*r)->name, data);
1267 else
1268 return 1;
1269}
1270
1271/**
1272 * dev_get_regmap() - Obtain the regmap (if any) for a device
1273 *
1274 * @dev: Device to retrieve the map for
1275 * @name: Optional name for the register map, usually NULL.
1276 *
1277 * Returns the regmap for the device if one is present, or NULL. If
1278 * name is specified then it must match the name specified when
1279 * registering the device, if it is NULL then the first regmap found
1280 * will be used. Devices with multiple register maps are very rare,
1281 * generic code should normally not need to specify a name.
1282 */
1283struct regmap *dev_get_regmap(struct device *dev, const char *name)
1284{
1285 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1286 dev_get_regmap_match, (void *)name);
1287
1288 if (!r)
1289 return NULL;
1290 return *r;
1291}
1292EXPORT_SYMBOL_GPL(dev_get_regmap);
1293
1294/**
1295 * regmap_get_device() - Obtain the device from a regmap
1296 *
1297 * @map: Register map to operate on.
1298 *
1299 * Returns the underlying device that the regmap has been created for.
1300 */
struct device *regmap_get_device(struct regmap *map)
{
	/* Simple accessor: the device the map was created against. */
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);
1306
/* Map a register inside a paged/windowed range onto its physical window
 * address, switching the page selector register first when needed.
 * On success *reg is rewritten to the window-relative address. */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Which page the register is on, and its offset within that page. */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the caller's work buffer before reporting. */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
1354
1355static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1356 unsigned long mask)
1357{
1358 u8 *buf;
1359 int i;
1360
1361 if (!mask || !map->work_buf)
1362 return;
1363
1364 buf = map->work_buf;
1365
1366 for (i = 0; i < max_bytes; i++)
1367 buf[i] |= (mask >> (8 * i)) & 0xff;
1368}
1369
/* Core raw-write path: mirrors the values into the cache, splits writes
 * that cross window boundaries, then transfers the data via the fastest
 * mechanism the bus offers (async, direct, gather, or hand-linearised).
 * Called with the map lock already held. */
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	/* Value area of work_buf: after the formatted register + padding. */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	/* Mirror each value into the register cache before touching
	 * the hardware. */
	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		/* Cache-only mode: record the dirty data and stop here. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly page-adjusted) register address into
	 * work_buf and OR in the bus write flag. */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		/* Reuse a previously completed async descriptor if any. */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		/* Either gather (header + caller's buffer) or send the
		 * fully linearised work buffer in one go. */
		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the unused descriptor to the free pool. */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	return ret;
}
1555
1556/**
1557 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1558 *
1559 * @map: Map to check.
1560 */
1561bool regmap_can_raw_write(struct regmap *map)
1562{
1563 return map->bus && map->bus->write && map->format.format_val &&
1564 map->format.format_reg;
1565}
1566EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1567
1568/**
1569 * regmap_get_raw_read_max - Get the maximum size we can read
1570 *
1571 * @map: Map to check.
1572 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	/* 0 means the bus imposes no limit on raw read size. */
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1578
1579/**
 * regmap_get_raw_write_max - Get the maximum size we can write
1581 *
1582 * @map: Map to check.
1583 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	/* 0 means the bus imposes no limit on raw write size. */
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1589
1590static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1591 unsigned int val)
1592{
1593 int ret;
1594 struct regmap_range_node *range;
1595 struct regmap *map = context;
1596
1597 WARN_ON(!map->bus || !map->format.format_write);
1598
1599 range = _regmap_range_lookup(map, reg);
1600 if (range) {
1601 ret = _regmap_select_page(map, &reg, range, 1);
1602 if (ret != 0)
1603 return ret;
1604 }
1605
1606 map->format.format_write(map, reg, val);
1607
1608 ret = map->bus->write(map->bus_context, map->work_buf,
1609 map->format.buf_size);
1610
1611 return ret;
1612}
1613
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	/* Bus implements register-level access directly; just forward. */
	return map->bus->reg_write(map->bus_context, reg, val);
}
1621
1622static int _regmap_bus_raw_write(void *context, unsigned int reg,
1623 unsigned int val)
1624{
1625 struct regmap *map = context;
1626
1627 WARN_ON(!map->bus || !map->format.format_val);
1628
1629 map->format.format_val(map->work_buf + map->format.reg_bytes
1630 + map->format.pad_bytes, val, 0);
1631 return _regmap_raw_write(map, reg,
1632 map->work_buf +
1633 map->format.reg_bytes +
1634 map->format.pad_bytes,
1635 map->format.val_bytes);
1636}
1637
1638static inline void *_regmap_map_get_context(struct regmap *map)
1639{
1640 return (map->bus) ? map : map->bus_context;
1641}
1642
/* Core single-register write: updates the cache (unless bypassed or
 * deferred) and then forwards to the map's reg_write implementation.
 * Called with the map lock already held. */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	/* Keep the cache coherent with what we are about to write. */
	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		/* Cache-only mode: record the write, skip the hardware. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	return map->reg_write(context, reg, val);
}
1669
/* True when @x is a multiple of @a; @a must be a power of two. */
#define IS_ALIGNED(x, a) (((x) & ((unsigned int)(a) - 1)) == 0)
1671
1672/**
1673 * regmap_write() - Write a value to a single register
1674 *
1675 * @map: Register map to write to
1676 * @reg: Register to write to
1677 * @val: Value to be written
1678 *
1679 * A value of zero will be returned on success, a negative errno will
1680 * be returned in error cases.
1681 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	/* Register addresses must be a multiple of the map's stride. */
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/* Locked wrapper around _regmap_write(). */
	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
1698
1699/**
1700 * regmap_write_async() - Write a value to a single register asynchronously
1701 *
1702 * @map: Register map to write to
1703 * @reg: Register to write to
1704 * @val: Value to be written
1705 *
1706 * A value of zero will be returned on success, a negative errno will
1707 * be returned in error cases.
1708 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	/* Register addresses must be a multiple of the map's stride. */
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag the transfer as asynchronous for the low-level write path. */
	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
1729
1730/**
1731 * regmap_raw_write() - Write raw values to one or more registers
1732 *
1733 * @map: Register map to write to
1734 * @reg: Initial register to write to
1735 * @val: Block of data to be written, laid out for direct transmission to the
1736 * device
1737 * @val_len: Length of data pointed to by val.
1738 *
1739 * This function is intended to be used for things like firmware
1740 * download where a large block of data needs to be transferred to the
1741 * device. No formatting will be done on the data provided.
1742 *
1743 * A value of zero will be returned on success, a negative errno will
1744 * be returned in error cases.
1745 */
1746int regmap_raw_write(struct regmap *map, unsigned int reg,
1747 const void *val, size_t val_len)
1748{
1749 int ret;
1750
1751 if (!regmap_can_raw_write(map))
1752 return -EINVAL;
1753 if (val_len % map->format.val_bytes)
1754 return -EINVAL;
1755 if (map->max_raw_write && map->max_raw_write < val_len)
1756 return -E2BIG;
1757
1758 map->lock(map->lock_arg);
1759
1760 ret = _regmap_raw_write(map, reg, val, val_len);
1761
1762 map->unlock(map->lock_arg);
1763
1764 return ret;
1765}
1766EXPORT_SYMBOL_GPL(regmap_raw_write);
1767
1768/**
1769 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
1770 * register field.
1771 *
1772 * @field: Register field to write to
1773 * @mask: Bitmask to change
1774 * @val: Value to be written
1775 * @change: Boolean indicating if a write was done
1776 * @async: Boolean indicating asynchronously
1777 * @force: Boolean indicating use force update
1778 *
1779 * Perform a read/modify/write cycle on the register field with change,
1780 * async, force option.
1781 *
1782 * A value of zero will be returned on success, a negative errno will
1783 * be returned in error cases.
1784 */
1785int regmap_field_update_bits_base(struct regmap_field *field,
1786 unsigned int mask, unsigned int val,
1787 bool *change, bool async, bool force)
1788{
1789 mask = (mask << field->shift) & field->mask;
1790
1791 return regmap_update_bits_base(field->regmap, field->reg,
1792 mask, val << field->shift,
1793 change, async, force);
1794}
1795EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
1796
1797/**
1798 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
1799 * register field with port ID
1800 *
1801 * @field: Register field to write to
1802 * @id: port ID
1803 * @mask: Bitmask to change
1804 * @val: Value to be written
1805 * @change: Boolean indicating if a write was done
1806 * @async: Boolean indicating asynchronously
1807 * @force: Boolean indicating use force update
1808 *
1809 * A value of zero will be returned on success, a negative errno will
1810 * be returned in error cases.
1811 */
1812int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
1813 unsigned int mask, unsigned int val,
1814 bool *change, bool async, bool force)
1815{
1816 if (id >= field->id_size)
1817 return -EINVAL;
1818
1819 mask = (mask << field->shift) & field->mask;
1820
1821 return regmap_update_bits_base(field->regmap,
1822 field->reg + (field->id_offset * id),
1823 mask, val << field->shift,
1824 change, async, force);
1825}
1826EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
1827
1828/**
1829 * regmap_bulk_write() - Write multiple registers to the device
1830 *
1831 * @map: Register map to write to
1832 * @reg: First register to be write from
1833 * @val: Block of data to be written, in native register size for device
1834 * @val_count: Number of registers to write
1835 *
1836 * This function is intended to be used for writing a large block of
1837 * data to the device either in single transfer or multiple transfer.
1838 *
1839 * A value of zero will be returned on success, a negative errno will
1840 * be returned in error cases.
1841 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for
	 * them we have a series of single write operations in the first two if
	 * blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Extract one native-width value from the caller's
			 * buffer. */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			/* Locking handled per register by regmap_write(). */
			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		/* Split the transfer into chunks the bus can take. */
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			/* Largest multiple of val_bytes within the raw
			 * write limit. */
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		/* Work on a copy: parse_inplace() byte-swaps in place. */
		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
1977
1978/*
1979 * _regmap_raw_multi_reg_write()
1980 *
1981 * the (register,newvalue) pairs in regs have not been formatted, but
1982 * they are all in the same page and have been changed to being page
1983 * relative. The page register has been written if that was necessary.
1984 */
1985static int _regmap_raw_multi_reg_write(struct regmap *map,
1986 const struct reg_sequence *regs,
1987 size_t num_regs)
1988{
1989 int ret;
1990 void *buf;
1991 int i;
1992 u8 *u8;
1993 size_t val_bytes = map->format.val_bytes;
1994 size_t reg_bytes = map->format.reg_bytes;
1995 size_t pad_bytes = map->format.pad_bytes;
1996 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
1997 size_t len = pair_size * num_regs;
1998
1999 if (!len)
2000 return -EINVAL;
2001
2002 buf = kzalloc(len, GFP_KERNEL);
2003 if (!buf)
2004 return -ENOMEM;
2005
2006 /* We have to linearise by hand. */
2007
2008 u8 = buf;
2009
2010 for (i = 0; i < num_regs; i++) {
2011 unsigned int reg = regs[i].reg;
2012 unsigned int val = regs[i].def;
2013 map->format.format_reg(u8, reg, map->reg_shift);
2014 u8 += reg_bytes + pad_bytes;
2015 map->format.format_val(u8, val, 0);
2016 u8 += val_bytes;
2017 }
2018 u8 = buf;
2019 *u8 |= map->write_flag_mask;
2020
2021 ret = map->bus->write(map->bus_context, buf, len);
2022
2023 kfree(buf);
2024
2025 for (i = 0; i < num_regs; i++) {
2026 int reg = regs[i].reg;
2027 }
2028 return ret;
2029}
2030
2031static unsigned int _regmap_register_page(struct regmap *map,
2032 unsigned int reg,
2033 struct regmap_range_node *range)
2034{
2035 unsigned int win_page = (reg - range->range_min) / range->window_len;
2036
2037 return win_page;
2038}
2039
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Track which window page this register lands on. */
		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			/* Flush the chunk accumulated so far. */
			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			/* Restart accumulation after the flushed chunk. */
			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Flush whatever is left over. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2116
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Fallback: write each register individually, honouring any
	 * per-entry delay. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate every target register up front. */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Mirror the whole sequence into the cache first. */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		/* Cache-only mode: nothing goes to the hardware. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			/* Work on a copy: the paged writer rewrites the reg
			 * fields while selecting pages. */
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No paging and no delays: one linearised bus transfer. */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2191
2192/**
2193 * regmap_multi_reg_write() - Write multiple registers to the device
2194 *
2195 * @map: Register map to write to
2196 * @regs: Array of structures containing register,value to be written
2197 * @num_regs: Number of registers to write
2198 *
2199 * Write multiple registers to the device where the set of register, value
2200 * pairs are supplied in any order, possibly not all in a single range.
2201 *
 * The 'normal' block write mode will ultimately send data on the
2203 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2204 * addressed. However, this alternative block multi write mode will send
2205 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2206 * must of course support the mode.
2207 *
2208 * A value of zero will be returned on success, a negative errno will be
2209 * returned in error cases.
2210 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	/* Hold the map lock for the whole sequence so it is applied
	 * atomically with respect to other regmap users. */
	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2225
2226/**
2227 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2228 * device but not the cache
2229 *
2230 * @map: Register map to write to
2231 * @regs: Array of structures containing register,value to be written
2232 * @num_regs: Number of registers to write
2233 *
2234 * Write multiple registers to the device but not the cache where the set
2235 * of register are supplied in any order.
2236 *
2237 * This function is intended to be used for writing a large block of data
2238 * atomically to the device in single transfer for those I2C client devices
2239 * that implement this alternative block write mode.
2240 *
2241 * A value of zero will be returned on success, a negative errno will
2242 * be returned in error cases.
2243 */
2244int regmap_multi_reg_write_bypassed(struct regmap *map,
2245 const struct reg_sequence *regs,
2246 int num_regs)
2247{
2248 int ret;
2249 bool bypass;
2250
2251 map->lock(map->lock_arg);
2252
2253 bypass = map->cache_bypass;
2254 map->cache_bypass = true;
2255
2256 ret = _regmap_multi_reg_write(map, regs, num_regs);
2257
2258 map->cache_bypass = bypass;
2259
2260 map->unlock(map->lock_arg);
2261
2262 return ret;
2263}
2264EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2265
2266/**
2267 * regmap_raw_write_async() - Write raw values to one or more registers
2268 * asynchronously
2269 *
2270 * @map: Register map to write to
2271 * @reg: Initial register to write to
2272 * @val: Block of data to be written, laid out for direct transmission to the
2273 * device. Must be valid until regmap_async_complete() is called.
2274 * @val_len: Length of data pointed to by val.
2275 *
2276 * This function is intended to be used for things like firmware
2277 * download where a large block of data needs to be transferred to the
2278 * device. No formatting will be done on the data provided.
2279 *
2280 * If supported by the underlying bus the write will be scheduled
2281 * asynchronously, helping maximise I/O speed on higher speed buses
2282 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
2284 *
2285 * A value of zero will be returned on success, a negative errno will
2286 * be returned in error cases.
2287 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	/* Only whole values and stride-aligned registers are acceptable. */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag the transfer as asynchronous for _regmap_raw_write(). */
	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2311
/* Issue a raw (unformatted) read of @val_len bytes starting at @reg.
 * Selects the indirect-access page for windowed ranges, formats the
 * register address (with the read flag mask applied) into work_buf and
 * hands the transfer to the bus. Caller must hold the map's lock.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	/* Raw reads require a bus that implements a block read operation. */
	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* If @reg lies inside a paged window, switch to its page first;
	 * _regmap_select_page may also rewrite @reg to the window address.
	 */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Marshal the (possibly remapped) register address into work_buf. */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	return ret;
}
2341
2342static int _regmap_bus_reg_read(void *context, unsigned int reg,
2343 unsigned int *val)
2344{
2345 struct regmap *map = context;
2346
2347 return map->bus->reg_read(map->bus_context, reg, val);
2348}
2349
2350static int _regmap_bus_read(void *context, unsigned int reg,
2351 unsigned int *val)
2352{
2353 int ret;
2354 struct regmap *map = context;
2355
2356 if (!map->format.parse_val)
2357 return -EINVAL;
2358
2359 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2360 if (ret == 0)
2361 *val = map->format.parse_val(map->work_buf);
2362
2363 return ret;
2364}
2365
2366static int _regmap_read(struct regmap *map, unsigned int reg,
2367 unsigned int *val)
2368{
2369 int ret;
2370 void *context = _regmap_map_get_context(map);
2371
2372 if (!map->cache_bypass) {
2373 ret = regcache_read(map, reg, val);
2374 if (ret == 0)
2375 return 0;
2376 }
2377
2378 if (map->cache_only)
2379 return -EBUSY;
2380
2381 if (!regmap_readable(map, reg))
2382 return -EIO;
2383
2384 ret = map->reg_read(context, reg, val);
2385 if (ret == 0) {
2386#ifdef LOG_DEVICE
2387 if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
2388 dev_info(map->dev, "%x => %x\n", reg, *val);
2389#endif
2390
2391 if (!map->cache_bypass)
2392 regcache_write(map, reg, *val);
2393 }
2394
2395 return ret;
2396}
2397
2398/**
2399 * regmap_read() - Read a value from a single register
2400 *
2401 * @map: Register map to read from
2402 * @reg: Register to be read from
2403 * @val: Pointer to store read value
2404 *
2405 * A value of zero will be returned on success, a negative errno will
2406 * be returned in error cases.
2407 */
2408int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2409{
2410 int ret;
2411
2412 if (!IS_ALIGNED(reg, map->reg_stride))
2413 return -EINVAL;
2414
2415 map->lock(map->lock_arg);
2416
2417 ret = _regmap_read(map, reg, val);
2418
2419 map->unlock(map->lock_arg);
2420
2421 return ret;
2422}
2423EXPORT_SYMBOL_GPL(regmap_read);
2424
2425/**
2426 * regmap_raw_read() - Read raw data from the device
2427 *
2428 * @map: Register map to read from
2429 * @reg: First register to be read from
2430 * @val: Pointer to store read value
2431 * @val_len: Size of data to read
2432 *
2433 * A value of zero will be returned on success, a negative errno will
2434 * be returned in error cases.
2435 */
2436int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2437 size_t val_len)
2438{
2439 size_t val_bytes = map->format.val_bytes;
2440 size_t val_count = val_len / val_bytes;
2441 unsigned int v;
2442 int ret, i;
2443
2444 if (!map->bus)
2445 return -EINVAL;
2446 if (val_len % map->format.val_bytes)
2447 return -EINVAL;
2448 if (!IS_ALIGNED(reg, map->reg_stride))
2449 return -EINVAL;
2450 if (val_count == 0)
2451 return -EINVAL;
2452
2453 map->lock(map->lock_arg);
2454
2455 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2456 map->cache_type == REGCACHE_NONE) {
2457 if (!map->bus->read) {
2458 ret = -ENOTSUPP;
2459 goto out;
2460 }
2461 if (map->max_raw_read && map->max_raw_read < val_len) {
2462 ret = -E2BIG;
2463 goto out;
2464 }
2465
2466 /* Physical block read if there's no cache involved */
2467 ret = _regmap_raw_read(map, reg, val, val_len);
2468
2469 } else {
2470 /* Otherwise go word by word for the cache; should be low
2471 * cost as we expect to hit the cache.
2472 */
2473 for (i = 0; i < val_count; i++) {
2474 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2475 &v);
2476 if (ret != 0)
2477 goto out;
2478
2479 map->format.format_val(val + (i * val_bytes), v, 0);
2480 }
2481 }
2482
2483 out:
2484 map->unlock(map->lock_arg);
2485
2486 return ret;
2487}
2488EXPORT_SYMBOL_GPL(regmap_raw_read);
2489
2490/**
 2491 * regmap_field_read() - Read a value from a single register field
2492 *
2493 * @field: Register field to read from
2494 * @val: Pointer to store read value
2495 *
2496 * A value of zero will be returned on success, a negative errno will
2497 * be returned in error cases.
2498 */
2499int regmap_field_read(struct regmap_field *field, unsigned int *val)
2500{
2501 int ret;
2502 unsigned int reg_val;
2503 ret = regmap_read(field->regmap, field->reg, &reg_val);
2504 if (ret != 0)
2505 return ret;
2506
2507 reg_val &= field->mask;
2508 reg_val >>= field->shift;
2509 *val = reg_val;
2510
2511 return ret;
2512}
2513EXPORT_SYMBOL_GPL(regmap_field_read);
2514
2515/**
 2516 * regmap_fields_read() - Read a value from a single register field with port ID
2517 *
2518 * @field: Register field to read from
2519 * @id: port ID
2520 * @val: Pointer to store read value
2521 *
2522 * A value of zero will be returned on success, a negative errno will
2523 * be returned in error cases.
2524 */
2525int regmap_fields_read(struct regmap_field *field, unsigned int id,
2526 unsigned int *val)
2527{
2528 int ret;
2529 unsigned int reg_val;
2530
2531 if (id >= field->id_size)
2532 return -EINVAL;
2533
2534 ret = regmap_read(field->regmap,
2535 field->reg + (field->id_offset * id),
2536 &reg_val);
2537 if (ret != 0)
2538 return ret;
2539
2540 reg_val &= field->mask;
2541 reg_val >>= field->shift;
2542 *val = reg_val;
2543
2544 return ret;
2545}
2546EXPORT_SYMBOL_GPL(regmap_fields_read);
2547
2548/**
2549 * regmap_bulk_read() - Read multiple registers from the device
2550 *
2551 * @map: Register map to read from
2552 * @reg: First register to be read from
2553 * @val: Pointer to store read value, in native register size for device
2554 * @val_count: Number of registers to read
2555 *
2556 * A value of zero will be returned on success, a negative errno will
2557 * be returned in error cases.
2558 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/* Fast path: raw block reads. Only taken when the cache can be
	 * bypassed (whole range volatile, or no cache) and the format can
	 * fix byte order up in place afterwards.
	 */
	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices does not support bulk read, for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			/* Everything fits in one raw transfer. */
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				/* Limited by max_raw_read only: use the
				 * largest whole-value multiple per transfer.
				 */
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		/* Convert each raw value from bus order to native endian. */
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		/* Slow path: one register at a time, through the cache. */
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes,
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
2672
2673static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2674 unsigned int mask, unsigned int val,
2675 bool *change, bool force_write)
2676{
2677 int ret;
2678 unsigned int tmp, orig;
2679
2680 if (change)
2681 *change = false;
2682
2683 if (regmap_volatile(map, reg) && map->reg_update_bits) {
2684 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2685 if (ret == 0 && change)
2686 *change = true;
2687 } else {
2688 ret = _regmap_read(map, reg, &orig);
2689 if (ret != 0)
2690 return ret;
2691
2692 tmp = orig & ~mask;
2693 tmp |= val & mask;
2694
2695 if (force_write || (tmp != orig)) {
2696 ret = _regmap_write(map, reg, tmp);
2697 if (ret == 0 && change)
2698 *change = true;
2699 }
2700 }
2701
2702 return ret;
2703}
2704
2705/**
2706 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
2707 *
2708 * @map: Register map to update
2709 * @reg: Register to update
2710 * @mask: Bitmask to change
2711 * @val: New value for bitmask
2712 * @change: Boolean indicating if a write was done
2713 * @async: Boolean indicating asynchronously
2714 * @force: Boolean indicating use force update
2715 *
2716 * Perform a read/modify/write cycle on a register map with change, async, force
2717 * options.
2718 *
2719 * If async is true:
2720 *
2721 * With most buses the read must be done synchronously so this is most useful
2722 * for devices with a cache which do not need to interact with the hardware to
2723 * determine the current register value.
2724 *
2725 * Returns zero for success, a negative number on error.
2726 */
2727int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2728 unsigned int mask, unsigned int val,
2729 bool *change, bool async, bool force)
2730{
2731 int ret;
2732
2733 map->lock(map->lock_arg);
2734
2735 map->async = async;
2736
2737 ret = _regmap_update_bits(map, reg, mask, val, change, force);
2738
2739 map->async = false;
2740
2741 map->unlock(map->lock_arg);
2742
2743 return ret;
2744}
2745EXPORT_SYMBOL_GPL(regmap_update_bits_base);
2746
2747void regmap_async_complete_cb(struct regmap_async *async, int ret)
2748{
2749 struct regmap *map = async->map;
2750 bool wake;
2751
2752 spin_lock(&map->async_lock);
2753 list_move(&async->list, &map->async_free);
2754 wake = list_empty(&map->async_list);
2755
2756 if (ret != 0)
2757 map->async_ret = ret;
2758
2759 spin_unlock(&map->async_lock);
2760
2761 if (wake)
2762 wake_up(&map->async_waitq);
2763}
2764EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2765
2766#ifndef TARGET_OS2
2767static int regmap_async_is_done(struct regmap *map)
2768{
2769 unsigned long flags;
2770 int ret;
2771
2772 spin_lock_irqsave(&map->async_lock, flags);
2773 ret = list_empty(&map->async_list);
2774 spin_unlock_irqrestore(&map->async_lock, flags);
2775
2776 return ret;
2777}
2778#endif
2779
2780/**
2781 * regmap_async_complete - Ensure all asynchronous I/O has completed.
2782 *
2783 * @map: Map to operate on.
2784 *
2785 * Blocks until any pending asynchronous I/O has completed. Returns
2786 * an error code for any failed I/O operations.
2787 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	/* NOTE(port): the upstream blocking wait below is disabled in this
	 * OS/2 port, so this currently only collects errors already reported
	 * by completed writes; it does NOT wait for outstanding ones.
	 */
//FIXME wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	/* Reset the latched error for the next batch of async writes. */
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
2807
2808/**
2809 * regmap_register_patch - Register and apply register updates to be applied
 2810 * on device initialisation
2811 *
2812 * @map: Register map to apply updates to.
2813 * @regs: Values to update.
2814 * @num_regs: Number of entries in regs.
2815 *
2816 * Register a set of register updates to be applied to the device
2817 * whenever the device registers are synchronised with the cache and
2818 * apply them immediately. Typically this is used to apply
2819 * corrections to be applied to the device defaults on startup, such
2820 * as the updates some vendors provide to undocumented registers.
2821 *
2822 * The caller must ensure that this function cannot be called
2823 * concurrently with either itself or regcache_sync().
2824 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

#if 0
	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
		      num_regs))
		return 0;
#endif

	/* Append @regs to the stored patch so that regcache_sync() can
	 * replay it after every cache synchronisation.
	 */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	/* Remember the caller's bypass setting so it can be restored. */
	bypass = map->cache_bypass;

	/* Apply straight to the hardware without disturbing the cache,
	 * allowing asynchronous writes where the bus supports them.
	 */
	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	/* Make sure any asynchronous writes scheduled above have landed. */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
2868
2869/**
2870 * regmap_get_val_bytes() - Report the size of a register value
2871 *
2872 * @map: Register map to operate on.
2873 *
 2874 * Report the size of a register value, mainly intended for use by
2875 * generic infrastructure built on top of regmap.
2876 */
2877int regmap_get_val_bytes(struct regmap *map)
2878{
2879 if (map->format.format_write)
2880 return -EINVAL;
2881
2882 return map->format.val_bytes;
2883}
2884EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2885
2886/**
2887 * regmap_get_max_register() - Report the max register value
2888 *
2889 * @map: Register map to operate on.
2890 *
 2891 * Report the max register value, mainly intended for use by
2892 * generic infrastructure built on top of regmap.
2893 */
2894int regmap_get_max_register(struct regmap *map)
2895{
2896 return map->max_register ? map->max_register : -EINVAL;
2897}
2898EXPORT_SYMBOL_GPL(regmap_get_max_register);
2899
2900/**
2901 * regmap_get_reg_stride() - Report the register address stride
2902 *
2903 * @map: Register map to operate on.
2904 *
 2905 * Report the register address stride, mainly intended for use by
2906 * generic infrastructure built on top of regmap.
2907 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2913
2914int regmap_parse_val(struct regmap *map, const void *buf,
2915 unsigned int *val)
2916{
2917 if (!map->format.parse_val)
2918 return -EINVAL;
2919
2920 *val = map->format.parse_val(buf);
2921
2922 return 0;
2923}
2924EXPORT_SYMBOL_GPL(regmap_parse_val);
2925
/* Hook regmap debugfs setup into boot, once core infrastructure is up. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
Note: See TracBrowser for help on using the repository browser.