source: GPL/branches/uniaud32-exp/lib32/regcache.c@ 764

Last change on this file since 764 was 764, checked in by Paul Smedley, 4 months ago

Fix warnings & typo that was causing HDA to trap

File size: 18.0 KB
Line 
1// SPDX-License-Identifier: GPL-2.0
2//
3// Register cache access API
4//
5// Copyright 2011 Wolfson Microelectronics plc
6//
7// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
8
9/* from 5.10.10 */
10
11//#include <linux/bsearch.h>
12#include <linux/device.h>
13#include <linux/export.h>
14#include <linux/slab.h>
15#include <linux/sort.h>
16#include <linux/module.h>
17#include <linux/workqueue.h>
18#include <linux/byteorder/little_endian.h>
19#include <linux/printk.h>
20
21//#include "trace.h"
22#include "internal.h"
23
/* Cache backends, tried in order when matching map->cache_type in
 * regcache_init(). */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_maple_ops,
	&regcache_flat_ops,
};
29
/*
 * regcache_hw_init - Build map->reg_defaults for a map with raw defaults.
 * @map: map to initialise defaults for.
 *
 * Counts the readable, non-volatile registers and fills reg_defaults
 * from either the raw default blob (map->reg_defaults_raw) or, when no
 * blob was supplied, by reading the values back from the hardware with
 * the cache bypassed.
 *
 * Returns 0 on success, a negative errno otherwise.  May set
 * map->cache_bypass when no register is cacheable at all.
 */
static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			/* We now own the blob; regcache_exit() frees it. */
			map->cache_free = 1;
		} else {
			/* Raw read failed; fall back to the per-register
			 * regmap_read() path in the loop below. */
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			/* No raw blob available: read this register from
			 * the hardware with the cache bypassed. */
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}
118
/*
 * regcache_init - Set up the register cache for a map.
 * @map: map to initialise.
 * @config: configuration supplied by the driver.
 *
 * Selects a cache backend matching map->cache_type, copies or crafts
 * the register defaults, and runs the backend's init hook.
 *
 * Returns 0 on success (including the REGCACHE_NONE bypass case),
 * a negative errno otherwise.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* No cache requested: run in permanent bypass mode. */
	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	/* Every default must land on a stride-aligned register. */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	/* Find the backend implementing the requested cache type. */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read/write/name are mandatory for any backend. */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us. We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
209
210void regcache_exit(struct regmap *map)
211{
212 if (map->cache_type == REGCACHE_NONE)
213 return;
214
215 BUG_ON(!map->cache_ops);
216
217 kfree(map->reg_defaults);
218 if (map->cache_free)
219 kfree(map->reg_defaults_raw);
220
221 if (map->cache_ops->exit) {
222 dev_dbg(map->dev, "Destroying %s cache\n",
223 map->cache_ops->name);
224 map->cache_ops->exit(map);
225 }
226}
227
228/**
229 * regcache_read - Fetch the value of a given register from the cache.
230 *
231 * @map: map to configure.
232 * @reg: The register index.
233 * @value: The value to be returned.
234 *
235 * Return a negative value on failure, 0 on success.
236 */
237int regcache_read(struct regmap *map,
238 unsigned int reg, unsigned int *value)
239{
240 int ret;
241
242 if (map->cache_type == REGCACHE_NONE)
243 return -ENOSYS;
244
245 BUG_ON(!map->cache_ops);
246
247 if (!regmap_volatile(map, reg)) {
248 ret = map->cache_ops->read(map, reg, value);
249
250#ifndef TARGET_OS2
251 if (ret == 0)
252 trace_regmap_reg_read_cache(map, reg, *value);
253#endif
254 return ret;
255 }
256
257 return -EINVAL;
258}
259
260/**
261 * regcache_write - Set the value of a given register in the cache.
262 *
263 * @map: map to configure.
264 * @reg: The register index.
265 * @value: The new register value.
266 *
267 * Return a negative value on failure, 0 on success.
268 */
269int regcache_write(struct regmap *map,
270 unsigned int reg, unsigned int value)
271{
272 if (map->cache_type == REGCACHE_NONE)
273 return 0;
274
275 BUG_ON(!map->cache_ops);
276
277 if (!regmap_volatile(map, reg))
278 return map->cache_ops->write(map, reg, value);
279
280 return 0;
281}
282
283static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
284 unsigned int val)
285{
286 int ret;
287
288 /* If we don't know the chip just got reset, then sync everything. */
289 if (!map->no_sync_defaults)
290 return true;
291
292 /* Is this the hardware default? If so skip. */
293 ret = regcache_lookup_reg(map, reg);
294 if (ret >= 0 && val == map->reg_defaults[ret].def)
295 return false;
296 return true;
297}
298
299static int regcache_default_sync(struct regmap *map, unsigned int min,
300 unsigned int max)
301{
302 unsigned int reg;
303
304 for (reg = min; reg <= max; reg += map->reg_stride) {
305 unsigned int val;
306 int ret;
307
308 if (regmap_volatile(map, reg) ||
309 !regmap_writeable(map, reg))
310 continue;
311
312 ret = regcache_read(map, reg, &val);
313 if (ret)
314 return ret;
315
316 if (!regcache_reg_needs_sync(map, reg, val))
317 continue;
318
319 map->cache_bypass = true;
320 ret = _regmap_write(map, reg, val);
321 map->cache_bypass = false;
322 if (ret) {
323 dev_err(map->dev, "Unable to sync register %#x. %d\n",
324 reg, ret);
325 return ret;
326 }
327 dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
328 }
329
330 return 0;
331}
332
333/**
334 * regcache_sync - Sync the register cache with the hardware.
335 *
336 * @map: map to configure.
337 *
338 * Any registers that should not be synced should be marked as
339 * volatile. In general drivers can choose not to use the provided
340 * syncing functionality if they so require.
341 *
342 * Return a negative value on failure, 0 on success.
343 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "start");
#endif
	/* Nothing to do unless the cache has diverged from the HW. */
	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	/* Prefer the backend's sync implementation when it has one. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	/* Next sync must not assume the HW is at defaults again. */
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes issued above to complete. */
	regmap_async_complete(map);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "stop");
#endif
	return ret;
}
401EXPORT_SYMBOL_GPL(regcache_sync);
402
403/**
404 * regcache_sync_region - Sync part of the register cache with the hardware.
405 *
406 * @map: map to sync.
407 * @min: first register to sync
408 * @max: last register to sync
409 *
410 * Write all non-default register values in the specified region to
411 * the hardware.
412 *
413 * Return a negative value on failure, 0 on success.
414 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "start region");
#endif
	/* Nothing to do unless the cache has diverged from the HW. */
	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Prefer the backend's sync implementation when it has one. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	/* Next sync must not assume the HW is at defaults again. */
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes issued above to complete. */
	regmap_async_complete(map);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "stop region");
#endif
	return ret;
}
459EXPORT_SYMBOL_GPL(regcache_sync_region);
460
461/**
462 * regcache_drop_region - Discard part of the register cache
463 *
464 * @map: map to operate on
465 * @min: first register to discard
466 * @max: last register to discard
467 *
468 * Discard part of the register cache.
469 *
470 * Return a negative value on failure, 0 on success.
471 */
472int regcache_drop_region(struct regmap *map, unsigned int min,
473 unsigned int max)
474{
475 int ret = 0;
476
477 if (!map->cache_ops || !map->cache_ops->drop)
478 return -EINVAL;
479
480 map->lock(map->lock_arg);
481
482#ifndef TARGET_OS2
483 trace_regcache_drop_region(map, min, max);
484#endif
485 ret = map->cache_ops->drop(map, min, max);
486
487 map->unlock(map->lock_arg);
488
489 return ret;
490}
491EXPORT_SYMBOL_GPL(regcache_drop_region);
492
493/**
494 * regcache_cache_only - Put a register map into cache only mode
495 *
496 * @map: map to configure
497 * @enable: flag if changes should be written to the hardware
498 *
499 * When a register map is marked as cache only writes to the register
500 * map API will only update the register cache, they will not cause
501 * any hardware changes. This is useful for allowing portions of
502 * drivers to act as though the device were functioning as normal when
503 * it is disabled for power saving reasons.
504 */
505void regcache_cache_only(struct regmap *map, bool enable)
506{
507 map->lock(map->lock_arg);
508 WARN_ON(map->cache_bypass && enable);
509 map->cache_only = enable;
510#ifndef TARGET_OS2
511 trace_regmap_cache_only(map, enable);
512#endif
513 map->unlock(map->lock_arg);
514}
515EXPORT_SYMBOL_GPL(regcache_cache_only);
516
517/**
518 * regcache_mark_dirty - Indicate that HW registers were reset to default values
519 *
520 * @map: map to mark
521 *
522 * Inform regcache that the device has been powered down or reset, so that
523 * on resume, regcache_sync() knows to write out all non-default values
524 * stored in the cache.
525 *
526 * If this function is not called, regcache_sync() will assume that
527 * the hardware state still matches the cache state, modulo any writes that
528 * happened when cache_only was true.
529 */
530void regcache_mark_dirty(struct regmap *map)
531{
532 map->lock(map->lock_arg);
533 map->cache_dirty = true;
534 map->no_sync_defaults = true;
535 map->unlock(map->lock_arg);
536}
537EXPORT_SYMBOL_GPL(regcache_mark_dirty);
538
539/**
540 * regcache_cache_bypass - Put a register map into cache bypass mode
541 *
542 * @map: map to configure
543 * @enable: flag if changes should not be written to the cache
544 *
545 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly. This is useful when syncing the cache back to
548 * the hardware.
549 */
550void regcache_cache_bypass(struct regmap *map, bool enable)
551{
552 map->lock(map->lock_arg);
553 WARN_ON(map->cache_only && enable);
554 map->cache_bypass = enable;
555#ifndef TARGET_OS2
556 trace_regmap_cache_bypass(map, enable);
557#endif
558 map->unlock(map->lock_arg);
559}
560EXPORT_SYMBOL_GPL(regcache_cache_bypass);
561
562/**
563 * regcache_reg_cached - Check if a register is cached
564 *
565 * @map: map to check
566 * @reg: register to check
567 *
568 * Reports if a register is cached.
569 */
570bool regcache_reg_cached(struct regmap *map, unsigned int reg)
571{
572 unsigned int val;
573 int ret;
574
575 map->lock(map->lock_arg);
576
577 ret = regcache_read(map, reg, &val);
578
579 map->unlock(map->lock_arg);
580
581 return ret == 0;
582}
583EXPORT_SYMBOL_GPL(regcache_reg_cached);
584
585bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
586 unsigned int val)
587{
588 if (regcache_get_val(map, base, idx) == val)
589 return true;
590
591 /* Use device native format if possible */
592 if (map->format.format_val) {
593 map->format.format_val(base + (map->cache_word_size * idx),
594 val, 0);
595 return false;
596 }
597
598 switch (map->cache_word_size) {
599 case 1: {
600 u8 *cache = base;
601
602 cache[idx] = val;
603 break;
604 }
605 case 2: {
606 u16 *cache = base;
607
608 cache[idx] = val;
609 break;
610 }
611 case 4: {
612 u32 *cache = base;
613
614 cache[idx] = val;
615 break;
616 }
617#ifdef CONFIG_64BIT
618 case 8: {
619 u64 *cache = base;
620
621 cache[idx] = val;
622 break;
623 }
624#endif
625 default:
626 BUG();
627 }
628 return false;
629}
630
/*
 * regcache_get_val - Fetch the cached value at index @idx of block @base.
 *
 * Uses the map's native parse_val callback when available, otherwise
 * indexes @base as an array of cache_word_size-wide elements.
 *
 * NOTE(review): with a NULL @base this returns -EINVAL converted to
 * unsigned int, which is indistinguishable from a real register value;
 * callers are expected to pass a valid cache block.
 */
unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}
671
672static int regcache_default_cmp(const void *a, const void *b)
673{
674 const struct reg_default *_a = a;
675 const struct reg_default *_b = b;
676
677 return _a->reg - _b->reg;
678}
679
680int regcache_lookup_reg(struct regmap *map, unsigned int reg)
681{
682 struct reg_default key;
683 struct reg_default *r;
684
685 key.reg = reg;
686 key.def = 0;
687
688 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
689 sizeof(struct reg_default), regcache_default_cmp);
690
691 if (r)
692 return r - map->reg_defaults;
693 else
694 return -ENOENT;
695}
696
697static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
698{
699 if (!cache_present)
700 return true;
701
702 return test_bit(idx, cache_present);
703}
704
/*
 * regcache_sync_block_single - Sync one cache block register by register.
 * @map: map being synced.
 * @block: cache data block.
 * @cache_present: optional presence bitmap for the block (NULL = all).
 * @block_base: register address of the block's first entry.
 * @start: first block index to sync.
 * @end: one past the last block index to sync.
 *
 * Writes each present, writeable, non-default value to the hardware
 * individually.  Returns 0 on success, a negative errno otherwise.
 */
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		/* Write straight to the hardware, not back into the cache. */
		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}
740
/*
 * regcache_sync_block_raw_flush - Flush a pending run of raw cache data.
 * @map: map being synced.
 * @data: in/out pointer to the start of the pending run in the cache
 *        block; reset to NULL after the flush.
 * @base: register address of the first value in the run.
 * @cur: register address one stride past the last value in the run.
 *
 * Issues a single raw write for the contiguous run accumulated by
 * regcache_sync_block_raw().  A NULL *data means nothing is pending.
 * Returns 0 on success, a negative errno otherwise.
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %lu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	/* Write straight to the hardware, not back into the cache. */
	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}
768
/*
 * regcache_sync_block_raw - Sync one cache block using batched raw writes.
 * @map: map being synced.
 * @block: cache data block.
 * @cache_present: optional presence bitmap for the block (NULL = all).
 * @block_base: register address of the block's first entry.
 * @start: first block index to sync.
 * @end: one past the last block index to sync.
 *
 * Coalesces consecutive registers that need syncing into contiguous
 * runs and flushes each run with a single raw write.  Registers that
 * are absent, unwriteable, or already at their default terminate the
 * current run.  Returns 0 on success, a negative errno otherwise.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* A gap in the block ends the pending run. */
		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		/* A register already at its default also ends the run. */
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at the first register that needs syncing. */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run (if any) reaches the end of the range. */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}
810
811int regcache_sync_block(struct regmap *map, void *block,
812 unsigned long *cache_present,
813 unsigned int block_base, unsigned int start,
814 unsigned int end)
815{
816 if (regmap_can_raw_write(map) && !map->use_single_write)
817 return regcache_sync_block_raw(map, block, cache_present,
818 block_base, start, end);
819 else
820 return regcache_sync_block_single(map, block, cache_present,
821 block_base, start, end);
822}
Note: See TracBrowser for help on using the repository browser.