source: GPL/branches/uniaud32-next/lib32/regcache.c@ 625

Last change on this file since 625 was 625, checked in by Paul Smedley, 5 years ago

Code cleanups to simplify future maintenance, update regmap/regcache/rbtree to linux 4.19.163 level

File size: 17.2 KB
Line 
1/*
2 * Register cache access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12/* from 4.19.163 */
13
14//#include <linux/bsearch.h>
15#include <linux/device.h>
16#include <linux/export.h>
17#include <linux/slab.h>
18#include <linux/sort.h>
19#include <linux/module.h>
20#include <linux/workqueue.h>
21#include <linux/byteorder/little_endian.h>
22#include <linux/printk.h>
23
24//#include "trace.h"
25#include "internal.h"
26
/* Table of available cache backends; regcache_init() matches
 * map->cache_type against each entry's ->type field. */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};
34
35static int regcache_hw_init(struct regmap *map)
36{
37 int i, j;
38 int ret;
39 int count;
40 unsigned int reg, val;
41 void *tmp_buf;
42
43 if (!map->num_reg_defaults_raw)
44 return -EINVAL;
45
46 /* calculate the size of reg_defaults */
47 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
48 if (regmap_readable(map, i * map->reg_stride) &&
49 !regmap_volatile(map, i * map->reg_stride))
50 count++;
51
52 /* all registers are unreadable or volatile, so just bypass */
53 if (!count) {
54 map->cache_bypass = true;
55 return 0;
56 }
57
58 map->num_reg_defaults = count;
59 map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
60 GFP_KERNEL);
61 if (!map->reg_defaults)
62 return -ENOMEM;
63
64 if (!map->reg_defaults_raw) {
65 bool cache_bypass = map->cache_bypass;
66 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
67
68 /* Bypass the cache access till data read from HW */
69 map->cache_bypass = true;
70 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
71 if (!tmp_buf) {
72 ret = -ENOMEM;
73 goto err_free;
74 }
75 ret = regmap_raw_read(map, 0, tmp_buf,
76 map->cache_size_raw);
77 map->cache_bypass = cache_bypass;
78 if (ret == 0) {
79 map->reg_defaults_raw = tmp_buf;
80 map->cache_free = 1;
81 } else {
82 kfree(tmp_buf);
83 }
84 }
85
86 /* fill the reg_defaults */
87 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
88 reg = i * map->reg_stride;
89
90 if (!regmap_readable(map, reg))
91 continue;
92
93 if (regmap_volatile(map, reg))
94 continue;
95
96 if (map->reg_defaults_raw) {
97 val = regcache_get_val(map, map->reg_defaults_raw, i);
98 } else {
99 bool cache_bypass = map->cache_bypass;
100
101 map->cache_bypass = true;
102 ret = regmap_read(map, reg, &val);
103 map->cache_bypass = cache_bypass;
104 if (ret != 0) {
105 dev_err(map->dev, "Failed to read %d: %d\n",
106 reg, ret);
107 goto err_free;
108 }
109 }
110
111 map->reg_defaults[j].reg = reg;
112 map->reg_defaults[j].def = val;
113 j++;
114 }
115
116 return 0;
117
118err_free:
119 kfree(map->reg_defaults);
120
121 return ret;
122}
123
/*
 * regcache_init - Set up the register cache for a regmap.
 *
 * @map: map to configure.
 * @config: configuration supplied by the driver.
 *
 * Selects the cache backend matching map->cache_type, copies or
 * reconstructs the register defaults table, and invokes the backend's
 * init hook.  Returns 0 on success or a negative errno; may enable
 * map->cache_bypass and return 0 when no caching is required.
 */
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	/* With no cache selected, all accesses go straight to hardware. */
	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	/* Every supplied default must sit on a stride-aligned address. */
	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	/* Locate the cache backend for the requested cache type. */
	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	/* Bytes needed to hold one register value in the raw cache. */
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	/* read, write and name are mandatory for any backend. */
	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us. We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		/* regcache_hw_init() may decide nothing is cacheable. */
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	/* Only free the raw defaults if we allocated them ourselves. */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
214
215void regcache_exit(struct regmap *map)
216{
217 if (map->cache_type == REGCACHE_NONE)
218 return;
219
220 BUG_ON(!map->cache_ops);
221
222 kfree(map->reg_defaults);
223 if (map->cache_free)
224 kfree(map->reg_defaults_raw);
225
226 if (map->cache_ops->exit) {
227 dev_dbg(map->dev, "Destroying %s cache\n",
228 map->cache_ops->name);
229 map->cache_ops->exit(map);
230 }
231}
232
233/**
234 * regcache_read - Fetch the value of a given register from the cache.
235 *
236 * @map: map to configure.
237 * @reg: The register index.
238 * @value: The value to be returned.
239 *
240 * Return a negative value on failure, 0 on success.
241 */
242int regcache_read(struct regmap *map,
243 unsigned int reg, unsigned int *value)
244{
245 int ret;
246
247 if (map->cache_type == REGCACHE_NONE)
248 return -ENOSYS;
249
250 BUG_ON(!map->cache_ops);
251
252 if (!regmap_volatile(map, reg)) {
253 ret = map->cache_ops->read(map, reg, value);
254
255 return ret;
256 }
257
258 return -EINVAL;
259}
260
261/**
262 * regcache_write - Set the value of a given register in the cache.
263 *
264 * @map: map to configure.
265 * @reg: The register index.
266 * @value: The new register value.
267 *
268 * Return a negative value on failure, 0 on success.
269 */
270int regcache_write(struct regmap *map,
271 unsigned int reg, unsigned int value)
272{
273 if (map->cache_type == REGCACHE_NONE)
274 return 0;
275
276 BUG_ON(!map->cache_ops);
277
278 if (!regmap_volatile(map, reg))
279 return map->cache_ops->write(map, reg, value);
280
281 return 0;
282}
283
284static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
285 unsigned int val)
286{
287 int ret;
288
289 /* If we don't know the chip just got reset, then sync everything. */
290 if (!map->no_sync_defaults)
291 return true;
292
293 /* Is this the hardware default? If so skip. */
294 ret = regcache_lookup_reg(map, reg);
295 if (ret >= 0 && val == map->reg_defaults[ret].def)
296 return false;
297 return true;
298}
299
/*
 * Fallback sync used when the cache backend provides no ->sync hook:
 * walk [min, max] in reg_stride steps and write back every cached,
 * writeable, non-volatile value that still needs syncing.
 */
static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		/* Skip values that already match the hardware default. */
		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		/* Bypass so the write reaches the hardware, not the cache. */
		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}
333
/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	/* NOTE(review): name is otherwise unused here; presumably kept
	 * for the trace hooks disabled in this port (see the commented
	 * out trace.h include at the top of the file). */
	name = map->cache_ops->name;

	/* Nothing to do if the hardware still matches the cache. */
	if (!map->cache_dirty)
		goto out;

	/* Allow writes to be queued asynchronously where supported. */
	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	/* Prefer the backend's sync, else fall back to per-register sync. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for any queued asynchronous writes to complete. */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
398
/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	/* Nothing to do if the hardware still matches the cache. */
	if (!map->cache_dirty)
		goto out;

	/* Allow writes to be queued asynchronously where supported. */
	map->async = true;

	/* Prefer the backend's sync, else fall back to per-register sync. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for any queued asynchronous writes to complete. */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
450
451/**
452 * regcache_drop_region - Discard part of the register cache
453 *
454 * @map: map to operate on
455 * @min: first register to discard
456 * @max: last register to discard
457 *
458 * Discard part of the register cache.
459 *
460 * Return a negative value on failure, 0 on success.
461 */
462int regcache_drop_region(struct regmap *map, unsigned int min,
463 unsigned int max)
464{
465 int ret = 0;
466
467 if (!map->cache_ops || !map->cache_ops->drop)
468 return -EINVAL;
469
470 map->lock(map->lock_arg);
471
472 ret = map->cache_ops->drop(map, min, max);
473
474 map->unlock(map->lock_arg);
475
476 return ret;
477}
478EXPORT_SYMBOL_GPL(regcache_drop_region);
479
480/**
481 * regcache_cache_only - Put a register map into cache only mode
482 *
483 * @map: map to configure
484 * @enable: flag if changes should be written to the hardware
485 *
486 * When a register map is marked as cache only writes to the register
487 * map API will only update the register cache, they will not cause
488 * any hardware changes. This is useful for allowing portions of
489 * drivers to act as though the device were functioning as normal when
490 * it is disabled for power saving reasons.
491 */
492void regcache_cache_only(struct regmap *map, bool enable)
493{
494 map->lock(map->lock_arg);
495 WARN_ON(map->cache_bypass && enable);
496 map->cache_only = enable;
497 map->unlock(map->lock_arg);
498}
499EXPORT_SYMBOL_GPL(regcache_cache_only);
500
501/**
502 * regcache_mark_dirty - Indicate that HW registers were reset to default values
503 *
504 * @map: map to mark
505 *
506 * Inform regcache that the device has been powered down or reset, so that
507 * on resume, regcache_sync() knows to write out all non-default values
508 * stored in the cache.
509 *
510 * If this function is not called, regcache_sync() will assume that
511 * the hardware state still matches the cache state, modulo any writes that
512 * happened when cache_only was true.
513 */
514void regcache_mark_dirty(struct regmap *map)
515{
516 map->lock(map->lock_arg);
517 map->cache_dirty = true;
518 map->no_sync_defaults = true;
519 map->unlock(map->lock_arg);
520}
521EXPORT_SYMBOL_GPL(regcache_mark_dirty);
522
523/**
524 * regcache_cache_bypass - Put a register map into cache bypass mode
525 *
526 * @map: map to configure
527 * @enable: flag if changes should not be written to the cache
528 *
529 * When a register map is marked with the cache bypass option, writes
530 * to the register map API will only update the hardware and not the
531 * the cache directly. This is useful when syncing the cache back to
532 * the hardware.
533 */
534void regcache_cache_bypass(struct regmap *map, bool enable)
535{
536 map->lock(map->lock_arg);
537 WARN_ON(map->cache_only && enable);
538 map->cache_bypass = enable;
539 map->unlock(map->lock_arg);
540}
541EXPORT_SYMBOL_GPL(regcache_cache_bypass);
542
/*
 * regcache_set_val - Store one value into a raw cache block.
 *
 * @map: map owning the cache.
 * @base: base address of the cache block.
 * @idx: index of the value within the block.
 * @val: value to store.
 *
 * Returns true when the cache already held @val (nothing written),
 * false when the cache was updated.
 */
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	/* Otherwise store at the cache's natural word width. */
	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		/* Any other word size is a programming error. */
		BUG();
	}
	return false;
}
588
/*
 * regcache_get_val - Read one value out of a raw cache block.
 *
 * @map: map owning the cache.
 * @base: base address of the cache block.
 * @idx: index of the value within the block.
 *
 * Returns the cached value at @idx.  NOTE(review): on a NULL @base this
 * returns -EINVAL through an unsigned return type, so callers cannot
 * distinguish it from a legitimate value of 0xffffffea.
 */
unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	/* Otherwise load at the cache's natural word width. */
	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		/* Any other word size is a programming error. */
		BUG();
	}
	/* unreachable */
	return -1;
}
629
630static int regcache_default_cmp(const void *a, const void *b)
631{
632 const struct reg_default *_a = a;
633 const struct reg_default *_b = b;
634
635 return _a->reg - _b->reg;
636}
637
638int regcache_lookup_reg(struct regmap *map, unsigned int reg)
639{
640 struct reg_default key;
641 struct reg_default *r;
642
643 key.reg = reg;
644 key.def = 0;
645
646 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
647 sizeof(struct reg_default), regcache_default_cmp);
648
649 if (r)
650 return r - map->reg_defaults;
651 else
652 return -ENOENT;
653}
654
655static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
656{
657 if (!cache_present)
658 return true;
659
660 return test_bit(idx, cache_present);
661}
662
/*
 * Sync a cache block one register at a time; used when raw (burst)
 * writes are unavailable or the map requires single-register writes.
 * Indices [start, end) map to registers block_base + i * reg_stride.
 */
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned long *cache_present,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* Skip holes in the block and unwriteable registers. */
		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		/* Bypass so the write reaches the hardware, not the cache. */
		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}
698
/*
 * Flush a pending run of contiguous cached values as one raw write.
 *
 * @data points at the first cached value of the run, or NULL when no
 * run is pending (in which case this is a no-op).  @base is the first
 * register of the run and @cur is one stride past the last.  *data is
 * reset to NULL afterwards so the caller can start a new run.
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	/* Bypass so the burst reaches the hardware, not the cache. */
	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}
726
/*
 * Sync a cache block using raw (burst) writes: accumulate runs of
 * consecutive registers that need syncing and flush each run with a
 * single _regmap_raw_write().
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
				   unsigned long *cache_present,
				   unsigned int block_base, unsigned int start,
				   unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* A hole or unwriteable register ends the current run. */
		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		/* A register already holding its default also ends the run. */
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at the first register needing sync. */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run is still pending at the end of the block. */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}
768
769int regcache_sync_block(struct regmap *map, void *block,
770 unsigned long *cache_present,
771 unsigned int block_base, unsigned int start,
772 unsigned int end)
773{
774 if (regmap_can_raw_write(map) && !map->use_single_write)
775 return regcache_sync_block_raw(map, block, cache_present,
776 block_base, start, end);
777 else
778 return regcache_sync_block_single(map, block, cache_present,
779 block_base, start, end);
780}
Note: See TracBrowser for help on using the repository browser.