// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>

/* from 5.10.10 */

//#include <linux/bsearch.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/byteorder/little_endian.h>
#include <linux/printk.h>

//#include "trace.h"
#include "internal.h"

/*static*/ const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
	&regcache_lzo_ops,
#endif
	&regcache_flat_ops,
};

/*static*/ int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int reg, val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
		if (regmap_readable(map, i * map->reg_stride) &&
		    !regmap_volatile(map, i * map->reg_stride))
			count++;

	/* all registers are unreadable or volatile, so just bypass */
	if (!count) {
		map->cache_bypass = true;
		return 0;
	}

	map->num_reg_defaults = count;
	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
					  GFP_KERNEL);
	if (!map->reg_defaults)
		return -ENOMEM;

	if (!map->reg_defaults_raw) {
		bool cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache access till data read from HW */
		map->cache_bypass = true;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto err_free;
		}
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret == 0) {
			map->reg_defaults_raw = tmp_buf;
			map->cache_free = 1;
		} else {
			kfree(tmp_buf);
		}
	}

	/* fill the reg_defaults */
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		reg = i * map->reg_stride;

		if (!regmap_readable(map, reg))
			continue;

		if (regmap_volatile(map, reg))
			continue;

		if (map->reg_defaults_raw) {
			val = regcache_get_val(map, map->reg_defaults_raw, i);
		} else {
			bool cache_bypass = map->cache_bypass;

			map->cache_bypass = true;
			ret = regmap_read(map, reg, &val);
			map->cache_bypass = cache_bypass;
			if (ret != 0) {
				dev_err(map->dev, "Failed to read %d: %d\n",
					reg, ret);
				goto err_free;
			}
		}

		map->reg_defaults[j].reg = reg;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	kfree(map->reg_defaults);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		if (config->reg_defaults || config->num_reg_defaults_raw)
			dev_warn(map->dev,
				 "No cache used with register defaults set!\n");

		map->cache_bypass = true;
		return 0;
	}

	if (config->reg_defaults && !config->num_reg_defaults) {
		dev_err(map->dev,
			"Register defaults are set without the number!\n");
		return -EINVAL;
	}

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match compress type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults
	 * won't vanish from under us. We'll need to make
	 * a copy of it.
	 */
	if (config->reg_defaults) {
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults,
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
		if (map->cache_bypass)
			return 0;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}
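
/*
 * Illustrative configuration (hypothetical driver code, not part of this
 * file; the foo_* names are made up): regcache_init() is normally fed its
 * defaults through struct regmap_config, either as an explicit reg_defaults
 * table as below, or by setting only num_reg_defaults_raw so that
 * regcache_hw_init() reads the initial values back from the hardware.
 *
 *	static const struct reg_default foo_reg_defaults[] = {
 *		{ 0x00, 0x0000 },
 *		{ 0x01, 0x1234 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x10,
 *		.cache_type = REGCACHE_RBTREE,
 *		.reg_defaults = foo_reg_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_reg_defaults),
 *	};
 */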

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read - Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

#ifndef TARGET_OS2
		if (ret == 0)
			trace_regmap_reg_read_cache(map, reg, *value);
#endif
		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write - Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
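
/*
 * Simplified, hypothetical sketch of the call pattern expected from the
 * regmap core (not the actual _regmap_read()/_regmap_write() code;
 * read_from_bus()/write_to_bus() are made-up helpers).  regcache_read()
 * returns -ENOSYS when no cache is configured and -EINVAL for volatile
 * registers, and the core then falls back to the bus; successful bus
 * writes are mirrored into the cache with regcache_write().
 *
 *	ret = regcache_read(map, reg, &val);
 *	if (ret == -ENOSYS || ret == -EINVAL)
 *		ret = read_from_bus(map, reg, &val);
 *
 *	ret = write_to_bus(map, reg, val);
 *	if (ret == 0)
 *		regcache_write(map, reg, val);
 */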

/*static*/ bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
					unsigned int val)
{
	int ret;

	/* If we don't know the chip just got reset, then sync everything. */
	if (!map->no_sync_defaults)
		return true;

	/* Is this the hardware default? If so skip. */
	ret = regcache_lookup_reg(map, reg);
	if (ret >= 0 && val == map->reg_defaults[ret].def)
		return false;
	return true;
}

/*static*/ int regcache_default_sync(struct regmap *map, unsigned int min,
				     unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		ret = regcache_read(map, reg, &val);
		if (ret)
			return ret;

		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}

/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "start");
#endif
	if (!map->cache_dirty)
		goto out;

	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = false;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "stop");
#endif
	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
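
/*
 * Illustrative usage (hypothetical driver resume path, not part of this
 * file; foo_* names are made up): once the device is powered again, leave
 * cache-only mode and replay the cached register values.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_priv *foo = dev_get_drvdata(dev);
 *		int ret;
 *
 *		regcache_cache_only(foo->regmap, false);
 *		ret = regcache_sync(foo->regmap);
 *		if (ret)
 *			dev_err(dev, "Failed to restore register cache: %d\n", ret);
 *
 *		return ret;
 *	}
 */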

/**
 * regcache_sync_region - Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "start region");
#endif
	if (!map->cache_dirty)
		goto out;

	map->async = true;

	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	regmap_async_complete(map);

#ifndef TARGET_OS2
	trace_regcache_sync(map, name, "stop region");
#endif
	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
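
/*
 * Illustrative usage (hypothetical, not part of this file; the FOO_*
 * registers are made up): when only one block of the device has lost its
 * state, a driver can restrict the sync to that range instead of replaying
 * the whole cache.
 *
 *	ret = regcache_sync_region(foo->regmap, FOO_DSP_BASE, FOO_DSP_END);
 */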

/**
 * regcache_drop_region - Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;

	if (!map->cache_ops || !map->cache_ops->drop)
		return -EINVAL;

	map->lock(map->lock_arg);

#ifndef TARGET_OS2
	trace_regcache_drop_region(map, min, max);
#endif
	ret = map->cache_ops->drop(map, min, max);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
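
/*
 * Illustrative usage (hypothetical, not part of this file; the FOO_*
 * registers are made up): if a range of cached values is known to be stale,
 * for example after a firmware download rewrote a coefficient block behind
 * regmap's back, the entries can be discarded rather than synced back later.
 *
 *	ret = regcache_drop_region(foo->regmap, FOO_COEFF_BASE, FOO_COEFF_END);
 */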

/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes. This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
#ifndef TARGET_OS2
	trace_regmap_cache_only(map, enable);
#endif
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
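
/*
 * Illustrative usage (hypothetical driver suspend path, not part of this
 * file; foo_* names are made up): before removing power, switch to
 * cache-only mode so that subsequent register writes only land in the
 * cache, and mark the cache dirty because the hardware will return with
 * its reset defaults.
 *
 *	regcache_cache_only(foo->regmap, true);
 *	regcache_mark_dirty(foo->regmap);
 *	regulator_disable(foo->supply);
 */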

/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
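
/*
 * Illustrative usage (hypothetical, not part of this file; foo->reset_gpio
 * is made up): regcache_mark_dirty() is also needed when the hardware is
 * reset without a power cycle, for instance via a reset GPIO.  Without it,
 * regcache_sync() assumes the hardware still matches the cache and may
 * write back little or nothing, leaving the device at its reset defaults.
 *
 *	gpiod_set_value_cansleep(foo->reset_gpio, 1);
 *	gpiod_set_value_cansleep(foo->reset_gpio, 0);
 *	regcache_mark_dirty(foo->regmap);
 *	ret = regcache_sync(foo->regmap);
 */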

/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly. This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
#ifndef TARGET_OS2
	trace_regmap_cache_bypass(map, enable);
#endif
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
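
/*
 * Illustrative usage (hypothetical, not part of this file; FOO_SOFT_RESET
 * is made up): cache bypass suits writes that must reach the hardware but
 * should not become part of the cached state, such as poking a
 * self-clearing soft-reset bit.
 *
 *	regcache_cache_bypass(foo->regmap, true);
 *	regmap_write(foo->regmap, FOO_SOFT_RESET, 0x1);
 *	regcache_cache_bypass(foo->regmap, false);
 */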

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;

		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;

		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;

		cache[idx] = val;
		break;
	}
#ifdef CONFIG_64BIT
	case 8: {
		u64 *cache = base;

		cache[idx] = val;
		break;
	}
#endif
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;

		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;

		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;

		return cache[idx];
	}
#ifdef CONFIG_64BIT
	case 8: {
		const u64 *cache = base;

		return cache[idx];
	}
#endif
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

/*static*/ int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

/*static*/ bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
	if (!cache_present)
		return true;

	return test_bit(idx, cache_present);
}

/*static*/ int regcache_sync_block_single(struct regmap *map, void *block,
					  unsigned long *cache_present,
					  unsigned int block_base,
					  unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val))
			continue;

		map->cache_bypass = true;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = false;
		if (ret != 0) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				regtmp, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

/*static*/ int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					     unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %lu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	*data = NULL;

	return ret;
}

/*static*/ int regcache_sync_block_raw(struct regmap *map, void *block,
				       unsigned long *cache_present,
				       unsigned int block_base, unsigned int start,
				       unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map) && !map->use_single_write)
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);
	else
		return regcache_sync_block_single(map, block, cache_present,
						  block_base, start, end);
}
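
/*
 * Note: regcache_sync_block() is a helper for the cache backends rather
 * than for drivers.  A backend's ->sync callback (the rbtree cache, for
 * example) walks its storage and calls it once per contiguous block with
 * the block's value array, its presence bitmap and the register number of
 * the block's first entry; the raw path above then coalesces consecutive
 * registers that need syncing into single bulk writes via
 * regcache_sync_block_raw_flush().
 */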