source: GPL/branches/uniaud32-next/lib32/regcache-rbtree.c@647

Last change on this file since 647 was 647, checked in by Paul Smedley, 5 years ago

Cleanup headers, fix warnings

File size: 14.5 KB
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* from 4.19.163 */

//#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/byteorder/little_endian.h>
#include <linux/printk.h>

#include "internal.h"

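/*
 * Forward declarations: regcache_rbtree_init() uses the write and exit
 * handlers before they are defined.  The static/static-inline
 * qualifiers from the upstream Linux code are commented out throughout
 * this port, presumably so the symbols stay visible to the OS/2
 * toolchain.
 */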
/*static inline*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                            unsigned int value);
/*static inline*/ int regcache_rbtree_exit(struct regmap *map);

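/*
 * Each rbtree node caches one contiguous block of registers.  The block
 * spans base_reg .. base_reg + (blklen - 1) * reg_stride, and the
 * cache_present bitmap records which registers within the block
 * actually hold a cached value.
 */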
struct regcache_rbtree_node {
        /* block of adjacent registers */
        void *block;
        /* Which registers are present */
        long *cache_present;
        /* base register handled by this block */
        unsigned int base_reg;
        /* number of registers available in the block */
        unsigned int blklen;
        /* the actual rbtree node holding this block */
        struct rb_node node;
} /*__attribute__ ((packed))*/;

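/*
 * Per-map cache context: the tree of register blocks plus the most
 * recently used node, kept as a fast path for repeated accesses to the
 * same block.
 */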
struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

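/* Return the first and last register numbers covered by a block. */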
/*static inline*/ inline void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

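/*
 * Read/write a single value within a block.  idx is the block-relative
 * index, i.e. (reg - base_reg) / reg_stride; writing also marks the
 * register as present in the bitmap.
 */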
/*static inline*/ unsigned int regcache_rbtree_get_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode, unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}

/*static inline*/ void regcache_rbtree_set_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int idx, unsigned int val)
{
        set_bit(idx, rbnode->cache_present);
        regcache_set_val(map, rbnode->block, idx, val);
}

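/*
 * Look up the block containing a register.  The most recently used node
 * is checked first; on a miss the rbtree is searched, keyed on each
 * block's [base_reg, top_reg] range.  Returns NULL if no block covers
 * the register.
 */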
/*static inline*/ struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
        unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}

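/*
 * Link a new block into the rbtree using the usual rb_link_node() /
 * rb_insert_color() idiom.  Returns 1 on insertion, or 0 if a block
 * already covering the base register was found.
 */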
/*static inline*/ int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
        struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}

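/*
 * debugfs support: the "rbtree" file dumps each block's register range
 * and size, followed by totals and the memory consumed by the cache.
 */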
#ifdef CONFIG_DEBUG_FS
/*static inline*/ int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = rb_entry(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}

/*static inline*/ int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

/*static inline*/ const struct file_operations rbtree_fops = {
        .open = rbtree_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/*static inline*/ void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

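/*
 * Allocate the cache context and seed it with the map's register
 * defaults.  The TARGET_OS2 path below zeroes the empty root three
 * times over (assignment, explicit NULL, memset); the FIXME suggests
 * this belt-and-braces initialisation works around a patched
 * struct rb_root.
 */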
/*static inline*/ int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;
#ifdef TARGET_OS2
        // 2020-11-17 SHL FIXME patched struct rb_root
        struct rb_root _RB_ROOT = { NULL, };
#endif

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
#ifndef TARGET_OS2
        rbtree_ctx->root = RB_ROOT;
#else
        rbtree_ctx->root = _RB_ROOT;
        rbtree_ctx->root.rb_node = NULL;
        memset(&rbtree_ctx->root, 0, sizeof(struct rb_root));
#endif
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}

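/*
 * Tear down the cache: walk the tree with rb_first()/rb_next(), erase
 * each node and free its block, bitmap and node structure, then free
 * the context itself.  Safe to call more than once.
 */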
/*static inline*/ int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->cache_present);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

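/*
 * Cache read: returns -ENOENT if no block covers the register or the
 * register has never been written to the cache.
 */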
/*static inline*/ int regcache_rbtree_read(struct regmap *map,
                                           unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!test_bit(reg_tmp, rbnode->cache_present))
                        return -ENOENT;
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
                return -ENOENT;
        }

        return 0;
}

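/*
 * Grow an existing block so that it also covers [base_reg, top_reg] and
 * store value at reg.  The data block and, when the word count grows,
 * the present bitmap are krealloc()ed; if the block gains registers
 * below its old base (pos == 0) the existing data and bitmap are
 * shifted up by the old base's offset within the new span.  rbnode->block
 * is updated as soon as the data realloc succeeds, as later upstream
 * kernels do, so a failed bitmap allocation cannot leave it dangling.
 */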
/*static inline*/ int regcache_rbtree_insert_to_block(struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int base_reg,
        unsigned int top_reg,
        unsigned int reg,
        unsigned int value)
{
        unsigned int blklen;
        unsigned int pos, offset;
        unsigned long *present;
        u8 *blk;

        blklen = (top_reg - base_reg) / map->reg_stride + 1;
        pos = (reg - base_reg) / map->reg_stride;
        offset = (rbnode->base_reg - base_reg) / map->reg_stride;

        blk = krealloc(rbnode->block,
                       blklen * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        /* take ownership of the new block immediately: freeing blk on a
         * later failure would leave rbnode->block dangling and cause a
         * double free on cache exit
         */
        rbnode->block = blk;

        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
                present = krealloc(rbnode->cache_present,
                                   BITS_TO_LONGS(blklen) * sizeof(*present),
                                   GFP_KERNEL);
                if (!present)
                        return -ENOMEM;

                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
                       * sizeof(*present));
        } else {
                present = (unsigned long *)rbnode->cache_present;
        }

        /* insert the register value in the correct place in the rbnode block */
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
                bitmap_shift_left(present, present, offset, blklen);
        }

        /* update the rbnode block size and the base register */
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = (long *)present;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}

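/*
 * Allocate a new block for reg.  If the map has a read table, the
 * matching yes-range is used to size the block up front; otherwise the
 * block starts with a single register and grows on demand.
 */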
/*static inline*/ struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;

        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;

        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }

                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = (range->range_max - range->range_min) /
                                map->reg_stride + 1;
                        rbnode->base_reg = range->range_min;
                }
        }

        if (!rbnode->blklen) {
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
                                      GFP_KERNEL);
        if (!rbnode->block)
                goto err_free;

        rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
                                        sizeof(*rbnode->cache_present),
                                        GFP_KERNEL);
        if (!rbnode->cache_present)
                goto err_free_block;

        return rbnode;

err_free_block:
        kfree(rbnode->block);
err_free:
        kfree(rbnode);
        return NULL;
}

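/*
 * Cache write.  A register inside an existing block is updated in
 * place.  Otherwise the tree is walked for the closest block within
 * max_dist registers of reg, and that block is stretched with
 * regcache_rbtree_insert_to_block(); only when nothing qualifies is a
 * fresh block allocated.  max_dist limits the stretch to the point
 * where the extra cache words would cost about as much memory as a new
 * node structure.
 */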
/*static inline*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                            unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        int ret;

        rbtree_ctx = map->cache;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
                unsigned int base_reg, top_reg;
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
                unsigned int dist, best_dist = UINT_MAX;

                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
                if (reg < max_dist)
                        min = 0;
                else
                        min = reg - max_dist;
                max = reg + max_dist;

                /* look for an adjacent register to the one we are about to add */
                node = rbtree_ctx->root.rb_node;
                while (node) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);

                        regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
                                                         &base_reg, &top_reg);

                        if (base_reg <= max && top_reg >= min) {
                                if (reg < base_reg)
                                        dist = base_reg - reg;
                                else if (reg > top_reg)
                                        dist = reg - top_reg;
                                else
                                        dist = 0;
                                if (dist < best_dist) {
                                        rbnode = rbnode_tmp;
                                        best_dist = dist;
                                        new_base_reg = min(reg, base_reg);
                                        new_top_reg = max(reg, top_reg);
                                }
                        }

                        /*
                         * Keep looking, we want to choose the closest block,
                         * otherwise we might end up creating overlapping
                         * blocks, which breaks the rbtree.
                         */
                        if (reg < base_reg)
                                node = node->rb_left;
                        else if (reg > top_reg)
                                node = node->rb_right;
                        else
                                break;
                }

                if (rbnode) {
                        ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
                        rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode)
                        return -ENOMEM;
                regcache_rbtree_set_register(map, rbnode,
                                             reg - rbnode->base_reg, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}

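/*
 * Sync the cached registers in [min, max] back to the hardware.  Each
 * overlapping block is clamped to the window and handed to
 * regcache_sync_block() as a [start, end) pair of block-relative
 * indices.
 */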
/*static inline*/ int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                           unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int start, end;
        int ret;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block,
                                          (unsigned long *)rbnode->cache_present,
                                          rbnode->base_reg, start, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}

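/*
 * Drop the cached values in [min, max]: the present bits are cleared so
 * the registers read as uncached, but the blocks themselves stay
 * allocated.
 */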
/*static inline*/ int regcache_rbtree_drop(struct regmap *map, unsigned int min,
                                           unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode;
        struct rb_node *node;
        unsigned int base_reg, top_reg;
        unsigned int start, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                bitmap_clear((unsigned long *)rbnode->cache_present, start, end - start);
        }

        return 0;
}

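/*
 * The vtable regmap dispatches through when this cache type is active.
 * Drivers select the backend in their regmap configuration, e.g.
 * (illustrative values):
 *
 *     static const struct regmap_config cfg = {
 *             .reg_bits   = 8,
 *             .val_bits   = 16,
 *             .cache_type = REGCACHE_RBTREE,
 *     };
 */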
struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rbtree_debugfs_init,
#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
        .drop = regcache_rbtree_drop,
};