source: GPL/branches/uniaud32-next/lib32/regcache-rbtree.c@638

Last change on this file since 638 was 638, checked in by David Azarewicz, 5 years ago

Fixed more warnings.

File size: 14.6 KB
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* from 4.19.163 */

//#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/byteorder/little_endian.h>
#include <linux/printk.h>

#include "internal.h"

#ifdef TARGET_OS2
#define UINT_MAX (~0U)
#endif

/*static inline*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                            unsigned int value);
/*static inline*/ int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
        /* block of adjacent registers */
        void *block;
        /* Which registers are present */
        long *cache_present;
        /* base register handled by this block */
        unsigned int base_reg;
        /* number of registers available in the block */
        unsigned int blklen;
        /* the actual rbtree node holding this block */
        struct rb_node node;
} /*__attribute__ ((packed))*/;

struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

/*static inline*/ inline void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
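
/*
 * Worked example (illustrative values, not from any driver): a block with
 * base_reg = 0x10, blklen = 4 and reg_stride = 2 covers registers 0x10,
 * 0x12, 0x14 and 0x16, so *top = 0x10 + (4 - 1) * 2 = 0x16.
 */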

/*static inline*/ unsigned int regcache_rbtree_get_register(struct regmap *map,
                                                            struct regcache_rbtree_node *rbnode,
                                                            unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}

/*static inline*/ void regcache_rbtree_set_register(struct regmap *map,
                                                    struct regcache_rbtree_node *rbnode,
                                                    unsigned int idx, unsigned int val)
{
        set_bit(idx, rbnode->cache_present);
        regcache_set_val(map, rbnode->block, idx, val);
}

/*static inline*/ struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                                      unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;

        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }

        return NULL;
}
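
/*
 * The cached_rbnode check above makes back-to-back accesses to the same
 * block O(1); only a miss pays for the O(log n) tree walk. Because blocks
 * never overlap, at most one block can contain any given register, so the
 * first hit found is the only possible hit.
 */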

/*static inline*/ int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
                                             struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;

        parent = NULL;
        new = &root->rb_node;
        while (*new) {
                rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}
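
/*
 * Returns 1 when the node was linked into the tree and 0 when an existing
 * block already covers rbnode->base_reg. rb_link_node()/rb_insert_color()
 * are the standard two-step kernel rbtree insertion: link the new node at
 * the leaf position found by the descent, then rebalance and recolour.
 */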

#ifdef CONFIG_DEBUG_FS
/*static inline*/ int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = rb_entry(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}
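
/*
 * Hypothetical output for a map with two blocks (values invented purely
 * to illustrate the format printed above, assuming reg_stride = 1):
 *
 *   0-1f (32)
 *   40-47 (8)
 *   2 nodes, 40 registers, average 20 registers, used 256 bytes
 */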

/*static inline*/ int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

/*static inline*/ const struct file_operations rbtree_fops = {
        .open = rbtree_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/*static inline*/ void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

/*static inline*/ int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;
#ifdef TARGET_OS2
        // 2020-11-17 SHL FIXME patched struct rb_root
        struct rb_root _RB_ROOT = { NULL, };
#endif

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
#ifndef TARGET_OS2
        rbtree_ctx->root = RB_ROOT;
#else
        rbtree_ctx->root = _RB_ROOT;
        rbtree_ctx->root.rb_node = NULL;
        memset(&rbtree_ctx->root, 0, sizeof(struct rb_root));
#endif
        rbtree_ctx->cached_rbnode = NULL;

        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}
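
/*
 * Example: with a (hypothetical) default table such as
 *
 *   static const struct reg_default foo_defaults[] = {
 *           { .reg = 0x00, .def = 0x1234 },
 *           { .reg = 0x02, .def = 0x5678 },
 *   };
 *
 * the loop above seeds the cache by routing each default through
 * regcache_rbtree_write(), so the tree starts out mirroring the chip's
 * reset state. foo_defaults is an invented name for illustration only.
 */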

/*static inline*/ int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->cache_present);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

/*static inline*/ int regcache_rbtree_read(struct regmap *map,
                                           unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;

        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!test_bit(reg_tmp, rbnode->cache_present))
                        return -ENOENT;
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
                return -ENOENT;
        }

        return 0;
}
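
/*
 * -ENOENT is returned both when no block covers reg and when a block
 * covers it but the cache_present bit is clear (the slot was allocated
 * as part of a larger block yet never written). In the regcache core the
 * caller typically falls back to a hardware read in either case.
 */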


/*static inline*/ int regcache_rbtree_insert_to_block(struct regmap *map,
                                                      struct regcache_rbtree_node *rbnode,
                                                      unsigned int base_reg,
                                                      unsigned int top_reg,
                                                      unsigned int reg,
                                                      unsigned int value)
{
        unsigned int blklen;
        unsigned int pos, offset;
        unsigned long *present;
        u8 *blk;

        blklen = (top_reg - base_reg) / map->reg_stride + 1;
        pos = (reg - base_reg) / map->reg_stride;
        offset = (rbnode->base_reg - base_reg) / map->reg_stride;

        blk = krealloc(rbnode->block,
                       blklen * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
                present = krealloc(rbnode->cache_present,
                                   BITS_TO_LONGS(blklen) * sizeof(*present),
                                   GFP_KERNEL);
                if (!present) {
                        kfree(blk);
                        return -ENOMEM;
                }

                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
                       * sizeof(*present));
        } else {
                present = (unsigned long *)rbnode->cache_present;
        }

        /* insert the register value in the correct place in the rbnode block */
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
                bitmap_shift_left(present, present, offset, blklen);
        }

        /* update the rbnode block, its size and the base register */
        rbnode->block = blk;
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = (long *)present;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}
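
/*
 * Worked example (invented numbers): a block with base_reg = 0x14 and
 * blklen = 4 grows downwards to absorb reg = 0x10 with reg_stride = 2.
 * The caller passes base_reg = 0x10 and top_reg = 0x1a, so blklen = 6,
 * pos = 0 and offset = 2: the old data is memmove()d up by two word
 * slots, the present bitmap is shifted left by two, and the new value
 * lands in slot 0. When the block grows upwards instead, pos != 0 and
 * the existing data can stay where it is.
 */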

/*static inline*/ struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;

        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;

        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }

                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = (range->range_max - range->range_min) /
                                map->reg_stride + 1;
                        rbnode->base_reg = range->range_min;
                }
        }

        if (!rbnode->blklen) {
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
                                      GFP_KERNEL);
        if (!rbnode->block)
                goto err_free;

        rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
                                        sizeof(*rbnode->cache_present),
                                        GFP_KERNEL);
        if (!rbnode->cache_present)
                goto err_free_block;

        return rbnode;

err_free_block:
        kfree(rbnode->block);
err_free:
        kfree(rbnode);
        return NULL;
}
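
/*
 * Sizing heuristic: if reg falls inside a readable range declared by the
 * driver, e.g. a (hypothetical) yes_ranges entry of { 0x00, 0x3f } with
 * reg_stride = 1, the whole 64-register block is allocated up front so
 * writes within that range never need a krealloc(); otherwise the node
 * starts as a single register and grows on demand through
 * regcache_rbtree_insert_to_block().
 */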

/*static inline*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                            unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        int ret;

        rbtree_ctx = map->cache;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
                unsigned int base_reg, top_reg;
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
                unsigned int dist, best_dist = UINT_MAX;

                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
                if (reg < max_dist)
                        min = 0;
                else
                        min = reg - max_dist;
                max = reg + max_dist;

                /* look for an adjacent register to the one we are about to add */
                node = rbtree_ctx->root.rb_node;
                while (node) {
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);

                        regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
                                                         &base_reg, &top_reg);

                        if (base_reg <= max && top_reg >= min) {
                                if (reg < base_reg)
                                        dist = base_reg - reg;
                                else if (reg > top_reg)
                                        dist = reg - top_reg;
                                else
                                        dist = 0;
                                if (dist < best_dist) {
                                        rbnode = rbnode_tmp;
                                        best_dist = dist;
                                        new_base_reg = min(reg, base_reg);
                                        new_top_reg = max(reg, top_reg);
                                }
                        }

                        /*
                         * Keep looking, we want to choose the closest block,
                         * otherwise we might end up creating overlapping
                         * blocks, which breaks the rbtree.
                         */
                        if (reg < base_reg)
                                node = node->rb_left;
                        else if (reg > top_reg)
                                node = node->rb_right;
                        else
                                break;
                }

                if (rbnode) {
                        ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret)
                                return ret;
                        rbtree_ctx->cached_rbnode = rbnode;
                        return 0;
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode)
                        return -ENOMEM;
                regcache_rbtree_set_register(map, rbnode,
                                             reg - rbnode->base_reg, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }

        return 0;
}
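
/*
 * The max_dist heuristic bounds how far away an existing block may be
 * and still be grown to absorb the new register: extending a block by up
 * to sizeof(struct regcache_rbtree_node) / cache_word_size entries costs
 * no more memory than the bookkeeping overhead of a brand-new node. For
 * example (invented numbers), with reg_stride = 1, a 2-byte
 * cache_word_size and a 40-byte node struct, max_dist = 20 registers.
 */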

/*static inline*/ int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                           unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int start, end;
        int ret;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block,
                                          (unsigned long *)rbnode->cache_present,
                                          rbnode->base_reg, start, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}
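
/*
 * Clamping example (invented numbers): syncing min = 0x12, max = 0x15
 * against a block with base_reg = 0x10, blklen = 8 and reg_stride = 1
 * gives start = 2 and end = 6, so regcache_sync_block() only touches
 * slots 2..5. rb_first()/rb_next() walk the tree in ascending register
 * order, which is why the loop can break early once base_reg > max.
 */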

/*static inline*/ int regcache_rbtree_drop(struct regmap *map, unsigned int min,
                                           unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode;
        struct rb_node *node;
        unsigned int base_reg, top_reg;
        unsigned int start, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                bitmap_clear((unsigned long *)rbnode->cache_present, start, end - start);
        }

        return 0;
}
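
/*
 * Note that drop only clears cache_present bits; the blocks themselves
 * stay allocated, so a later read of a dropped register returns -ENOENT
 * and a later write simply sets the bit again without reallocating.
 */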

struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rbtree_debugfs_init,
#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
        .drop = regcache_rbtree_drop,
};
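
/*
 * Minimal usage sketch (a sketch only: it assumes a struct regmap that
 * the regcache core has already configured to use regcache_rbtree_ops;
 * error handling trimmed, register numbers and values invented):
 *
 *   regcache_rbtree_init(map);               // seed from reg_defaults
 *   regcache_rbtree_write(map, 0x10, 0xab);  // cache a value
 *
 *   unsigned int val;
 *   if (regcache_rbtree_read(map, 0x10, &val) == 0)
 *           pr_info("cached 0x10 = %#x\n", val);
 *
 *   regcache_rbtree_sync(map, 0, UINT_MAX);  // flush cached range to hw
 *   regcache_rbtree_exit(map);               // free blocks and context
 *
 * Drivers do not call these directly; the regcache core dispatches
 * through the regcache_rbtree_ops table above.
 */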