source: GPL/branches/uniaud32-next/lib32/regcache-rbtree.c@621

Last change on this file since 621 was 621, checked in by Paul Smedley, 5 years ago

Remove some logging messages

File size: 14.5 KB
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* from 4.14.202 */

//#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/byteorder/little_endian.h>
#include <linux/printk.h>

#include "internal.h"

/*static*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				     unsigned int value);
/*static*/ int regcache_rbtree_exit(struct regmap *map);

#ifdef TARGET_OS2
#define UINT_MAX (~0U)
#pragma pack(1)
#endif
struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	unsigned long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
#ifndef TARGET_OS2
} __attribute__ ((packed));
#else
};
#pragma pack()
#endif

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};
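
/*
 * Illustrative note (not in the original source): each rbtree node caches a
 * contiguous run of registers.  "block" is a flat array of "blklen" values,
 * each map->cache_word_size bytes wide, and "cache_present" holds one bit
 * per slot.  For example, with map->reg_stride == 1, a node with
 * base_reg == 0x10 and blklen == 4 covers registers 0x10..0x13; caching
 * register 0x12 stores its value at index 2 of "block" and sets bit 2 of
 * "cache_present".
 */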

/*static inline*/ void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
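
/*
 * Worked example (illustrative only): with map->reg_stride == 2, a node with
 * base_reg == 0x20 and blklen == 8 yields *base == 0x20 and
 * *top == 0x20 + (8 - 1) * 2 == 0x2e, i.e. the node spans the even-numbered
 * registers 0x20, 0x22, ..., 0x2e.
 */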

/*static*/ unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

/*static*/ void regcache_rbtree_set_register(struct regmap *map,
					     struct regcache_rbtree_node *rbnode,
					     unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}

/*static*/ struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							       unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}
	return NULL;
}
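
/*
 * Lookup sketch (illustrative only): the most recently used node is tried
 * first via cached_rbnode, so repeated accesses to the same block skip the
 * tree walk entirely.  On a miss the rbtree is descended by comparing "reg"
 * against each node's [base_reg, top_reg] span.  E.g. with blocks covering
 * 0x00..0x0f and 0x20..0x2f, a lookup of 0x25 returns the second block (and
 * caches it), while a lookup of 0x15 falls off the tree and returns NULL.
 */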

/*static*/ int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				      struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}
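
/*
 * Usage note (illustrative only): the return value distinguishes the two
 * outcomes, e.g.
 *
 *	if (regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode))
 *		;	// node was linked and recolored into the tree
 *	else
 *		;	// an existing block already covers base_reg
 */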

#ifdef CONFIG_DEBUG_FS
/*static*/ int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = rb_entry(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}
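
/*
 * Example debugfs output (illustrative only, all values hypothetical): a
 * cache holding two blocks might read as
 *
 *	0-1f (32)
 *	40-4f (16)
 *	2 nodes, 48 registers, average 24 registers, used 412 bytes
 */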

/*static*/ int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

/*static*/ const struct file_operations rbtree_fops = {
	.open = rbtree_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*static*/ void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif

/*static*/ int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;
#ifdef TARGET_OS2
	// 2020-11-17 SHL FIXME patched struct rb_root
	struct rb_root _RB_ROOT = { NULL, };
#endif

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
#ifndef TARGET_OS2
	rbtree_ctx->root = RB_ROOT;
#else
	rbtree_ctx->root = _RB_ROOT;
	rbtree_ctx->root.rb_node = NULL;
	memset(&rbtree_ctx->root, 0, sizeof(struct rb_root));
#endif
	rbtree_ctx->cached_rbnode = NULL;
	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

/*static*/ int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}

/*static*/ int regcache_rbtree_read(struct regmap *map,
				    unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}
	return 0;
}
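
/*
 * Usage sketch (illustrative only, assuming a map whose cache already holds
 * register 0x10 but not 0x11):
 *
 *	unsigned int val;
 *
 *	regcache_rbtree_read(map, 0x10, &val);	// returns 0, val filled in
 *	regcache_rbtree_read(map, 0x11, &val);	// returns -ENOENT
 */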

/*static*/ int regcache_rbtree_insert_to_block(struct regmap *map,
					       struct regcache_rbtree_node *rbnode,
					       unsigned int base_reg,
					       unsigned int top_reg,
					       unsigned int reg,
					       unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present) {
			kfree(blk);
			return -ENOMEM;
		}

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
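
/*
 * Worked example (illustrative only): take map->reg_stride == 1, an existing
 * node with base_reg == 0x14 and blklen == 4 (covering 0x14..0x17), and a
 * write to reg == 0x10 that grows the block to base_reg == 0x10,
 * top_reg == 0x17.  Then blklen becomes 8, pos == 0 and
 * offset == (0x14 - 0x10) / 1 == 4, so the four old values are moved up by
 * four slots and the presence bitmap is shifted left by four bits before the
 * new value is stored at index 0.
 */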

/*static*/ struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}
		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}
	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
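
/*
 * Sizing example (illustrative only): if reg == 0x25 falls inside an
 * rd_table yes_range of 0x20..0x3f and map->reg_stride == 1, the new node is
 * allocated with base_reg == 0x20 and
 * blklen == (0x3f - 0x20) / 1 + 1 == 32, covering the whole readable range
 * up front.  Without a matching range the node starts as a single-register
 * block (blklen == 1, base_reg == reg).
 */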

/*static*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				     unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}
	return 0;
}
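
/*
 * Heuristic example (illustrative only, the sizeof value is hypothetical):
 * max_dist bounds how far away an existing block may be and still be grown
 * to absorb the new register.  With map->reg_stride == 1,
 * map->cache_word_size == 2 and sizeof(*rbnode_tmp) == 40, max_dist is 20,
 * so only blocks overlapping [reg - 20, reg + 20] are candidates.  The idea
 * seems to be that padding a block by up to max_dist entries costs roughly
 * no more memory than allocating a separate node would.
 */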

/*static*/ int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				    unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}
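
/*
 * Clamping example (illustrative only): for a node covering 0x10..0x1f with
 * map->reg_stride == 1, a sync over min == 0x14, max == 0x18 yields
 * start == (0x14 - 0x10) / 1 == 4 and end == (0x18 - 0x10) / 1 + 1 == 9, so
 * block indices 4..8 (registers 0x14..0x18) are handed to
 * regcache_sync_block().  regcache_rbtree_drop() below clamps the same way
 * before clearing the presence bits.
 */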

/*static*/ int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				    unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}

struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
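
/*
 * Integration note (illustrative only): in the upstream regmap core this ops
 * table is selected by regcache_init() when a driver configures
 * config->cache_type == REGCACHE_RBTREE; the core then dispatches cache
 * reads, writes, syncs and drops through these callbacks, e.g.
 *
 *	map->cache_ops->write(map, reg, val);	// regcache_rbtree_write()
 */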