source: GPL/branches/uniaud32-next/lib32/regcache-rbtree.c@615

Last change on this file since 615 was 615, checked in by Paul Smedley, 5 years ago

Add source for uniaud32 based on code from linux kernel 5.4.86

File size: 16.1 KB
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* from 4.14.202 */

//#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/byteorder/little_endian.h>
#include <linux/printk.h>

#include "internal.h"

/*static*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                     unsigned int value);
/*static*/ int regcache_rbtree_exit(struct regmap *map);

#ifdef TARGET_OS2
#define UINT_MAX (~0U)
#pragma pack(1)
#endif
struct regcache_rbtree_node {
        /* block of adjacent registers */
        void *block;
        /* Which registers are present */
        unsigned long *cache_present;
        /* base register handled by this block */
        unsigned int base_reg;
        /* number of registers available in the block */
        unsigned int blklen;
        /* the actual rbtree node holding this block */
        struct rb_node node;
#ifndef TARGET_OS2
} __attribute__ ((packed));
#else
};
#pragma pack()
#endif

struct regcache_rbtree_ctx {
        struct rb_root root;
        struct regcache_rbtree_node *cached_rbnode;
};

/*static inline*/ void regcache_rbtree_get_base_top_reg(
        struct regmap *map,
        struct regcache_rbtree_node *rbnode,
        unsigned int *base, unsigned int *top)
{
        *base = rbnode->base_reg;
        *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}
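
/*
 * Worked example of the range math above (hypothetical values, not from
 * this driver): with reg_stride == 2, a node with base_reg == 0x10 and
 * blklen == 4 covers registers 0x10, 0x12, 0x14 and 0x16, so
 * *top == 0x10 + (4 - 1) * 2 == 0x16.
 */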

/*static*/ unsigned int regcache_rbtree_get_register(struct regmap *map,
        struct regcache_rbtree_node *rbnode, unsigned int idx)
{
        return regcache_get_val(map, rbnode->block, idx);
}

/*static*/ void regcache_rbtree_set_register(struct regmap *map,
                                             struct regcache_rbtree_node *rbnode,
                                             unsigned int idx, unsigned int val)
{
        set_bit(idx, rbnode->cache_present);
        regcache_set_val(map, rbnode->block, idx, val);
}

/*static*/ struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                               unsigned int reg)
{
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
//pr_warn("regcache_rbtree_lookup");
        /* fast path: check the most recently used node first */
        rbnode = rbtree_ctx->cached_rbnode;
        if (rbnode) {
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
//pr_warn("regcache_rbtree_lookup reg = %u, base_reg = %u, top_reg = %u", reg, base_reg, top_reg);
                if (reg >= base_reg && reg <= top_reg)
                        return rbnode;
        }

        /* otherwise walk the tree comparing reg against each block's range */
        node = rbtree_ctx->root.rb_node;
        while (node) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);
                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
//pr_warn("regcache_rbtree_lookup2 reg = %x, base_reg = %x, top_reg = %x", reg, base_reg, top_reg);
                if (reg >= base_reg && reg <= top_reg) {
                        rbtree_ctx->cached_rbnode = rbnode;
                        return rbnode;
                } else if (reg > top_reg) {
                        node = node->rb_right;
                } else if (reg < base_reg) {
                        node = node->rb_left;
                }
        }
//pr_warn("regcache_rbtree_lookup - NULL");
        return NULL;
}
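
/*
 * Minimal usage sketch (hypothetical register number): lookup is
 * O(log n) in the number of blocks, with the one-entry cached_rbnode
 * MRU check in front of the tree walk.
 *
 *      struct regcache_rbtree_node *n = regcache_rbtree_lookup(map, 0x12);
 *
 *      if (n) {
 *              // 0x12 lies inside n's [base_reg, top_reg] range
 *      }
 */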

/*static*/ int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
                                      struct regcache_rbtree_node *rbnode)
{
        struct rb_node **new, *parent;
        struct regcache_rbtree_node *rbnode_tmp;
        unsigned int base_reg_tmp, top_reg_tmp;
        unsigned int base_reg;
//pr_warn("regcache_rbtree_insert");
        parent = NULL;
        new = &root->rb_node;
//pr_warn("regcache_rbtree_insert - new = %x",new);
        while (*new) {
//pr_warn("regcache_rbtree_insert2");
                rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
                /* base and top registers of the current rbnode */
                regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
                                                 &top_reg_tmp);
                /* base register of the rbnode to be added */
                base_reg = rbnode->base_reg;
                parent = *new;
                /* if this register has already been inserted, just return */
//pr_warn("regcache_rbtree_insert - base_reg = %u, base_reg_tmp = %u, top_reg_tmp = %u", base_reg, base_reg_tmp, top_reg_tmp);
                if (base_reg >= base_reg_tmp &&
                    base_reg <= top_reg_tmp)
                        return 0;
                else if (base_reg > top_reg_tmp)
                        new = &((*new)->rb_right);
                else if (base_reg < base_reg_tmp)
                        new = &((*new)->rb_left);
        }

        /* insert the node into the rbtree */
        rb_link_node(&rbnode->node, parent, new);
        rb_insert_color(&rbnode->node, root);

        return 1;
}
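
/*
 * Return-value note (from the code above): 0 means rbnode->base_reg
 * already falls inside an existing block and nothing was linked into
 * the tree; 1 means the node was linked and rebalanced in.
 */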

#ifdef CONFIG_DEBUG_FS
/*static*/ int rbtree_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
        struct regcache_rbtree_node *n;
        struct rb_node *node;
        unsigned int base, top;
        size_t mem_size;
        int nodes = 0;
        int registers = 0;
        int this_registers, average;

        map->lock(map->lock_arg);

        mem_size = sizeof(*rbtree_ctx);

        for (node = rb_first(&rbtree_ctx->root); node != NULL;
             node = rb_next(node)) {
                n = rb_entry(node, struct regcache_rbtree_node, node);
                mem_size += sizeof(*n);
                mem_size += (n->blklen * map->cache_word_size);
                mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

                regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                this_registers = ((top - base) / map->reg_stride) + 1;
                seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

                nodes++;
                registers += this_registers;
        }

        if (nodes)
                average = registers / nodes;
        else
                average = 0;

        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
                   nodes, registers, average, mem_size);

        map->unlock(map->lock_arg);

        return 0;
}

/*static*/ int rbtree_open(struct inode *inode, struct file *file)
{
        return single_open(file, rbtree_show, inode->i_private);
}

/*static*/ const struct file_operations rbtree_fops = {
        .open = rbtree_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/*static*/ void rbtree_debugfs_init(struct regmap *map)
{
        debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
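
/*
 * Example "rbtree" debugfs output (hypothetical numbers; the format
 * strings come from the seq_printf() calls above):
 *
 *      0-1f (32)
 *      40-43 (4)
 *      2 nodes, 36 registers, average 18 registers, used 412 bytes
 */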

/*static*/ int regcache_rbtree_init(struct regmap *map)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        int i;
        int ret;
//pr_warn("regcache_rbtree_init");
#ifdef TARGET_OS2
        // 2020-11-17 SHL FIXME patched struct rb_root
        struct rb_root _RB_ROOT = { NULL, };
#endif

        map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;

        rbtree_ctx = map->cache;
#ifndef TARGET_OS2
        rbtree_ctx->root = RB_ROOT;
#else
        rbtree_ctx->root = _RB_ROOT;
        rbtree_ctx->root.rb_node = NULL;
        memset(&rbtree_ctx->root, 0, sizeof(struct rb_root));
#endif
        rbtree_ctx->cached_rbnode = NULL;
//pr_warn("regcache_rbtree_init - num_reg_defaults = %x",map->num_reg_defaults);
        for (i = 0; i < map->num_reg_defaults; i++) {
                ret = regcache_rbtree_write(map,
                                            map->reg_defaults[i].reg,
                                            map->reg_defaults[i].def);
                if (ret)
                        goto err;
        }

        return 0;

err:
        regcache_rbtree_exit(map);
        return ret;
}

/*static*/ int regcache_rbtree_exit(struct regmap *map)
{
        struct rb_node *next;
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbtree_node;

        /* if we've already been called then just return */
        rbtree_ctx = map->cache;
        if (!rbtree_ctx)
                return 0;

        /* free up the rbtree */
        next = rb_first(&rbtree_ctx->root);
        while (next) {
                rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
                next = rb_next(&rbtree_node->node);
                rb_erase(&rbtree_node->node, &rbtree_ctx->root);
                kfree(rbtree_node->cache_present);
                kfree(rbtree_node->block);
                kfree(rbtree_node);
        }

        /* release the resources */
        kfree(map->cache);
        map->cache = NULL;

        return 0;
}

/*static*/ int regcache_rbtree_read(struct regmap *map,
                                    unsigned int reg, unsigned int *value)
{
        struct regcache_rbtree_node *rbnode;
        unsigned int reg_tmp;
//pr_warn("regcache_rbtree_read");
        rbnode = regcache_rbtree_lookup(map, reg);
        if (rbnode) {
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                if (!test_bit(reg_tmp, rbnode->cache_present)) {
//pr_warn("regcache_rbtree_read - -ENOENT");
                        return -ENOENT;
                }
                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
        } else {
//pr_warn("regcache_rbtree_read2 - -ENOENT");
                return -ENOENT;
        }
//pr_warn("regcache_rbtree_read - success");
        return 0;
}
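
/*
 * Minimal read sketch (hypothetical register number): -ENOENT is
 * returned both when no block covers the register and when a block
 * exists but that register was never written into the cache.
 *
 *      unsigned int val;
 *
 *      if (regcache_rbtree_read(map, 0x10, &val) == 0)
 *              ; // val now holds the cached value of register 0x10
 */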

/*static*/ int regcache_rbtree_insert_to_block(struct regmap *map,
                                               struct regcache_rbtree_node *rbnode,
                                               unsigned int base_reg,
                                               unsigned int top_reg,
                                               unsigned int reg,
                                               unsigned int value)
{
        unsigned int blklen;
        unsigned int pos, offset;
        unsigned long *present;
        u8 *blk;
//pr_warn("regcache_rbtree_insert_to_block");
        blklen = (top_reg - base_reg) / map->reg_stride + 1;
        pos = (reg - base_reg) / map->reg_stride;
        offset = (rbnode->base_reg - base_reg) / map->reg_stride;

        blk = krealloc(rbnode->block,
                       blklen * map->cache_word_size,
                       GFP_KERNEL);
        if (!blk)
                return -ENOMEM;

        if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
                present = krealloc(rbnode->cache_present,
                                   BITS_TO_LONGS(blklen) * sizeof(*present),
                                   GFP_KERNEL);
                if (!present) {
                        kfree(blk);
                        return -ENOMEM;
                }

                memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
                       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
                       * sizeof(*present));
        } else {
                present = rbnode->cache_present;
        }

        /* insert the register value in the correct place in the rbnode block */
        if (pos == 0) {
                memmove(blk + offset * map->cache_word_size,
                        blk, rbnode->blklen * map->cache_word_size);
                bitmap_shift_left(present, present, offset, blklen);
        }

        /* update the rbnode block, its size and the base register */
        rbnode->block = blk;
        rbnode->blklen = blklen;
        rbnode->base_reg = base_reg;
        rbnode->cache_present = present;

        regcache_rbtree_set_register(map, rbnode, pos, value);
        return 0;
}
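
/*
 * Worked example of the resize math above (hypothetical values,
 * reg_stride == 1): growing a block with base_reg == 0x12 and
 * blklen == 4 downwards to reg == 0x10 gives new base_reg = 0x10 and
 * top_reg = 0x15, so blklen = 6, pos = 0 and offset = 2. Because
 * pos == 0, the old entries are moved up by offset slots and the
 * present bitmap is shifted to match before the new value is written
 * at the front of the block.
 */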

/*static*/ struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
        struct regcache_rbtree_node *rbnode;
        const struct regmap_range *range;
        int i;
//pr_warn("regcache_rbtree_node_alloc");
        rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
        if (!rbnode)
                return NULL;
//pr_warn("regcache_rbtree_node_alloc2");
        /* If there is a read table then use it to guess at an allocation */
        if (map->rd_table) {
//pr_warn("regcache_rbtree_node_alloc3");
                for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
                        if (regmap_reg_in_range(reg,
                                                &map->rd_table->yes_ranges[i]))
                                break;
                }
//pr_warn("regcache_rbtree_node_alloc4");
                if (i != map->rd_table->n_yes_ranges) {
                        range = &map->rd_table->yes_ranges[i];
                        rbnode->blklen = (range->range_max - range->range_min) /
                                map->reg_stride + 1;
                        rbnode->base_reg = range->range_min;
                }
        }
        if (!rbnode->blklen) {
//pr_warn("regcache_rbtree_node_alloc5");
                rbnode->blklen = 1;
                rbnode->base_reg = reg;
        }

        rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
                                      GFP_KERNEL);
        if (!rbnode->block)
                goto err_free;

        rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
                                        sizeof(*rbnode->cache_present),
                                        GFP_KERNEL);
        if (!rbnode->cache_present)
                goto err_free_block;

        return rbnode;

err_free_block:
        kfree(rbnode->block);
err_free:
        kfree(rbnode);
        return NULL;
}
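
/*
 * Sizing note with a hypothetical rd_table: if the map's read table has
 * a yes_range covering 0x00..0x0f with reg_stride == 1, the first write
 * to any register in that range allocates one 16-entry block up front.
 * Without a read table hit, the block starts at a single register and
 * grows later via regcache_rbtree_insert_to_block().
 */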

/*static*/ int regcache_rbtree_write(struct regmap *map, unsigned int reg,
                                     unsigned int value)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode, *rbnode_tmp;
        struct rb_node *node;
        unsigned int reg_tmp;
        int ret;
//pr_warn("regcache_rbtree_write, reg = %u, value = %u", reg, value);
        rbtree_ctx = map->cache;

        /* if we can't locate it in the cached rbnode we'll have
         * to traverse the rbtree looking for it.
         */
        rbnode = regcache_rbtree_lookup(map, reg);
//pr_warn("regcache_rbtree_write - rbnode = %x",rbnode);
        if (rbnode) {
//pr_warn("regcache_rbtree_write - got rbnode");
                reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
                regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
        } else {
//pr_warn("regcache_rbtree_write - no rbnode");
                unsigned int base_reg, top_reg;
                unsigned int new_base_reg, new_top_reg;
                unsigned int min, max;
                unsigned int max_dist;
                unsigned int dist, best_dist = UINT_MAX;

                max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
                        map->cache_word_size;
                if (reg < max_dist)
                        min = 0;
                else
                        min = reg - max_dist;
                max = reg + max_dist;

                /* look for an adjacent register to the one we are about to add */
                node = rbtree_ctx->root.rb_node;
                while (node) {
//pr_warn("regcache_rbtree_write - no rbnode2");
                        rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
                                              node);

                        regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
                                                         &base_reg, &top_reg);

                        if (base_reg <= max && top_reg >= min) {
                                if (reg < base_reg)
                                        dist = base_reg - reg;
                                else if (reg > top_reg)
                                        dist = reg - top_reg;
                                else
                                        dist = 0;
                                if (dist < best_dist) {
                                        rbnode = rbnode_tmp;
                                        best_dist = dist;
                                        new_base_reg = min(reg, base_reg);
                                        new_top_reg = max(reg, top_reg);
                                }
                        }

                        /*
                         * Keep looking, we want to choose the closest block,
                         * otherwise we might end up creating overlapping
                         * blocks, which breaks the rbtree.
                         */
                        if (reg < base_reg)
                                node = node->rb_left;
                        else if (reg > top_reg)
                                node = node->rb_right;
                        else
                                break;
                }

                if (rbnode) {
                        ret = regcache_rbtree_insert_to_block(map, rbnode,
                                                              new_base_reg,
                                                              new_top_reg, reg,
                                                              value);
                        if (ret) {
//pr_warn("regcache_write - ret from regcache_rbtree_insert_to_block = %d",ret);
                                return ret;
                        }
                        rbtree_ctx->cached_rbnode = rbnode;
//pr_warn("regcache_write - rbtree_ctx->cached_rbnode = %x",rbtree_ctx->cached_rbnode);
                        return 0;
                }

                /* We did not manage to find a place to insert it in
                 * an existing block so create a new rbnode.
                 */
                rbnode = regcache_rbtree_node_alloc(map, reg);
                if (!rbnode) {
//pr_warn("regcache_write - ENOMEM");
                        return -ENOMEM;
                }
                regcache_rbtree_set_register(map, rbnode,
                                             reg - rbnode->base_reg, value);
                regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
                rbtree_ctx->cached_rbnode = rbnode;
        }
//pr_warn("regcache_write - rbtree_ctx->cached_rbnode2 = %x",rbtree_ctx->cached_rbnode);
        return 0;
}
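
/*
 * Worked example of the adjacency window above (hypothetical sizes):
 * with reg_stride == 1, sizeof(*rbnode_tmp) == 32 and
 * cache_word_size == 2, max_dist is 16, so a write to register 0x40
 * considers existing blocks overlapping [0x30, 0x50] for extension
 * before falling back to allocating a fresh node.
 */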

/*static*/ int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                                    unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct rb_node *node;
        struct regcache_rbtree_node *rbnode;
        unsigned int base_reg, top_reg;
        unsigned int start, end;
        int ret;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                ret = regcache_sync_block(map, rbnode->block,
                                          rbnode->cache_present,
                                          rbnode->base_reg, start, end);
                if (ret != 0)
                        return ret;
        }

        return regmap_async_complete(map);
}
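
/*
 * Clamping example (hypothetical values, reg_stride == 1): syncing
 * min = 0x12, max = 0x15 against a block with base_reg = 0x10 and
 * blklen = 16 yields start = 2 and end = 6, so only the cache entries
 * for registers 0x12..0x15 are considered. regcache_rbtree_drop()
 * below reuses the same windowing to clear the present bits.
 */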

/*static*/ int regcache_rbtree_drop(struct regmap *map, unsigned int min,
                                    unsigned int max)
{
        struct regcache_rbtree_ctx *rbtree_ctx;
        struct regcache_rbtree_node *rbnode;
        struct rb_node *node;
        unsigned int base_reg, top_reg;
        unsigned int start, end;

        rbtree_ctx = map->cache;
        for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
                rbnode = rb_entry(node, struct regcache_rbtree_node, node);

                regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
                                                 &top_reg);
                if (base_reg > max)
                        break;
                if (top_reg < min)
                        continue;

                if (min > base_reg)
                        start = (min - base_reg) / map->reg_stride;
                else
                        start = 0;

                if (max < top_reg)
                        end = (max - base_reg) / map->reg_stride + 1;
                else
                        end = rbnode->blklen;

                bitmap_clear(rbnode->cache_present, start, end - start);
        }

        return 0;
}

struct regcache_ops regcache_rbtree_ops = {
        .type = REGCACHE_RBTREE,
        .name = "rbtree",
        .init = regcache_rbtree_init,
        .exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init = rbtree_debugfs_init,
#endif
        .read = regcache_rbtree_read,
        .write = regcache_rbtree_write,
        .sync = regcache_rbtree_sync,
        .drop = regcache_rbtree_drop,
};