Changeset 651
- Timestamp: Jan 23, 2021, 10:25:24 PM (5 years ago)
- Location: GPL/branches/uniaud32-next
- Files: 3 edited
GPL/branches/uniaud32-next/include/linux/rbtree.h
Diff r625 → r651 (rbtree headers resynced from Linux 4.19.163 to 5.10.10):

- Replaced the full GPL license boilerplate with an SPDX tag: /* SPDX-License-Identifier: GPL-2.0-or-later */.
- Updated the documentation reference from Documentation/rbtree.txt to Documentation/core-api/rbtree.rst and the version marker comment from /* from 4.19.163 */ to /* from 5.10.10 */.
- Moved the leftmost-cached rbtree API (struct rb_root_cached, RB_ROOT_CACHED, rb_first_cached()) from the middle of the header to the end of the file, together with its explanatory comment (the rightmost node is deliberately not cached; users who want O(1) rb_last() can implement that themselves).
- rb_insert_color_cached(), rb_erase_cached() and rb_replace_node_cached() are no longer extern declarations backed by lib32/rbtree.c; following upstream 5.10 they are now static inline wrappers in the header that maintain root->rb_leftmost themselves and then call rb_insert_color(), rb_erase() and rb_replace_node() on the embedded rb_root.
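For context (not part of the changeset): the cached helpers above are driven by the usual caller-side descend-and-link walk, with the caller reporting whether the new node became the leftmost one. A minimal sketch, assuming a hypothetical struct my_node keyed by an unsigned long:

/*
 * Illustrative sketch only: struct my_node and my_insert() are hypothetical.
 * The caller performs the ordinary rbtree descent and merely tells the
 * cached helper whether the new node ended up leftmost.
 */
#include <linux/rbtree.h>

struct my_node {
        struct rb_node rb;
        unsigned long key;
};

static void my_insert(struct rb_root_cached *root, struct my_node *node)
{
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
                struct my_node *entry = rb_entry(*link, struct my_node, rb);

                parent = *link;
                if (node->key < entry->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&node->rb, parent, link);
        /* Maintains root->rb_leftmost, then rebalances via rb_insert_color(). */
        rb_insert_color_cached(&node->rb, root, leftmost);
}

After such an insert, rb_first_cached(root) returns the smallest key in O(1), which is the point of the cached variant.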
GPL/branches/uniaud32-next/include/linux/rbtree_augmented.h
Diff r625 → r651 (resynced from Linux 4.19.163 to 5.10.10):

- Replaced the GPL license boilerplate with the SPDX tag, updated the documentation reference to Documentation/core-api/rbtree.rst, and bumped the version marker comment to /* from 5.10.10 */.
- __rb_insert_augmented() no longer takes the newleft/leftmost parameters; leftmost caching is handled by the callers instead.
- rb_insert_augmented() and rb_insert_augmented_cached() are plain static inline functions again (the /*static inline*/ workaround is gone); rb_insert_augmented_cached() now updates root->rb_leftmost itself before delegating to rb_insert_augmented(). The comment that incorrectly referred to rb_augment_inserted() now names rb_insert_augmented().
- RB_DECLARE_CALLBACKS() was rewritten with documented upper-case parameters (RBSTATIC, RBNAME, RBSTRUCT, RBFIELD, RBAUGMENTED, RBCOMPUTE); the RBCOMPUTE callback now takes (node, exit) and returns bool, so the propagate loop stops as soon as the callback reports no change instead of comparing the cached value inside the macro.
- Added the new RB_DECLARE_CALLBACKS_MAX() template, which generates an RBNAME ## _compute_max() helper maintaining RBAUGMENTED as the maximum of RBCOMPUTE(node) over each subtree and then expands RB_DECLARE_CALLBACKS() with it.
- rb_set_parent(), rb_set_parent_color(), __rb_change_child(), __rb_change_child_rcu(), __rb_erase_augmented(), rb_erase_augmented() and rb_erase_augmented_cached() are static inline again.
- __rb_erase_augmented() dropped its leftmost parameter, and the successor bookkeeping was simplified: successor->__rb_parent_color = pc is assigned once after the child2 branch, with the rebalance decision based on rb_is_black(successor).
- rb_erase_augmented_cached() now updates root->rb_leftmost (via rb_next()) before calling rb_erase_augmented(), instead of threading a leftmost pointer down into __rb_erase_augmented().
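To illustrate the new template (again, not part of the changeset): the pattern below mirrors the upstream interval-tree style of use. struct itnode, itnode_last() and the itnode_aug name are hypothetical; only the macro and its parameter order come from the header above.

/*
 * Illustrative sketch only: subtree_last caches the maximum 'last' value
 * over each node's subtree.
 */
#include <linux/rbtree_augmented.h>

struct itnode {
        struct rb_node rb;
        unsigned long start;
        unsigned long last;
        unsigned long subtree_last;     /* the RBAUGMENTED field */
};

/* Per-node scalar that the template maximizes over the subtree. */
static inline unsigned long itnode_last(struct itnode *node)
{
        return node->last;
}

/*
 * Expands to itnode_aug_compute_max(), itnode_aug_propagate(),
 * itnode_aug_copy(), itnode_aug_rotate() and a static
 * struct rb_augment_callbacks itnode_aug, suitable for passing to
 * rb_insert_augmented() / rb_erase_augmented().
 */
RB_DECLARE_CALLBACKS_MAX(static, itnode_aug,
                         struct itnode, rb,
                         unsigned long, subtree_last, itnode_last)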
GPL/branches/uniaud32-next/lib32/rbtree.c
Diff r625 → r651 (resynced from Linux 4.19.163 to 5.10.10):

- Replaced the GPL license boilerplate with the SPDX tag, bumped the version marker comment to /* from 5.10.10 */, and switched the red-black tree reference URL to https://en.wikipedia.org/wiki/Rbtree.
- The internal helpers (rb_set_black(), rb_red_parent(), __rb_rotate_set_parents(), __rb_insert(), ____rb_erase_color() and the dummy_propagate/copy/rotate callbacks) are static / static inline again instead of carrying the /*static inline*/ workaround.
- __rb_insert() and __rb_insert_augmented() dropped the newleft/leftmost parameters, and the unlikely(!parent) annotation in the insert loop was restored.
- rb_insert_color() and rb_erase() now call the trimmed-down __rb_insert() / __rb_erase_augmented() signatures.
- rb_insert_color_cached(), rb_erase_cached() and rb_replace_node_cached() were removed from this file; they now live as static inline wrappers in include/linux/rbtree.h.
- Minor whitespace fixes in rb_next() and rb_prev().
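As a small usage note (not part of the changeset): the postorder iterators implemented in this file are what make wholesale tree teardown cheap, since children are always visited before their parent. A sketch, assuming a hypothetical struct my_node allocated with kmalloc():

/*
 * Illustrative sketch only: frees every node of a tree without any
 * rebalancing work, using rbtree_postorder_for_each_entry_safe().
 */
#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
        struct rb_node rb;
        unsigned long key;
};

static void my_free_tree(struct rb_root *root)
{
        struct my_node *pos, *n;

        /* Each node is freed only after both of its children. */
        rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
                kfree(pos);

        *root = RB_ROOT;
}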