Changeset 772 for GPL/trunk/lib32/devres.c
- Timestamp: Apr 19, 2025, 8:08:37 PM
- Location: GPL/trunk
- Files: 2 edited
GPL/trunk
- Property svn:mergeinfo changed:
  - /GPL/branches/uniaud32-6.6-LTS (added): merged 765, 768-769
  - /GPL/branches/uniaud32-exp (added): merged 735-741, 743-744, 748-751, 753-760, 762-764
  - /GPL/branches/uniaud32-next: merged 718-734
GPL/trunk/lib32/devres.c (r717 → r772)
 #include <linux/device.h>
+#include <linux/pci.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
+#include <asm/io.h>

 struct devres_node {
 	struct list_head entry;
 	dr_release_t release;
-#ifdef CONFIG_DEBUG_DEVRES
 	const char *name;
 	size_t size;
-#endif
 };
…
 	struct devres_node node;
 	/* -- 3 pointers */
-	unsigned long long data[];	/* guarantee ull alignment */
+	u8 data[];			/* guarantee ull alignment */
 };
…
 };

+static void set_node_dbginfo(struct devres_node *node, const char *name,
+			     size_t size)
+{
+	node->name = name;
+	node->size = size;
+}
+
 #define devres_log(dev, node, op) do {} while (0)
…
 }

-#define devres_log(dev, node, op) do {} while (0)
-
 static void add_dr(struct device *dev, struct devres_node *node)
 {
 	devres_log(dev, node, "ADD");
 	BUG_ON(!list_empty(&node->entry));
-//#ifndef TARGET_OS2
-	/* Traps here on OS/2 */
 	list_add_tail(&node->entry, &dev->devres_head);
-//#endif
 }
…
 void devres_add(struct device *dev, void *res)
 {
-	/* Traps here on OS/2 */
 	struct devres *dr = container_of(res, struct devres, data);
 	unsigned long flags;
+
 	spin_lock_irqsave(&dev->devres_lock, flags);
 	add_dr(dev, &dr->node);
…
 /**
- * devres_alloc - Allocate device resource data
+ * __devres_alloc_node - Allocate device resource data
  * @release: Release function devres will be associated with
  * @size: Allocation size
  * @gfp: Allocation flags
  * @nid: NUMA node
+ * @name: Name of the resource
  *
  * Allocate devres of @size bytes. The allocated area is zeroed, then
…
  * Pointer to allocated devres on success, NULL on failure.
  */
-void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
+			  const char *name)
 {
 	struct devres *dr;

 	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
+	if (unlikely(!dr))
+		return NULL;
+	set_node_dbginfo(&dr->node, name, size);
 	return dr->data;
 }
…
 }

-static int release_nodes(struct device *dev, struct list_head *first,
-			 struct list_head *end, unsigned long flags)
-{
-//	LIST_HEAD(todo);
-	struct list_head todo;
-
-	int cnt;
+static void release_nodes(struct device *dev, struct list_head *todo)
+{
 	struct devres *dr, *tmp;
-
-	cnt = remove_nodes(dev, first, end, &todo);
-
-	spin_unlock_irqrestore(&dev->devres_lock, flags);

 	/* Release. Note that both devres and devres_group are
 	 * handled as devres in the following loop. This is safe.
 	 */
-	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry, struct devres) {
+	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry, struct devres) {
 		devres_log(dev, &dr->node, "REL");
 		dr->node.release(dev, dr->data);
 		kfree(dr);
 	}
-
-	return cnt;
 }
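For context, the calling pattern these primitives expect looks roughly like the sketch below (hypothetical driver code, not part of this changeset): a resource is allocated with devres_alloc(), initialized, and then attached with devres_add() so that its release callback runs automatically when the device is detached.

	struct my_res { int val; };		/* hypothetical per-device resource */

	static void my_res_release(struct device *dev, void *res)
	{
		/* undo whatever was set up for the resource pointed to by res */
	}

	static int my_setup(struct device *dev)
	{
		struct my_res *p = devres_alloc(my_res_release, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		p->val = 1;			/* ... initialize the resource ... */
		devres_add(dev, p);		/* released via my_res_release() on detach */
		return 0;
	}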
…
 {
 	unsigned long flags;
+	struct list_head todo;
+	int cnt;

 	/* Looks like an uninitialized device structure */
 	if (WARN_ON(dev->devres_head.next == NULL))
 		return -ENODEV;
+
+	/* Nothing to release if list is empty */
+	if (list_empty(&dev->devres_head))
+		return 0;
+
 	spin_lock_irqsave(&dev->devres_lock, flags);
-	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
-			     flags);
-}
+	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+	release_nodes(dev, &todo);
+	return cnt;
+}
+
+/**
+ * devres_open_group - Open a new devres group
+ * @dev: Device to open devres group for
+ * @id: Separator ID
+ * @gfp: Allocation flags
+ *
+ * Open a new devres group for @dev with @id. For @id, using a
+ * pointer to an object which won't be used for another group is
+ * recommended. If @id is NULL, address-wise unique ID is created.
+ *
+ * RETURNS:
+ * ID of the new group, NULL on failure.
+ */
+void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+
+	grp = kmalloc(sizeof(*grp), gfp);
+	if (unlikely(!grp))
+		return NULL;
+
+	grp->node[0].release = &group_open_release;
+	grp->node[1].release = &group_close_release;
+	INIT_LIST_HEAD(&grp->node[0].entry);
+	INIT_LIST_HEAD(&grp->node[1].entry);
+	set_node_dbginfo(&grp->node[0], "grp<", 0);
+	set_node_dbginfo(&grp->node[1], "grp>", 0);
+	grp->id = grp;
+	if (id)
+		grp->id = id;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	add_dr(dev, &grp->node[0]);
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+	return grp->id;
+}
+EXPORT_SYMBOL_GPL(devres_open_group);
+
+/* Find devres group with ID @id. If @id is NULL, look for the latest. */
+static struct devres_group * find_group(struct device *dev, void *id)
+{
+	struct devres_node *node;
+
+	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
+		struct devres_group *grp;
+
+		if (node->release != &group_open_release)
+			continue;
+
+		grp = container_of(node, struct devres_group, node[0]);
+
+		if (id) {
+			if (grp->id == id)
+				return grp;
+		} else if (list_empty(&grp->node[1].entry))
+			return grp;
+	}
+
+	return NULL;
+}
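The group API added above is normally wrapped around a batch of managed allocations so they can be rolled back as one unit. A minimal sketch, assuming a hypothetical my_allocate_things() helper that only performs devm_*-style allocations (illustrative only, not part of this changeset):

	static int my_probe_stage(struct device *dev)
	{
		void *grp;
		int rc;

		grp = devres_open_group(dev, NULL, GFP_KERNEL);
		if (!grp)
			return -ENOMEM;

		rc = my_allocate_things(dev);		/* hypothetical helper */
		if (rc)
			devres_release_group(dev, grp);	/* rolls back only this group */
		return rc;
	}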
+/**
+ * devres_release_group - Release resources in a devres group
+ * @dev: Device to release group for
+ * @id: ID of target group, can be NULL
+ *
+ * Release all resources in the group identified by @id. If @id is
+ * NULL, the latest open group is selected. The selected group and
+ * groups properly nested inside the selected group are removed.
+ *
+ * RETURNS:
+ * The number of released non-group resources.
+ */
+int devres_release_group(struct device *dev, void *id)
+{
+	struct devres_group *grp;
+	unsigned long flags;
+	struct list_head todo;
+	int cnt = 0;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+
+	grp = find_group(dev, id);
+	if (grp) {
+		struct list_head *first = &grp->node[0].entry;
+		struct list_head *end = &dev->devres_head;
+
+		if (!list_empty(&grp->node[1].entry))
+			end = grp->node[1].entry.next;
+
+		cnt = remove_nodes(dev, first, end, &todo);
+		spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+		release_nodes(dev, &todo);
+	} else {
+		WARN_ON(1);
+		spin_unlock_irqrestore(&dev->devres_lock, flags);
+	}
+
+	return cnt;
+}
+EXPORT_SYMBOL_GPL(devres_release_group);

 static struct devres *find_dr(struct device *dev, dr_release_t release,
…
 /**
+ * devres_get - Find devres, if non-existent, add one atomically
+ * @dev: Device to lookup or add devres for
+ * @new_res: Pointer to new initialized devres to add if not found
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev which has the same release function
+ * as @new_res and for which @match return 1. If found, @new_res is
+ * freed; otherwise, @new_res is added atomically.
+ *
+ * RETURNS:
+ * Pointer to found or added devres.
+ */
+void * devres_get(struct device *dev, void *new_res,
+		  dr_match_t match, void *match_data)
+{
+	struct devres *new_dr = container_of(new_res, struct devres, data);
+	struct devres *dr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->devres_lock, flags);
+	dr = find_dr(dev, new_dr->node.release, match, match_data);
+	if (!dr) {
+		add_dr(dev, &new_dr->node);
+		dr = new_dr;
+		new_res = NULL;
+	}
+	spin_unlock_irqrestore(&dev->devres_lock, flags);
+	devres_free(new_res);
+
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(devres_get);
+
+/*
+ * Custom devres actions allow inserting a simple function call
+ * into the teardown sequence.
+ */
+
+struct action_devres {
+	void *data;
+	void (*action)(void *);
+};
+
+static void devm_action_release(struct device *dev, void *res)
+{
+	struct action_devres *devres = res;
+
+	devres->action(devres->data);
+}
+
+/**
  * devm_add_action() - add a custom action to list of managed resources
  * @dev: Device that owns the action
…
 int devm_add_action(struct device *dev, void (*action)(void *), void *data)
 {
+	struct action_devres *devres;
+
+	devres = devres_alloc(devm_action_release,
+			      sizeof(struct action_devres), GFP_KERNEL);
+	if (!devres)
+		return -ENOMEM;
+
+	devres->data = data;
+	devres->action = action;
+
+	devres_add(dev, devres);
+
 	return 0;
 }
…
 {
 }
+
+/*
+ * Managed kmalloc/kfree
+ */
+static void devm_kmalloc_release(struct device *dev, void *res)
+{
+	/* noop */
+}
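With this change devm_add_action() actually records the callback instead of returning success without doing anything. A typical use, sketched here with a hypothetical my_teardown()/my_ctx pair (illustrative only), ties an arbitrary cleanup step to the device lifetime:

	static void my_teardown(void *data)
	{
		/* undo whatever was set up; 'data' is the cookie passed below */
	}

	static int my_init(struct device *dev, void *my_ctx)
	{
		/* my_ctx is a hypothetical cookie owned by the caller */
		return devm_add_action(dev, my_teardown, my_ctx);
	}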
+/**
+ * devm_kmalloc - Resource-managed kmalloc
+ * @dev: Device to allocate memory for
+ * @size: Allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed kmalloc. Memory allocated with this function is
+ * automatically freed on driver detach. Like all other devres
+ * resources, guaranteed alignment is unsigned long long.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+	struct devres *dr;
+
+	if (unlikely(!size))
+		return ZERO_SIZE_PTR;
+
+	/* use raw alloc_dr for kmalloc caller tracing */
+	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
+	if (unlikely(!dr))
+		return NULL;
+
+	/*
+	 * This is named devm_kzalloc_release for historical reasons
+	 * The initial implementation did not support kmalloc, only kzalloc
+	 */
+	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
+	devres_add(dev, dr->data);
+	return dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_kmalloc);
+
+enum devm_ioremap_type {
+	DEVM_IOREMAP = 0,
+	DEVM_IOREMAP_UC,
+	DEVM_IOREMAP_WC,
+	DEVM_IOREMAP_NP,
+};
+
+void devm_ioremap_release(struct device *dev, void *res)
+{
+	iounmap(*(void __iomem **)res);
+}
+
+static void *__devm_ioremap(struct device *dev, resource_size_t offset,
+			    resource_size_t size,
+			    enum devm_ioremap_type type)
+{
+	void __iomem **ptr, *addr = NULL;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	switch (type) {
+	case DEVM_IOREMAP:
+		addr = ioremap(offset, size);
+		break;
+#if 0
+	case DEVM_IOREMAP_UC:
+		addr = ioremap_uc(offset, size);
+		break;
+	case DEVM_IOREMAP_WC:
+		addr = ioremap_wc(offset, size);
+		break;
+	case DEVM_IOREMAP_NP:
+		addr = ioremap_np(offset, size);
+		break;
+#endif
+	}
+
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+			   resource_size_t size)
+{
+	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_kvasprintf - Allocate resource managed space and format a string
+ *		     into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * @fmt: The printf()-style format string
+ * @ap: Arguments for the format string
+ *
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+		      va_list ap)
+{
+	unsigned int len;
+	char *p;
+	va_list aq;
+
+	va_copy(aq, ap);
+	len = vsnprintf(NULL, 0, fmt, aq);
+	va_end(aq);
+
+	p = devm_kmalloc(dev, len+1, gfp);
+	if (!p)
+		return NULL;
+
+	vsnprintf(p, len+1, fmt, ap);
+
+	return p;
+}
+EXPORT_SYMBOL(devm_kvasprintf);
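The managed allocators above follow the upstream pattern: memory and mappings are tied to the device and released automatically on detach. A minimal sketch, assuming hypothetical register-window parameters passed in by the caller (illustrative only, not part of this changeset):

	struct my_state { void __iomem *regs; };	/* hypothetical per-device state */

	static int my_map_regs(struct device *dev, resource_size_t start,
			       resource_size_t len)
	{
		struct my_state *st;
		void __iomem *regs;

		st = devm_kmalloc(dev, sizeof(*st), GFP_KERNEL | __GFP_ZERO);
		regs = devm_ioremap(dev, start, len);
		if (!st || !regs)
			return -ENOMEM;		/* whatever did succeed is freed on detach */
		st->regs = regs;
		return 0;
	}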
+/**
+ * devm_kasprintf - Allocate resource managed space and format a string
+ *		    into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * @fmt: The printf()-style format string
+ * @...: Arguments for the format string
+ *
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
+{
+	va_list ap;
+	char *p;
+
+	va_start(ap, fmt);
+	p = devm_kvasprintf(dev, gfp, fmt, ap);
+	va_end(ap);
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf);
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS
+
+struct pcim_iomap_devres {
+	void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+	struct pci_dev *dev = to_pci_dev(gendev);
+	struct pcim_iomap_devres *this = res;
+	int i;
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (this->table[i])
+			pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @dev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+	struct pcim_iomap_devres *dr, *new_dr;
+
+	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+	if (dr)
+		return dr->table;
+
+	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+	if (!new_dr)
+		return NULL;
+	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+	return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pcim_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+	void __iomem **tbl;
+
+	BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
+		return NULL;
+
+	tbl[bar] = pci_iomap(pdev, bar, maxlen);
+	return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+	void __iomem **tbl;
+	int i;
+
+	pci_iounmap(pdev, addr);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	BUG_ON(!tbl);
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (tbl[i] == addr) {
+			tbl[i] = NULL;
+			return;
+		}
+	WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
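A PCI driver usually obtains a BAR mapping once through pcim_iomap() and can later read the cached pointer back from pcim_iomap_table(). A minimal sketch for BAR 0 (illustrative only, not part of this changeset):

	static int my_pci_map_bar0(struct pci_dev *pdev)
	{
		void __iomem *mmio;

		mmio = pcim_iomap(pdev, 0, 0);	/* map all of BAR 0 */
		if (!mmio)
			return -ENOMEM;
		/* the same pointer stays available as pcim_iomap_table(pdev)[0] */
		return 0;
	}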
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+	void __iomem * const *iomap;
+	int i, rc;
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		unsigned long len;
+
+		if (!(mask & (1 << i)))
+			continue;
+
+		rc = -EINVAL;
+		len = pci_resource_len(pdev, i);
+		if (!len)
+			goto err_inval;
+
+		rc = pci_request_region(pdev, i, (char*)name);
+		if (rc)
+			goto err_inval;
+
+		rc = -ENOMEM;
+		if (!pcim_iomap(pdev, i, 0))
+			goto err_region;
+	}
+
+	return 0;
+
+ err_region:
+	pci_release_region(pdev, i);
+ err_inval:
+	while (--i >= 0) {
+		if (!(mask & (1 << i)))
+			continue;
+		pcim_iounmap(pdev, iomap[i]);
+		pci_release_region(pdev, i);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
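pcim_iomap_regions() combines pci_request_region() and pcim_iomap() for every BAR set in @mask and unwinds everything it did on failure, so a probe routine only needs one call. A sketch with a hypothetical driver name (illustrative only, not part of this changeset):

	static int my_probe(struct pci_dev *pdev)
	{
		void __iomem *mmio;
		int rc;

		rc = pcim_iomap_regions(pdev, 1 << 0, "my_driver");	/* BAR 0 only */
		if (rc)
			return rc;
		mmio = pcim_iomap_table(pdev)[0];
		(void)mmio;	/* register accesses would go through mmio */
		return 0;
	}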