Changeset 615 for GPL/branches/uniaud32-next/lib32/pci.c
- Timestamp:
- Jan 1, 2021, 5:31:48 AM (5 years ago)
- Location:
- GPL/branches/uniaud32-next
- Files:
- 1 edited
- 1 copied
Legend:
- Unmodified
- Added
- Removed
GPL/branches/uniaud32-next/lib32/pci.c
r604 r615 25 25 * 26 26 */ 27 27 #define CONFIG_PM 28 28 #include "linux.h" 29 29 #include <linux/init.h> 30 30 #include <linux/poll.h> 31 #include <linux/dma-mapping.h> 31 32 #include <asm/uaccess.h> 32 33 #include <asm/hardirq.h> … … 183 184 memset((void near *)pcidev, 0, sizeof(struct pci_dev)); 184 185 185 pcidev-> _class = ulTmp2;186 pcidev->class = ulTmp2; 186 187 pcidev->vendor = detectedId & 0xffff; 187 188 pcidev->device = detectedId >> 16; … … 201 202 pcidev->sibling = NULL; 202 203 pcidev->next = NULL; 203 pcidev->dma_mask = 0xFFFFFFFF; 204 pcidev->dma_mask = 0xffffffff; 205 pcidev->dev.dma_mask = &pcidev->dma_mask; 206 pcidev->dev.coherent_dma_mask = 0xffffffffull; 207 pr_warn("params set"); 204 208 205 209 // Subsystem ID 206 210 pci_read_config_word(pcidev, PCI_SUBSYSTEM_VENDOR_ID, &pcidev->subsystem_vendor); 207 211 pci_read_config_word(pcidev, PCI_SUBSYSTEM_ID, &pcidev->subsystem_device); 212 213 // revision 214 pci_read_config_byte(pcidev, PCI_REVISION_ID, &pcidev->revision); 208 215 209 216 // I/O and MEM … … 500 507 501 508 rprintf(("pci_register_driver: query_device found %x %x:%x class=%x checking %s", 502 ulLast, pcidev->vendor, pcidev->device, pcidev-> _class, driver->name));509 ulLast, pcidev->vendor, pcidev->device, pcidev->class, driver->name)); 503 510 504 511 for( iTableIx = 0; driver->id_table[iTableIx].vendor; iTableIx++) … … 506 513 struct pci_device_id const *pDriverId = &driver->id_table[iTableIx]; 507 514 508 if ( (pDriverId->class) && ((pcidev-> _class & pDriverId->class_mask) != pDriverId->class) ) continue;515 if ( (pDriverId->class) && ((pcidev->class & pDriverId->class_mask) != pDriverId->class) ) continue; 509 516 if (pDriverId->vendor != pcidev->vendor) continue; 510 517 if ( (pDriverId->device != PCI_ANY_ID) && (pDriverId->device != pcidev->device) ) continue; 511 518 512 519 rprintf(("pci_register_driver: matched %d %x:%x/%x with %x:%x/%x %x (%s)", iTableIx, 513 pcidev->vendor, pcidev->device, pcidev-> _class,520 
pcidev->vendor, pcidev->device, pcidev->class, 514 521 pDriverId->vendor, pDriverId->device, pDriverId->class, pDriverId->class_mask, driver->name)); 515 522 … … 645 652 dprintf(("pci_alloc_consistent %d mask %x", size, (hwdev) ? hwdev->dma_mask : 0)); 646 653 if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) { 654 dprintf(("pci_alloc_consistent")); 655 pr_warn("dma_mask = %x",hwdev->dma_mask); 647 656 //try not to exhaust low memory (< 16mb) so allocate from the high region first 648 657 //if that doesn't satisfy the dma mask requirement, then get it from the low 649 //regi noanyway658 //region anyway 650 659 if(hwdev->dma_mask > 0x00ffffff) { 660 dprintf(("pci_alloc_consistent2")); 661 order = __compat_get_order(size); 662 dprintf(("pci_alloc_consistent3")); 663 ret = (void *)__get_free_pages(gfp|GFP_DMAHIGHMEM, order); 664 dprintf(("pci_alloc_consistent4")); 665 *dma_handle = virt_to_bus(ret); 666 if(*dma_handle > hwdev->dma_mask) { 667 dprintf(("pci_alloc_consistent5")); 668 free_pages((unsigned long)ret, __compat_get_order(size)); 669 dprintf(("pci_alloc_consistent6")); 670 //be sure and allocate below 16 mb 671 gfp |= GFP_DMA; 672 ret = NULL; 673 } 674 dprintf(("pci_alloc_consistent6a")); 675 } 676 else { //must always allocate below 16 mb 677 dprintf(("pci_alloc_consistent7")); 678 gfp |= GFP_DMA; 679 } 680 dprintf(("pci_alloc_consistent7a")); 681 } 682 dprintf(("pci_alloc_consistent8")); 683 if(ret == NULL) { 684 dprintf(("pci_alloc_consistent9")); 685 ret = (void *)__get_free_pages(gfp, __compat_get_order(size)); 686 } 687 dprintf(("pci_alloc_consistent10")); 688 if (ret != NULL) { 689 dprintf(("pci_alloc_consistent11")); 690 memset(ret, 0, size); 691 *dma_handle = virt_to_bus(ret); 692 } 693 return ret; 694 } 695 696 #if 0 697 void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, 698 dma_addr_t *dma_handle) 699 { 700 return dma_alloc_coherent(hwdev == NULL ? 
NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); 701 } 702 #endif 703 #if 0 704 void *dma_alloc_coherent(struct device *dev, size_t size, 705 dma_addr_t *dma_handle, gfp_t gfp) 706 { 707 void *ret = NULL; 708 int order; 709 710 dprintf(("dma_alloc_coherent %d mask %x", size, (dev) ? dev->dma_mask : 0)); 711 if (dev == NULL || *dev->dma_mask != 0xffffffff) { 712 dprintf(("dma_alloc_coherent")); 713 //try not to exhaust low memory (< 16mb) so allocate from the high region first 714 //if that doesn't satisfy the dma mask requirement, then get it from the low 715 //region anyway 716 if(*dev->dma_mask > 0x00ffffff) { 717 dprintf(("dma_alloc_coherent2")); 651 718 order = __compat_get_order(size); 652 719 ret = (void *)__get_free_pages(gfp|GFP_DMAHIGHMEM, order); 653 720 *dma_handle = virt_to_bus(ret); 654 if(*dma_handle > hwdev->dma_mask) { 721 if(*dma_handle > *dev->dma_mask) { 722 dprintf(("dma_alloc_coherent3")); 655 723 free_pages((unsigned long)ret, __compat_get_order(size)); 656 724 //be sure and allocate below 16 mb … … 658 726 ret = NULL; 659 727 } 728 dprintf(("dma_alloc_coherent3a")); 660 729 } 661 730 else { //must always allocate below 16 mb 731 dprintf(("dma_alloc_coherent4")); 662 732 gfp |= GFP_DMA; 663 733 } 664 734 } 665 735 if(ret == NULL) { 736 dprintf(("dma_alloc_coherent5")); 666 737 ret = (void *)__get_free_pages(gfp, __compat_get_order(size)); 667 738 } … … 672 743 } 673 744 return ret; 745 746 } 747 #endif 748 749 int dma_supported(struct device *dev, u64 mask) 750 { 751 pr_warn("dma_supported"); 752 return 1; 753 } 754 755 int dma_set_coherent_mask(struct device *dev, u64 mask) 756 { 757 pr_warn("dma_set_coherent_mask"); 758 /* 759 * Truncate the mask to the actually supported dma_addr_t width to 760 * avoid generating unsupportable addresses. 
761 */ 762 mask = (dma_addr_t)mask; 763 764 if (!dma_supported(dev, mask)) 765 return -EIO; 766 767 dev->coherent_dma_mask = mask; 768 return 0; 769 } 770 771 int dma_set_mask(struct device *dev, u64 mask) 772 { 773 pr_warn("dma_set_mask"); 774 /* 775 * Truncate the mask to the actually supported dma_addr_t width to 776 * avoid generating unsupportable addresses. 777 */ 778 mask = (dma_addr_t)mask; 779 780 if (!dev->dma_mask || !dma_supported(dev, mask)) 781 return -EIO; 782 783 *dev->dma_mask = mask; 784 return 0; 674 785 } 675 786 … … 888 999 } 889 1000 890 const struct pci_device_id * pci_match_ device(const struct pci_device_id *ids, struct pci_dev *dev)1001 const struct pci_device_id * pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev) 891 1002 { 892 1003 u16 subsystem_vendor, subsystem_device; … … 900 1011 (ids->subvendor == PCI_ANY_ID || ids->subvendor == subsystem_vendor) && 901 1012 (ids->subdevice == PCI_ANY_ID || ids->subdevice == subsystem_device) && 902 !((ids->class ^ dev-> _class) & ids->class_mask))1013 !((ids->class ^ dev->class) & ids->class_mask)) 903 1014 return ids; 904 1015 ids++;
Note:
See TracChangeset
for help on using the changeset viewer.