source: GPL/branches/uniaud32-exp/lib32/devres.c@ 753

Last change on this file since 753 was 753, checked in by Paul Smedley, 3 years ago

Code cleanups + disable unsupported HDMI code for now

File size: 20.1 KB
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/io.h>

struct devres_node {
	struct list_head entry;
	dr_release_t release;
	const char *name;
	size_t size;
};

struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	u8 data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#define devres_log(dev, node, op) do {} while (0)

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static inline struct devres *alloc_dr(dr_release_t release,
				      size_t size, gfp_t gfp, int nid)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
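
/*
 * Usage sketch (illustrative only, excluded from the build): a driver
 * allocates a managed object with devres_alloc(), fills it in and
 * registers it with devres_add() so the release callback runs
 * automatically on detach. struct my_state, my_release() and
 * my_probe() are hypothetical names, not part of this file.
 */
#if 0
struct my_state {
	int irq;
};

static void my_release(struct device *dev, void *res)
{
	struct my_state *st = res;
	/* undo whatever my_probe() set up for @st */
}

static int my_probe(struct device *dev)
{
	struct my_state *st;

	st = devres_alloc(my_release, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;
	st->irq = 5;		/* example value */
	devres_add(dev, st);	/* my_release() runs on driver detach */
	return 0;
}
#endif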

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry, struct devres) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	struct list_head todo;
	int cnt;

	INIT_LIST_HEAD(&todo);

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void *devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id. If @id is NULL, look for the latest. */
static struct devres_group *find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	struct list_head todo;
	int cnt = 0;

	INIT_LIST_HEAD(&todo);

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
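
/*
 * Usage sketch (illustrative only, excluded from the build): grouping
 * several managed allocations so one phase of initialization can be
 * unwound as a unit before driver detach. my_init_phase() is a
 * hypothetical helper.
 */
#if 0
static int my_init_phase(struct device *dev)
{
	void *group;
	void *buf;

	group = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!group)
		return -ENOMEM;

	buf = devm_kmalloc(dev, 256, GFP_KERNEL);
	if (!buf) {
		/* drop everything allocated since the group was opened */
		devres_release_group(dev, group);
		return -ENOMEM;
	}

	/* on success the group stays registered and is released on detach */
	return 0;
}
#endif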

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
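
/*
 * Usage sketch (illustrative only, excluded from the build): looking up
 * a previously registered resource by its release function. my_release
 * and struct my_state refer to the hypothetical names from the earlier
 * sketch.
 */
#if 0
static struct my_state *my_find_state(struct device *dev)
{
	/* NULL match function: any devres registered with my_release matches */
	return devres_find(dev, my_release, NULL, NULL);
}
#endif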

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
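
/*
 * Usage sketch (illustrative only, excluded from the build): the
 * find-or-create idiom. A zeroed candidate is allocated up front and
 * devres_get() either registers it or frees it and returns the entry
 * already on the list; pcim_iomap_table() below uses the same pattern.
 * my_release and struct my_state are the hypothetical names from the
 * earlier sketch.
 */
#if 0
static struct my_state *my_get_state(struct device *dev)
{
	struct my_state *new_st;

	new_st = devres_alloc(my_release, sizeof(*new_st), GFP_KERNEL);
	if (!new_st)
		return NULL;
	/* returns the existing entry if one is found, @new_st otherwise */
	return devres_get(dev, new_st, NULL, NULL);
}
#endif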

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);

	return 0;
}
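
/*
 * Usage sketch (illustrative only, excluded from the build): registering
 * a plain function as a managed cleanup step. my_disable_hw() and
 * my_enable_hw() are hypothetical helpers; struct my_state is the
 * hypothetical type from the earlier sketch.
 */
#if 0
static void my_disable_hw(void *data)
{
	struct my_state *st = data;
	/* quiesce the hardware described by @st */
}

static int my_enable_hw(struct device *dev, struct my_state *st)
{
	/* ... enable hardware ... */

	/* my_disable_hw(st) is called automatically on driver detach */
	return devm_add_action(dev, my_disable_hw, st);
}
#endif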

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	/* noop */
}

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
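
/*
 * Usage sketch (illustrative only, excluded from the build): a buffer
 * allocated with devm_kmalloc() needs no explicit kfree(); it is freed
 * automatically when the owning device is detached. my_setup_buffer()
 * is a hypothetical helper.
 */
#if 0
static int my_setup_buffer(struct device *dev, size_t len)
{
	u8 *buf = devm_kmalloc(dev, len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, len);	/* devm_kmalloc() does not zero the memory */
	return 0;
}
#endif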

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static void *__devm_ioremap(struct device *dev, resource_size_t offset,
			    resource_size_t size,
			    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
#if 0
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
#endif
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
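
/*
 * Usage sketch (illustrative only, excluded from the build): mapping an
 * MMIO region for the lifetime of the device; the iounmap() happens via
 * devm_ioremap_release() on detach. my_map_regs() is a hypothetical
 * helper.
 */
#if 0
static void __iomem *my_map_regs(struct device *dev,
				 resource_size_t start, resource_size_t len)
{
	/* no explicit iounmap() needed; unmapped on driver detach */
	return devm_ioremap(dev, start, len);
}
#endif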

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
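
/*
 * Usage sketch (illustrative only, excluded from the build): building a
 * managed, formatted name string. my_make_name() is a hypothetical
 * helper.
 */
#if 0
static char *my_make_name(struct device *dev, int index)
{
	/* freed automatically on detach, like any devm_kmalloc() buffer */
	return devm_kasprintf(dev, GFP_KERNEL, "card%d", index);
}
#endif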

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @dev. If iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pcim_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
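
/*
 * Usage sketch (illustrative only, excluded from the build): mapping a
 * single BAR with pcim_iomap() and reading the mapping back through
 * pcim_iomap_table(). my_map_bar0() is a hypothetical helper.
 */
#if 0
static void __iomem *my_map_bar0(struct pci_dev *pdev)
{
	void __iomem *mmio = pcim_iomap(pdev, 0, 0);	/* maxlen 0: whole BAR */

	if (!mmio)
		return NULL;
	/* the same pointer is now visible as pcim_iomap_table(pdev)[0] */
	return mmio;
}
#endif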

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
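
/*
 * Usage sketch (illustrative only, excluded from the build): a probe
 * routine requesting and mapping BARs 0 and 2 in one call. On failure
 * pcim_iomap_regions() has already unwound its own work; on success the
 * regions and mappings are released automatically on driver detach.
 * my_pci_probe() and the "mydrv" region name are hypothetical.
 */
#if 0
static int my_pci_probe(struct pci_dev *pdev)
{
	void __iomem *bar0;
	int rc;

	rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "mydrv");
	if (rc)
		return rc;

	bar0 = pcim_iomap_table(pdev)[0];
	/* ... use bar0 and pcim_iomap_table(pdev)[2] ... */
	return 0;
}
#endif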