source: GPL/branches/uniaud32-next/lib32/devres.c

Last change on this file was 745, checked in by Paul Smedley, 3 years ago

Add some additional functions to minimise upstream patches

/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/io.h>

struct devres_node {
	struct list_head entry;
	dr_release_t release;
	const char *name;
	size_t size;
};

struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	u8 data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#define devres_log(dev, node, op)	do {} while (0)

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static inline struct devres *alloc_dr(dr_release_t release,
				      size_t size, gfp_t gfp, int nid)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));
79
80 INIT_LIST_HEAD(&dr->node.entry);
81 dr->node.release = release;
82 return dr;
83}
84
85static void add_dr(struct device *dev, struct devres_node *node)
86{
87 devres_log(dev, node, "ADD");
88 BUG_ON(!list_empty(&node->entry));
89 list_add_tail(&node->entry, &dev->devres_head);
90}
91
92/**
93 * devres_add - Register device resource
94 * @dev: Device to add resource to
95 * @res: Resource to register
96 *
97 * Register devres @res to @dev. @res should have been allocated
98 * using devres_alloc(). On driver detach, the associated release
99 * function will be invoked and devres will be freed automatically.
100 */
101void devres_add(struct device *dev, void *res)
102{
103 struct devres *dr = container_of(res, struct devres, data);
104 unsigned long flags;
105
106 spin_lock_irqsave(&dev->devres_lock, flags);
107 add_dr(dev, &dr->node);
108 spin_unlock_irqrestore(&dev->devres_lock, flags);
109}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
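
/*
 * Usage sketch (hypothetical driver code, compiled out): the typical
 * devres_alloc()/devres_add() pairing. struct my_res, my_res_release()
 * and the irq field are made-up names.
 */
#if 0
struct my_res {
	int irq;
};

static void my_res_release(struct device *dev, void *res)
{
	struct my_res *r = res;
	/* undo whatever the resource represents, e.g. free r->irq */
}

static int my_setup(struct device *dev)
{
	struct my_res *r;

	r = devres_alloc(my_res_release, sizeof(*r), GFP_KERNEL);
	if (!r)
		return -ENOMEM;
	r->irq = 5;		/* acquire the resource here */
	devres_add(dev, r);	/* released automatically on detach */
	return 0;
}
#endif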

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
{
	/* open-coded LIST_HEAD(todo): declare first, then initialize
	 * explicitly so the list head is never used uninitialized
	 */
	struct list_head todo;
	int cnt;
	struct devres *dr, *tmp;

	INIT_LIST_HEAD(&todo);

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry, struct devres) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
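
/*
 * Usage sketch (hypothetical, compiled out): a dr_match_t callback,
 * reusing the made-up struct my_res from the sketch above. A match
 * function returns 1 for the entry the caller is looking for.
 */
#if 0
static int my_res_match(struct device *dev, void *res, void *match_data)
{
	struct my_res *r = res;

	return r->irq == *(int *)match_data;
}

/* ... found = devres_find(dev, my_res_release, my_res_match, &wanted_irq); */
#endif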

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void *devres_get(struct device *dev, void *new_res,
		 dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
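
/* For an in-file example of this find-or-create pattern, see
 * pcim_iomap_table() below: it allocates a candidate table, then lets
 * devres_get() either register it or discard it in favor of an
 * existing one.
 */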

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);

	return 0;
}
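
/*
 * Usage sketch (hypothetical, compiled out): registering a one-shot
 * cleanup with devm_add_action(). struct my_hw and my_hw_disable()
 * are made-up names.
 */
#if 0
static void my_hw_disable(void *data)
{
	struct my_hw *hw = data;
	/* e.g. mask interrupts, stop DMA */
}

static int my_probe_step(struct device *dev, struct my_hw *hw)
{
	/* my_hw_disable(hw) will run automatically on driver detach */
	return devm_add_action(dev, my_hw_disable, hw);
}
#endif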

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	/* Stub in this port: the action is left registered and will
	 * still run on driver detach.
	 */
}

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
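
/*
 * Usage sketch (hypothetical, compiled out): per-device state allocated
 * with devm_kmalloc() needs no explicit kfree() in error or teardown
 * paths. struct my_state is a made-up name.
 */
#if 0
static int my_probe(struct device *dev)
{
	struct my_state *st;

	st = devm_kmalloc(dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;
	/* st is freed automatically when the driver detaches */
	return 0;
}
#endif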

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static void *__devm_ioremap(struct device *dev, resource_size_t offset,
			    resource_size_t size,
			    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
#if 0
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
#endif
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
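
/*
 * Usage sketch (hypothetical, compiled out): mapping a memory resource
 * with devm_ioremap(); the unmap happens automatically on detach. The
 * struct my_hw name, its regs member and the address values are made up.
 */
#if 0
static int my_map_regs(struct device *dev, struct my_hw *hw)
{
	hw->regs = devm_ioremap(dev, 0xfebf0000, 0x1000);
	if (!hw->regs)
		return -ENOMEM;
	return 0;
}
#endif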

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 * into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 * into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
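
/*
 * Usage sketch (hypothetical, compiled out): building a managed name
 * string inside a probe routine; the format and arguments are made up.
 */
#if 0
	char *name = devm_kasprintf(dev, GFP_KERNEL, "card%d-pcm%d", 0, 1);

	if (!name)
		return -ENOMEM;
	/* name lives until driver detach; no explicit kfree() needed */
#endif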

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
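
/*
 * Usage sketch (hypothetical, compiled out): requesting and mapping
 * BAR 0 with pcim_iomap_regions(), then fetching the mapping from the
 * iomap table. The function and the "my_driver" name are made up.
 */
#if 0
static int my_probe_bars(struct pci_dev *pdev)
{
	void __iomem *regs;
	int rc;

	rc = pcim_iomap_regions(pdev, 1 << 0, "my_driver");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[0];
	/* regions are released and unmapped automatically on detach */
	return 0;
}
#endif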