source: GPL/branches/uniaud32-exp/lib32/devres.c@ 737

Last change on this file since 737 was 737, checked in by Paul Smedley, 3 years ago

Initial commit of 5.17.15

File size: 12.9 KB
Line 
1/*
2 * drivers/base/devres.c - device resource management
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/device.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/gfp.h>
14#include <linux/errno.h>
15#include <asm/io.h>
16
/*
 * Bookkeeping header shared by every managed resource.  It sits at the
 * head of struct devres and struct devres_group and links the resource
 * into the owning device's devres_head list.
 */
struct devres_node {
	struct list_head entry;		/* link in dev->devres_head */
	dr_release_t release;		/* release callback; its address also
					 * identifies group markers, see
					 * node_to_group() */
	const char *name;		/* debug info, set_node_dbginfo() */
	size_t size;			/* debug info: payload size */
};
23
/*
 * A devres node followed by its payload.  Callers of devres_alloc() get
 * a pointer to @data; container_of() recovers the node from it.
 */
struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	u8 data[];	/* flexible array; layout guarantees unsigned long long
			 * alignment of the payload */
};
29
/*
 * A group is represented by a pair of marker nodes on the devres list:
 * node[0] is the opening marker, node[1] the closing one.  The markers
 * are recognized by their release callbacks (group_open_release /
 * group_close_release).
 */
struct devres_group {
	struct devres_node node[2];	/* [0] open marker, [1] close marker */
	void *id;			/* group identifier; NOTE(review): no
					 * user visible in this chunk -- confirm */
	int color;			/* scratch counter for the two-pass scan
					 * in remove_nodes() */
	/* -- 8 pointers */
};
36
37static void set_node_dbginfo(struct devres_node *node, const char *name,
38 size_t size)
39{
40 node->name = name;
41 node->size = size;
42}
43
/* Devres logging is compiled out in this port; keep call sites as no-ops. */
#define devres_log(dev, node, op) do {} while (0)
45
46/*
47 * Release functions for devres group. These callbacks are used only
48 * for identification.
49 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop - the address of this function marks a group's opening node */
}
54
static void group_close_release(struct device *dev, void *res)
{
	/* noop - the address of this function marks a group's closing node */
}
59
60static struct devres_group * node_to_group(struct devres_node *node)
61{
62 if (node->release == &group_open_release)
63 return container_of(node, struct devres_group, node[0]);
64 if (node->release == &group_close_release)
65 return container_of(node, struct devres_group, node[1]);
66 return NULL;
67}
68
69static inline struct devres * alloc_dr(dr_release_t release,
70 size_t size, gfp_t gfp, int nid)
71{
72 size_t tot_size = sizeof(struct devres) + size;
73 struct devres *dr;
74
75 dr = kmalloc_node_track_caller(tot_size, gfp, nid);
76
77 memset(dr, 0, offsetof(struct devres, data));
78
79 INIT_LIST_HEAD(&dr->node.entry);
80 dr->node.release = release;
81 return dr;
82}
83
/*
 * Link @node onto @dev's resource list.  The caller must hold
 * dev->devres_lock, and @node must not already be on a list.
 */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
90
91/**
92 * devres_add - Register device resource
93 * @dev: Device to add resource to
94 * @res: Resource to register
95 *
96 * Register devres @res to @dev. @res should have been allocated
97 * using devres_alloc(). On driver detach, the associated release
98 * function will be invoked and devres will be freed automatically.
99 */
100void devres_add(struct device *dev, void *res)
101{
102 struct devres *dr = container_of(res, struct devres, data);
103 unsigned long flags;
104
105 spin_lock_irqsave(&dev->devres_lock, flags);
106 add_dr(dev, &dr->node);
107 spin_unlock_irqrestore(&dev->devres_lock, flags);
108}
109
110/**
111 * __devres_alloc_node - Allocate device resource data
112 * @release: Release function devres will be associated with
113 * @size: Allocation size
114 * @gfp: Allocation flags
115 * @nid: NUMA node
116 * @name: Name of the resource
117 *
118 * Allocate devres of @size bytes. The allocated area is zeroed, then
119 * associated with @release. The returned pointer can be passed to
120 * other devres_*() functions.
121 *
122 * RETURNS:
123 * Pointer to allocated devres on success, NULL on failure.
124 */
125void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
126 const char *name)
127{
128 struct devres *dr;
129
130 dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
131 if (unlikely(!dr))
132 return NULL;
133 set_node_dbginfo(&dr->node, name, size);
134 return dr->data;
135}
136
137/**
138 * devres_free - Free device resource data
139 * @res: Pointer to devres data to free
140 *
141 * Free devres created with devres_alloc().
142 */
143void devres_free(void *res)
144{
145 if (res) {
146 struct devres *dr = container_of(res, struct devres, data);
147
148 BUG_ON(!list_empty(&dr->node.entry));
149 kfree(dr);
150 }
151}
152
/*
 * Unlink devres nodes in [first, end) from the device list and collect
 * them on @todo for later release.  Group markers are handled with a
 * two-pass coloring scheme so that only groups wholly contained in the
 * range are removed.  Caller must hold dev->devres_lock.
 *
 * Returns the number of entries moved to @todo (groups count once via
 * their opening marker).
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		/* advance before unlinking the current node */
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	/* No group markers seen - nothing left to scan. */
	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		/* only group markers remain after the first pass */
		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		/* an open (never closed) group counts as fully contained */
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
221
222static int release_nodes(struct device *dev, struct list_head *first,
223 struct list_head *end, unsigned long flags)
224{
225// LIST_HEAD(todo);
226 struct list_head todo;
227
228 int cnt;
229 struct devres *dr, *tmp;
230
231 cnt = remove_nodes(dev, first, end, &todo);
232
233 spin_unlock_irqrestore(&dev->devres_lock, flags);
234
235 /* Release. Note that both devres and devres_group are
236 * handled as devres in the following loop. This is safe.
237 */
238 list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry, struct devres) {
239 devres_log(dev, &dr->node, "REL");
240 dr->node.release(dev, dr->data);
241 kfree(dr);
242 }
243
244 return cnt;
245}
246
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 *
 * Locking: acquires dev->devres_lock; release_nodes() drops it before
 * invoking the release callbacks.
 *
 * RETURNS:
 * Number of released entries, or -ENODEV if @dev looks uninitialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
265
/*
 * Find the most recently added devres on @dev associated with @release
 * for which @match (if non-NULL) returns true.  Iterates newest-first.
 * Caller must hold dev->devres_lock.  Returns NULL if not found.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
283
284/**
285 * devres_find - Find device resource
286 * @dev: Device to lookup resource from
287 * @release: Look for resources associated with this release function
288 * @match: Match function (optional)
289 * @match_data: Data for the match function
290 *
291 * Find the latest devres of @dev which is associated with @release
292 * and for which @match returns 1. If @match is NULL, it's considered
293 * to match all.
294 *
295 * RETURNS:
296 * Pointer to found devres, NULL if not found.
297 */
298void * devres_find(struct device *dev, dr_release_t release,
299 dr_match_t match, void *match_data)
300{
301 struct devres *dr;
302 unsigned long flags;
303
304 spin_lock_irqsave(&dev->devres_lock, flags);
305 dr = find_dr(dev, release, match, match_data);
306 spin_unlock_irqrestore(&dev->devres_lock, flags);
307
308 if (dr)
309 return dr->data;
310 return NULL;
311}
312
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */
317
/* Payload for devm_add_action(): the callback and its argument. */
struct action_devres {
	void *data;		/* opaque argument passed to @action */
	void (*action)(void *);	/* invoked on driver detach */
};
322
323static void devm_action_release(struct device *dev, void *res)
324{
325 struct action_devres *devres = res;
326
327 devres->action(devres->data);
328}
329
330/**
331 * devm_add_action() - add a custom action to list of managed resources
332 * @dev: Device that owns the action
333 * @action: Function that should be called
334 * @data: Pointer to data passed to @action implementation
335 *
336 * This adds a custom action to the list of managed resources so that
337 * it gets executed as part of standard resource unwinding.
338 */
339int devm_add_action(struct device *dev, void (*action)(void *), void *data)
340{
341 struct action_devres *devres;
342
343 devres = devres_alloc(devm_action_release,
344 sizeof(struct action_devres), GFP_KERNEL);
345 if (!devres)
346 return -ENOMEM;
347
348 devres->data = data;
349 devres->action = action;
350
351 devres_add(dev, devres);
352
353 return 0;
354}
355
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 *
 * NOTE(review): this port implements the function as a NO-OP -- the
 * action is NOT removed and will still run at detach time.  Callers
 * relying on removal semantics should be audited; implementing this
 * needs devres_destroy(), which is not present in this file.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
}
368
/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop - the payload is freed together with its devres container */
}
376
377/**
378 * devm_kmalloc - Resource-managed kmalloc
379 * @dev: Device to allocate memory for
380 * @size: Allocation size
381 * @gfp: Allocation gfp flags
382 *
383 * Managed kmalloc. Memory allocated with this function is
384 * automatically freed on driver detach. Like all other devres
385 * resources, guaranteed alignment is unsigned long long.
386 *
387 * RETURNS:
388 * Pointer to allocated memory on success, NULL on failure.
389 */
390void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
391{
392 struct devres *dr;
393
394 if (unlikely(!size))
395 return ZERO_SIZE_PTR;
396
397 /* use raw alloc_dr for kmalloc caller tracing */
398 dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
399 if (unlikely(!dr))
400 return NULL;
401
402 /*
403 * This is named devm_kzalloc_release for historical reasons
404 * The initial implementation did not support kmalloc, only kzalloc
405 */
406 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
407 devres_add(dev, dr->data);
408 return dr->data;
409}
410EXPORT_SYMBOL_GPL(devm_kmalloc);
411
/* Mapping flavor requested from __devm_ioremap(). */
enum devm_ioremap_type {
	DEVM_IOREMAP = 0,	/* plain ioremap() - the only one wired up */
	DEVM_IOREMAP_UC,	/* uncached - compiled out in this port */
	DEVM_IOREMAP_WC,	/* write-combining - compiled out */
	DEVM_IOREMAP_NP,	/* non-posted - compiled out */
};
418
/* Devres release callback: @res holds the mapped address to iounmap. */
void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}
423
/*
 * Common worker for the managed ioremap variants: allocate a devres
 * slot holding the mapped address, perform the mapping, and register
 * it so devm_ioremap_release() unmaps it on detach.
 *
 * Returns the mapped address, or NULL on allocation or mapping failure.
 */
static void *__devm_ioremap(struct device *dev, resource_size_t offset,
			    resource_size_t size,
			    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	/* UC/WC/NP variants are not available in this port; requesting
	 * them leaves addr NULL and the function fails cleanly below. */
#if 0
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
#endif
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
459
/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 *
 * RETURNS:
 * Mapped address on success, NULL on failure.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
474
475/**
476 * devm_kvasprintf - Allocate resource managed space and format a string
477 * into that.
478 * @dev: Device to allocate memory for
479 * @gfp: the GFP mask used in the devm_kmalloc() call when
480 * allocating memory
481 * @fmt: The printf()-style format string
482 * @ap: Arguments for the format string
483 * RETURNS:
484 * Pointer to allocated string on success, NULL on failure.
485 */
486char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
487 va_list ap)
488{
489 unsigned int len;
490 char *p;
491 va_list aq;
492
493 va_copy(aq, ap);
494 len = vsnprintf(NULL, 0, fmt, aq);
495 va_end(aq);
496
497 p = devm_kmalloc(dev, len+1, gfp);
498 if (!p)
499 return NULL;
500
501 vsnprintf(p, len+1, fmt, ap);
502
503 return p;
504}
505EXPORT_SYMBOL(devm_kvasprintf);
506/**
507 * devm_kasprintf - Allocate resource managed space and format a string
508 * into that.
509 * @dev: Device to allocate memory for
510 * @gfp: the GFP mask used in the devm_kmalloc() call when
511 * allocating memory
512 * @fmt: The printf()-style format string
513 * @...: Arguments for the format string
514 * RETURNS:
515 * Pointer to allocated string on success, NULL on failure.
516 */
517char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
518{
519 va_list ap;
520 char *p;
521
522 va_start(ap, fmt);
523 p = devm_kvasprintf(dev, gfp, fmt, ap);
524 va_end(ap);
525
526 return p;
527}
528EXPORT_SYMBOL_GPL(devm_kasprintf);
Note: See TracBrowser for help on using the repository browser.