source: GPL/branches/uniaud32-next/lib32/devres.c@ 719

Last change on this file since 719 was 719, checked in by Paul Smedley, 3 years ago

Tidy ups, and fix non-HDA hardware

/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/io.h>

struct devres_node {
	struct list_head entry;
	dr_release_t release;
	const char *name;
	size_t size;
};

struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	u8 data[];	/* guarantee unsigned long long alignment */
};
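/*
 * A devres is laid out as [devres_node header | user data]; callers only
 * ever see the data pointer, and container_of() recovers the header from
 * it (see devres_add() and devres_free() below).
 */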

struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#define devres_log(dev, node, op)	do {} while (0)

/*
 * Release functions for devres group. These no-op callbacks are used
 * only for identification: their addresses act as type tags that let
 * node_to_group() distinguish group markers from regular devres entries.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static inline struct devres * alloc_dr(dr_release_t release,
				       size_t size, gfp_t gfp, int nid)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
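
/*
 * Usage sketch (kept out of the build): a hypothetical caller that
 * allocates a devres, registers it, and frees it by hand on an error
 * path. struct my_res, my_res_release() and get_token() are
 * illustrative names, not part of this file.
 */
#if 0
struct my_res {
	int token;
};

static void my_res_release(struct device *dev, void *res)
{
	/* undo whatever acquiring ((struct my_res *)res)->token did */
}

static int my_acquire(struct device *dev)
{
	struct my_res *p;

	p = devres_alloc(my_res_release, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->token = get_token();
	if (p->token < 0) {
		devres_free(p);		/* not registered yet, free by hand */
		return -ENODEV;
	}

	devres_add(dev, p);		/* released automatically on detach */
	return 0;
}
#endif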

#if 0 //2022-09-02
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
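	/* Example: if the markers left after the first pass are
	 * { open(A), close(A), open(B) } and B's close marker lies
	 * outside the range, A is colored 2 (whole group in range,
	 * moved to @todo) while B is colored 1 and left in place. An
	 * unclosed group scores 1 + 1 = 2 via the list_empty() test
	 * below and is likewise released.
	 */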
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
{
	/* LIST_HEAD() is not used here; set up the list head by hand */
	struct list_head todo;

	int cnt;
	struct devres *dr, *tmp;

	INIT_LIST_HEAD(&todo);
	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry, struct devres) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
#endif

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
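
/*
 * Usage sketch (kept out of the build): looking up a previously
 * registered devres, reusing the hypothetical my_res/my_res_release
 * names from the sketch above. The match callback returns 1 for the
 * entry the caller wants.
 */
#if 0
static int my_res_match(struct device *dev, void *res, void *match_data)
{
	struct my_res *p = res;

	return p->token == *(int *)match_data;
}

static struct my_res *my_res_lookup(struct device *dev, int token)
{
	return devres_find(dev, my_res_release, my_res_match, &token);
}
#endif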

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);

	return 0;
}
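
/*
 * Usage sketch (kept out of the build): undoing a manual enable step
 * on detach. struct foo, enable_foo() and disable_foo() are
 * illustrative stand-ins for any enable/disable pair. Note that on
 * -ENOMEM the caller still has to undo the step itself.
 */
#if 0
struct foo {
	int enabled;
};

static int enable_foo(struct foo *foo)
{
	foo->enabled = 1;
	return 0;
}

static void disable_foo(void *data)
{
	((struct foo *)data)->enabled = 0;
}

static int my_probe_step(struct device *dev, struct foo *foo)
{
	int ret = enable_foo(foo);

	if (ret)
		return ret;

	/* from here on, disable_foo(foo) runs on driver detach */
	ret = devm_add_action(dev, disable_foo, foo);
	if (ret)
		disable_foo(foo);
	return ret;
}
#endif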

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Upstream this removes an instance of @action previously added by
 * devm_add_action(), matching on both @action and @data. In this port
 * it is a stub.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
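	/*
	 * No-op in this port: the action devres stays registered, so
	 * @action will still run on driver detach.
	 */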
}

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons
	 * The initial implementation did not support kmalloc, only kzalloc
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
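
/*
 * Usage sketch (kept out of the build): the typical probe() pattern.
 * struct my_card is hypothetical driver state; no kfree() is needed
 * since the allocation is dropped when the device is detached. Pass
 * gfp | __GFP_ZERO (what devm_kzalloc() does upstream) to get zeroed
 * memory.
 */
#if 0
struct my_card {
	void __iomem *base;
	int irq;
};

static int my_probe(struct device *dev)
{
	struct my_card *card;

	card = devm_kmalloc(dev, sizeof(*card), GFP_KERNEL | __GFP_ZERO);
	if (!card)
		return -ENOMEM;

	/* ... fill in card->base, card->irq, register the card ... */
	return 0;
}
#endif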

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void *__devm_ioremap(struct device *dev, resource_size_t offset,
			    resource_size_t size,
			    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
#if 0
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
#endif
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
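
/*
 * Usage sketch (kept out of the build): mapping PCI BAR 0 with the
 * managed variant so no explicit iounmap() is needed on any exit
 * path. Assumes the usual pci_resource_start()/pci_resource_len()
 * helpers and a struct pci_dev with an embedded struct device.
 */
#if 0
static int my_map_bar0(struct pci_dev *pci, void __iomem **base)
{
	*base = devm_ioremap(&pci->dev, pci_resource_start(pci, 0),
			     pci_resource_len(pci, 0));
	return *base ? 0 : -ENOMEM;
}
#endif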