source: GPL/branches/uniaud32-next/lib32/devres.c@ 718

Last change on this file since 718 was 718, checked in by Paul Smedley, 3 years ago

WIP trying to fix non-HDA Hardware

File size: 12.0 KB
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/io.h>

struct devres_node {
	struct list_head entry;
	dr_release_t release;
	const char *name;
	size_t size;
};

struct devres {
	struct devres_node node;
	/* -- 3 pointers */
	u8 data[];	/* guarantee ull alignment */
};

struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

#define devres_log(dev, node, op) do {} while (0)

/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group *node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}

static inline struct devres *alloc_dr(dr_release_t release,
				      size_t size, gfp_t gfp, int nid)
{
	size_t tot_size = sizeof(struct devres) + size;
	struct devres *dr;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;	/* callers already handle NULL */

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
//#ifndef TARGET_OS2
	/* Traps here on OS/2 - release builds on non-HDA hardware only */
	rprintf(("add_dr"));
	list_add_tail(&node->entry, &dev->devres_head);
	rprintf(("add_dr2"));
//#endif
}

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}

/**
 * __devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 * @name: Name of the resource
 *
 * Allocate devres of @size bytes. The allocated area is zeroed, then
 * associated with @release. The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			  const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
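
/*
 * Usage sketch (illustrative only; "my_state" and "my_release" are
 * hypothetical names, not part of this file): a caller pairs
 * devres_alloc() with devres_add(), and uses devres_free() only on an
 * error path before the resource has been registered:
 *
 *	static void my_release(struct device *dev, void *res)
 *	{
 *		struct my_state *state = res;
 *		// undo whatever probe set up in @state
 *	}
 *
 *	state = devres_alloc(my_release, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *	if (setup_failed) {
 *		devres_free(state);	// not registered yet, free by hand
 *		return -EIO;
 *	}
 *	devres_add(dev, state);		// released automatically on detach
 */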

#if 0 //2022-09-02
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [cur, end). That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
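
/*
 * Worked example of the coloring rule above (illustrative): suppose
 * [first, end) holds open_A, res1, close_A, open_B.  Pass one moves
 * res1 to @todo and zeroes both group colors.  Pass two increments
 * group A once per marker (color 2, so it moves to @todo); group B
 * was never closed, so its lone open marker plus the empty node[1]
 * entry also give color 2.  A closed group whose closing marker lies
 * beyond @end only reaches color 1 and is left alone.
 */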

static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
{
	struct list_head todo;	/* open-coded LIST_HEAD(todo) */
	int cnt;
	struct devres *dr, *tmp;

	/* LIST_HEAD() also initializes; a bare declaration leaves garbage */
	INIT_LIST_HEAD(&todo);
	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry, struct devres) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}
#endif

static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry, struct devres_node) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}

/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1. If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void *devres_find(struct device *dev, dr_release_t release,
		  dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
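
/*
 * Lookup sketch (illustrative; "my_release" and "my_match" are
 * hypothetical): a match callback distinguishes between several
 * resources sharing one release function:
 *
 *	static int my_match(struct device *dev, void *res, void *data)
 *	{
 *		return res == data;	// match by pointer identity
 *	}
 *
 *	found = devres_find(dev, my_release, my_match, candidate);
 *
 * With a NULL @match, devres_find() returns the most recently added
 * resource registered with my_release.
 */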

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);

	return 0;
}
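
/*
 * Usage sketch (illustrative; "power_off_codec" and "codec" are
 * hypothetical): a plain function becomes a managed cleanup step,
 * with no dedicated devres type needed:
 *
 *	static void power_off_codec(void *data)
 *	{
 *		struct my_codec *codec = data;
 *		// hypothetical teardown step
 *	}
 *
 *	ret = devm_add_action(dev, power_off_codec, codec);
 *	if (ret)
 *		power_off_codec(codec);	// devres alloc failed, run by hand
 */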

/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	/* stub in this port: the action stays registered and still
	 * runs via devm_action_release() on driver detach.
	 */
}

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc. Memory allocated with this function is
 * automatically freed on driver detach. Like all other devres
 * resources, guaranteed alignment is unsigned long long.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
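
/*
 * Usage sketch (illustrative): managed allocations need no matching
 * kfree() in the caller; devres frees the memory on driver detach:
 *
 *	buf = devm_kmalloc(dev, len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// no kfree(buf) on any exit path after this point
 */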

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void *__devm_ioremap(struct device *dev, resource_size_t offset,
			    resource_size_t size,
			    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
#if 0
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
#endif
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
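
/*
 * Usage sketch (illustrative; "pci" is a hypothetical struct pci_dev
 * pointer): mapping a PCI BAR with no iounmap() needed in the error
 * or detach paths:
 *
 *	base = devm_ioremap(&pci->dev, pci_resource_start(pci, 0),
 *			    pci_resource_len(pci, 0));
 *	if (!base)
 *		return -ENOMEM;
 *	// iounmap() happens automatically via devm_ioremap_release()
 */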