source: GPL/branches/uniaud32-next/lib32/regcache-lzo.c@ 660

Last change on this file since 660 was 652, checked in by Paul Smedley, 5 years ago

Update regmap & regcache to 5.10.10 kernel code

File size: 8.7 KB
Line 
1// SPDX-License-Identifier: GPL-2.0
2//
3// Register cache access API - LZO caching support
4//
5// Copyright 2011 Wolfson Microelectronics plc
6//
7// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
8
9/* from 5.10.10 */
10
11#include <linux/device.h>
12#include <linux/lzo.h>
13#include <linux/slab.h>
14
15#include "internal.h"
16
17static int regcache_lzo_exit(struct regmap *map);
18
/*
 * Per-block compression context.  The raw register cache is split into
 * regcache_lzo_block_count() blocks; one of these contexts tracks each
 * block's compressed data and the buffers used to (de)compress it.
 */
struct regcache_lzo_ctx {
	void *wmem;		/* scratch memory for lzo1x_1_compress() */
	void *dst;		/* output buffer; holds the compressed block between operations */
	const void *src;	/* input buffer for the next (de)compress call */
	size_t src_len;		/* length of src in bytes */
	size_t dst_len;		/* in: capacity of dst; out: bytes produced */
	size_t decompressed_size;	/* uncompressed size of this block */
	unsigned long *sync_bmp;	/* dirty-register bitmap, shared by all blocks */
	int sync_bmp_nbits;		/* number of bits in sync_bmp */
};
29
/* Number of independently compressed blocks the cache is split into. */
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
	/* Fixed count for now; @map is unused but kept for symmetry. */
	return LZO_BLOCK_NUM;
}
35
36static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
37{
38 lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
39 if (!lzo_ctx->wmem)
40 return -ENOMEM;
41 return 0;
42}
43
44static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
45{
46 size_t compress_size;
47 int ret;
48
49 ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
50 lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
51 if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
52 return -EINVAL;
53 lzo_ctx->dst_len = compress_size;
54 return 0;
55}
56
57static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
58{
59 size_t dst_len;
60 int ret;
61
62 dst_len = lzo_ctx->dst_len;
63 ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
64 lzo_ctx->dst, &dst_len);
65 if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
66 return -EINVAL;
67 return 0;
68}
69
70static int regcache_lzo_compress_cache_block(struct regmap *map,
71 struct regcache_lzo_ctx *lzo_ctx)
72{
73 int ret;
74
75 lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
76 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
77 if (!lzo_ctx->dst) {
78 lzo_ctx->dst_len = 0;
79 return -ENOMEM;
80 }
81
82 ret = regcache_lzo_compress(lzo_ctx);
83 if (ret < 0)
84 return ret;
85 return 0;
86}
87
88static int regcache_lzo_decompress_cache_block(struct regmap *map,
89 struct regcache_lzo_ctx *lzo_ctx)
90{
91 int ret;
92
93 lzo_ctx->dst_len = lzo_ctx->decompressed_size;
94 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
95 if (!lzo_ctx->dst) {
96 lzo_ctx->dst_len = 0;
97 return -ENOMEM;
98 }
99
100 ret = regcache_lzo_decompress(lzo_ctx);
101 if (ret < 0)
102 return ret;
103 return 0;
104}
105
/*
 * Map a register address to the index of the compressed block holding
 * it: the register's byte offset into the raw cache divided by the
 * rounded-up per-block size.
 */
static inline int regcache_lzo_get_blkindex(struct regmap *map,
					    unsigned int reg)
{
	return ((reg / map->reg_stride) * map->cache_word_size) /
		DIV_ROUND_UP(map->cache_size_raw,
			     regcache_lzo_block_count(map));
}
113
/*
 * Map a register address to its word index *within* its block: the
 * register index modulo the number of cache words per block
 * (block byte size / cache_word_size).
 */
static inline int regcache_lzo_get_blkpos(struct regmap *map,
					  unsigned int reg)
{
	return (reg / map->reg_stride) %
		(DIV_ROUND_UP(map->cache_size_raw,
			      regcache_lzo_block_count(map)) /
		 map->cache_word_size);
}
122
/* Uncompressed size in bytes of one cache block (rounded up). */
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}
128
129static int regcache_lzo_init(struct regmap *map)
130{
131 struct regcache_lzo_ctx **lzo_blocks;
132 size_t bmp_size;
133 int ret, i, blksize, blkcount;
134 const char *p, *end;
135 unsigned long *sync_bmp;
136
137 ret = 0;
138
139 blkcount = regcache_lzo_block_count(map);
140 map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
141 GFP_KERNEL);
142 if (!map->cache)
143 return -ENOMEM;
144 lzo_blocks = map->cache;
145
146 /*
147 * allocate a bitmap to be used when syncing the cache with
148 * the hardware. Each time a register is modified, the corresponding
149 * bit is set in the bitmap, so we know that we have to sync
150 * that register.
151 */
152 bmp_size = map->num_reg_defaults_raw;
153 sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
154 if (!sync_bmp) {
155 ret = -ENOMEM;
156 goto err;
157 }
158
159 /* allocate the lzo blocks and initialize them */
160 for (i = 0; i < blkcount; i++) {
161 lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
162 GFP_KERNEL);
163 if (!lzo_blocks[i]) {
164 bitmap_free(sync_bmp);
165 ret = -ENOMEM;
166 goto err;
167 }
168 lzo_blocks[i]->sync_bmp = sync_bmp;
169 lzo_blocks[i]->sync_bmp_nbits = bmp_size;
170 /* alloc the working space for the compressed block */
171 ret = regcache_lzo_prepare(lzo_blocks[i]);
172 if (ret < 0)
173 goto err;
174 }
175
176 blksize = regcache_lzo_get_blksize(map);
177 p = map->reg_defaults_raw;
178 end = map->reg_defaults_raw + map->cache_size_raw;
179 /* compress the register map and fill the lzo blocks */
180 for (i = 0; i < blkcount; i++, p += blksize) {
181 lzo_blocks[i]->src = p;
182 if (p + blksize > end)
183 lzo_blocks[i]->src_len = end - p;
184 else
185 lzo_blocks[i]->src_len = blksize;
186 ret = regcache_lzo_compress_cache_block(map,
187 lzo_blocks[i]);
188 if (ret < 0)
189 goto err;
190 lzo_blocks[i]->decompressed_size =
191 lzo_blocks[i]->src_len;
192 }
193
194 return 0;
195err:
196 regcache_lzo_exit(map);
197 return ret;
198}
199
/*
 * Tear down the LZO cache.  Safe to call on a partially initialized
 * cache (tolerates NULL map->cache and NULL block pointers).  Frees the
 * shared sync bitmap exactly once, then each block's buffers, the block
 * structures and the pointer array.  Always returns 0.
 */
static int regcache_lzo_exit(struct regmap *map)
{
	struct regcache_lzo_ctx **lzo_blocks;
	int i, blkcount;

	lzo_blocks = map->cache;
	if (!lzo_blocks)
		return 0;

	blkcount = regcache_lzo_block_count(map);
	/*
	 * the pointer to the bitmap used for syncing the cache
	 * is shared amongst all lzo_blocks. Ensure it is freed
	 * only once.
	 */
	if (lzo_blocks[0])
		bitmap_free(lzo_blocks[0]->sync_bmp);
	for (i = 0; i < blkcount; i++) {
		if (lzo_blocks[i]) {
			kfree(lzo_blocks[i]->wmem);
			kfree(lzo_blocks[i]->dst);
		}
		/* each lzo_block is a pointer returned by kmalloc or NULL */
		kfree(lzo_blocks[i]);
	}
	kfree(lzo_blocks);
	map->cache = NULL;
	return 0;
}
229
/*
 * Read a register value from the compressed cache.
 *
 * The block containing @reg is decompressed into a freshly allocated
 * buffer, the value is extracted, and the buffer is freed.  The
 * compressed buffer pointer/length are saved up front and restored at
 * the end so the cached compressed data is left untouched.
 * Returns 0 on success or a negative errno from decompression.
 */
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	/* free the temporary decompression buffer (dst was reallocated) */
	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}
266
/*
 * Write a register value into the compressed cache.
 *
 * Decompresses the block containing @reg, patches the value in, then
 * recompresses into a new buffer which replaces the old compressed one.
 * On success the register's bit is set in the shared sync bitmap.
 * On any failure the original compressed buffer (tmp_dst) is restored.
 *
 * NOTE(review): the early return after regcache_set_val() appears to be
 * the "value unchanged, nothing to do" path (ret is 0 there) — confirm
 * regcache_set_val()'s return convention against regmap internals.
 */
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		/* dst is the (possibly NULL) temporary buffer here */
		kfree(lzo_block->dst);
		goto out;
	}

	/* write the new value to the cache */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		/* value unchanged: drop the temp buffer, keep old data */
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		/* free both the new compressed and decompressed buffers */
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
	/* old compressed buffer and decompressed scratch no longer needed */
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	/* failure or no-op: restore the original compressed buffer */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}
325
326static int regcache_lzo_sync(struct regmap *map, unsigned int min,
327 unsigned int max)
328{
329 struct regcache_lzo_ctx **lzo_blocks;
330 unsigned int val;
331 int i;
332 int ret;
333
334 lzo_blocks = map->cache;
335 i = min;
336 for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
337 lzo_blocks[0]->sync_bmp_nbits) {
338 if (i > max)
339 continue;
340
341 ret = regcache_read(map, i, &val);
342 if (ret)
343 return ret;
344
345 /* Is this the hardware default? If so skip. */
346 ret = regcache_lookup_reg(map, i);
347 if (ret > 0 && val == map->reg_defaults[ret].def)
348 continue;
349
350 map->cache_bypass = true;
351 ret = _regmap_write(map, i, val);
352 map->cache_bypass = false;
353 if (ret)
354 return ret;
355 dev_dbg(map->dev, "Synced register %#x, value %#x\n",
356 i, val);
357 }
358
359 return 0;
360}
361
/* regcache operations table for the LZO-compressed cache type. */
struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};
Note: See TracBrowser for help on using the repository browser.