wok view linux-cloop/stuff/cloop.u @ rev 24296

Up python-gevent (21.12.0), sqlite3-ruby (1.4.2)
author Pascal Bellard <pascal.bellard@slitaz.org>
date Sat Jan 15 10:17:16 2022 +0000 (2022-01-15)
parents 36e9a3dcd5de
children 5a92a26adcc1
line source
1 --- cloop.h
2 +++ cloop.h
3 @@ -1,15 +1,50 @@
4 +#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */
5 +#define CLOOP_SIGNATURE_SIZE 9
6 +#define CLOOP_SIGNATURE_OFFSET 0x0
7 +
8 #ifndef _COMPRESSED_LOOP_H
9 #define _COMPRESSED_LOOP_H
11 -#define CLOOP_HEADROOM 128
12 +/*************************************************************************\
13 +* Starting with Format V4.0 (cloop version 4.x), cloop can now have two *
14 +* alternative structures: *
15 +* *
16 +* 1. Header first: "robust" format, handles missing blocks well *
17 +* 2. Footer (header last): "streaming" format, easier to create *
18 +* *
19 +* The cloop kernel module autodetects both formats, and can (currently) *
20 +* still handle the V2.0 format as well. *
21 +* *
22 +* 1. Header first: *
23 +* +---------------------------- FIXED SIZE ---------------------------+ *
24 +* |Signature (128 bytes) | *
25 +* |block_size (32bit number, network order) | *
26 +* |num_blocks (32bit number, network order) | *
27 +* +--------------------------- VARIABLE SIZE -------------------------+ *
28 +* |num_blocks * FlagsOffset (upper 4 bits flags, lower 60 bits offset)| *
29 +* |compressed data blocks of variable size ... | *
30 +* +-------------------------------------------------------------------+ *
31 +* *
32 +* 2. Footer (header last): *
33 +* +--------------------------- VARIABLE SIZE -------------------------+ *
34 +* |compressed data blocks of variable size ... | *
35 +* |num_blocks * FlagsOffset (upper 4 bits flags, lower 60 bits offset)| *
36 +* +---------------------------- FIXED SIZE ---------------------------+ *
37 +* |Signature (128 bytes) | *
38 +* |block_size (32bit number, network order) | *
39 +* |num_blocks (32bit number, network order) | *
40 +* +-------------------------------------------------------------------+ *
41 +* *
42 +* Offsets are always relative to beginning of file, in all formats. *
43 +* The block index contains num_blocks+1 offsets, followed (1) or *
44 +* preceded (2) by the compressed blocks. *
45 +\*************************************************************************/
47 -/* The cloop header usually looks like this: */
48 -/* #!/bin/sh */
49 -/* #V2.00 Format */
50 -/* ...padding up to CLOOP_HEADROOM... */
51 -/* block_size (32bit number, network order) */
52 -/* num_blocks (32bit number, network order) */
53 +#include <linux/types.h> /* u_int32_t */
54 +
55 +#define CLOOP_HEADROOM 128
57 +/* Header of fixed length, can be located at beginning or end of file */
58 struct cloop_head
59 {
60 char preamble[CLOOP_HEADROOM];
61 @@ -17,9 +52,163 @@
62 u_int32_t num_blocks;
63 };
65 +/************************************************************************\
66 +* CLOOP4 flags for each compressed block *
67 +* Value Meaning *
68 +* 0 GZIP/7ZIP compression (compatible with V2.0 Format) *
69 +* 1 no compression (incompressible data) *
70 +* 2 xz compression (currently best space saver) *
71 +* 3 lz4 compression *
72 +* 4 lzo compression (fastest) *
73 +\************************************************************************/
74 +
75 +typedef uint64_t cloop_block_ptr;
76 +
77 +/* Get value of first 4 bits */
78 +#define CLOOP_BLOCK_FLAGS(x) ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
79 +/* Get value of last 60 bits */
80 +#define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU)
81 +
82 +#define CLOOP_COMPRESSOR_ZLIB 0x0
83 +#define CLOOP_COMPRESSOR_NONE 0x1
84 +#define CLOOP_COMPRESSOR_XZ 0x2
85 +#define CLOOP_COMPRESSOR_LZ4 0x3
86 +#define CLOOP_COMPRESSOR_LZO1X 0x4
87 +
88 +#define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
89 +
90 +#define CLOOP_COMPRESSOR_LINK 0xF
91 +
92 +
93 /* data_index (num_blocks 64bit pointers, network order)... */
94 /* compressed data (gzip block compressed format)... */
96 +struct cloop_tail
97 +{
98 + u_int32_t table_size;
99 + u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
100 +#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
101 +#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4)
102 +#define CLOOP3_TRUNCATED(x) ((unsigned int)((x) & 0x80) >> 7)
103 +#define CLOOP3_LASTLEN(x) (unsigned int)((x) >> 8)
104 + u_int32_t num_blocks;
105 +};
106 +
107 +#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12)
108 +
109 +struct block_info
110 +{
111 + loff_t offset; /* 64-bit offsets of compressed block */
112 + u_int32_t size; /* 32-bit compressed block size */
113 + u_int32_t flags; /* 32-bit compression flags */
114 +};
115 +
116 +static inline char *build_index(struct block_info *offsets, unsigned long n,
117 + unsigned long block_size, unsigned global_flags)
118 +{
119 + u_int32_t *ofs32 = (u_int32_t *) offsets;
120 + loff_t *ofs64 = (loff_t *) offsets;
121 +
122 + /* v3 64bits bug: v1 assumed */
123 + unsigned long v3_64 = (n+1)/2;
124 + loff_t prev;
125 +
126 + if (ofs32[0] != 0 && ofs32[1] == 0) {
127 + for (prev=__le64_to_cpu(ofs64[v3_64]);
128 + v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev;
129 + prev=__le64_to_cpu(ofs64[v3_64]));
130 + }
131 +
132 + if (ofs32[0] == 0) {
133 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
134 + while (n--) {
135 + offsets[n].offset = __be64_to_cpu(offsets[n].offset);
136 + offsets[n].size = ntohl(offsets[n].size);
137 + offsets[n].flags = 0;
138 + }
139 + return (char *) "128BE accelerated knoppix 1.0";
140 + }
141 + else { /* V2.0/V4.0 */
142 + loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
143 + u_int32_t flags;
144 + static char v4[11];
145 + unsigned long i = n;
146 +
147 + for (flags = 0; n-- ;) {
148 + loff_t data = __be64_to_cpu(ofs64[n]);
149 +
150 + offsets[n].size = last -
151 + (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
152 + last = offsets[n].offset;
153 + offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
154 + flags |= 1 << offsets[n].flags;
155 + }
156 + if (flags < 2) return (char *) "64BE v2.0";
157 + while (i--) {
158 + if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
159 + offsets[i] = offsets[offsets[i].offset];
160 + }
161 + }
162 + strcpy(v4, (char *) "64BE v4.0a");
163 + v4[10] = 'a' + ((flags-1) & 0xF); // compressors used
164 + if (flags > 0x10) { // with links ?
165 + v4[10] += 'A' - 'a';
166 + }
167 + return v4;
168 + }
169 + }
170 + else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
171 + loff_t last = __le64_to_cpu(ofs64[n]);
172 + while (n--) {
173 + offsets[n].size = last -
174 + (offsets[n].offset = __le64_to_cpu(ofs64[n]));
175 + last = offsets[n].offset;
176 + offsets[n].flags = 0;
177 + }
178 + return (char *) "64LE v1.0";
179 + }
180 + else { /* V3.0 or V0.68 */
181 + unsigned long i;
182 + loff_t j;
183 + static char v3[11];
184 +
185 + for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
186 + if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
187 + loff_t last = ntohl(ofs32[n]);
188 + while (n--) {
189 + offsets[n].size = last -
190 + (offsets[n].offset = ntohl(ofs32[n]));
191 + last = offsets[n].offset;
192 + offsets[n].flags = 0;
193 + }
194 + return (char *) "32BE v0.68";
195 + }
196 +
197 + v3_64 = (ofs32[1] == 0);
198 + for (i = n; i-- != 0; )
199 + offsets[i].size = ntohl(ofs32[i << v3_64]);
200 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
201 + offsets[i].offset = j;
202 + offsets[i].flags = global_flags;
203 + if (offsets[i].size == 0xFFFFFFFF) {
204 + offsets[i].flags = CLOOP_COMPRESSOR_NONE;
205 + offsets[i].size = block_size;
206 + }
207 + if ((offsets[i].size & 0x80000000) == 0) {
208 + j += offsets[i].size;
209 + }
210 + }
211 + for (i = 0; i < n; i++) {
212 + if (offsets[i].size & 0x80000000) {
213 + offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
214 + }
215 + }
216 + strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
217 + v3[10] += global_flags;
218 + return v3;
219 + }
220 +}
221 +
222 /* Cloop suspend IOCTL */
223 #define CLOOP_SUSPEND 0x4C07
225 --- cloop.c
226 +++ cloop.c
227 @@ -1,26 +1,23 @@
228 -/*
229 - * compressed_loop.c: Read-only compressed loop blockdevice
230 - * hacked up by Rusty in 1999, extended and maintained by Klaus Knopper
231 - *
232 - * A cloop file looks like this:
233 - * [32-bit uncompressed block size: network order]
234 - * [32-bit number of blocks (n_blocks): network order]
235 - * [64-bit file offsets of start of blocks: network order]
236 - * ...
237 - * (n_blocks + 1).
238 - * n_blocks consisting of:
239 - * [compressed block]
240 - *
241 - * Every version greatly inspired by code seen in loop.c
242 - * by Theodore Ts'o, 3/29/93.
243 - *
244 - * Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper.
245 - * Redistribution of this file is permitted under the GNU Public License.
246 - *
247 - */
248 +/************************************************************************\
249 +* cloop.c: Read-only compressed loop blockdevice *
250 +* hacked up by Rusty in 1999, extended and maintained by Klaus Knopper *
251 +* *
252 +* For all supported cloop file formats, please check the file "cloop.h" *
253 +* New in Version 4: *
254 +* - Header can be first or last in cloop file, *
255 +* - Different compression algorithms supported (compression type *
256 +* encoded in first 4 bytes of block offset address) *
257 +* *
258 +* Every version greatly inspired by code seen in loop.c *
259 +* by Theodore Ts'o, 3/29/93. *
260 +* *
261 +* Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper. *
262 +* Redistribution of this file is permitted under the GNU Public License *
263 +* V2. *
264 +\************************************************************************/
266 #define CLOOP_NAME "cloop"
267 -#define CLOOP_VERSION "2.639"
268 +#define CLOOP_VERSION "4.12"
269 #define CLOOP_MAX 8
271 #ifndef KBUILD_MODNAME
272 @@ -47,8 +44,27 @@
273 #include <asm/div64.h> /* do_div() for 64bit division */
274 #include <asm/uaccess.h>
275 #include <asm/byteorder.h>
276 -/* Use zlib_inflate from lib/zlib_inflate */
277 +/* Check for ZLIB, LZO1X, LZ4 decompression algorithms in kernel. */
278 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
279 #include <linux/zutil.h>
280 +#endif
281 +#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
282 +#include <linux/lzo.h>
283 +#endif
284 +#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
285 +#include <linux/lz4.h>
286 +#endif
287 +#if (defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE))
288 +#include <linux/decompress/unlzma.h>
289 +#endif
290 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
291 +#include <linux/xz.h>
292 +#endif
293 +
294 +#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE) || defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE) || defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE) || defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE) || defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)))
295 +#error "No decompression library selected in kernel config!"
296 +#endif
297 +
298 #include <linux/loop.h>
299 #include <linux/kthread.h>
300 #include <linux/compat.h>
301 @@ -92,47 +108,64 @@
302 #define DEBUGP(format, x...)
303 #endif
305 +/* Default size of buffer to keep some decompressed blocks in memory to speed up access */
306 +#define BLOCK_BUFFER_MEM (16*65536)
307 +
308 /* One file can be opened at module insertion time */
309 /* insmod cloop file=/path/to/file */
310 static char *file=NULL;
311 static unsigned int preload=0;
312 static unsigned int cloop_max=CLOOP_MAX;
313 +static unsigned int buffers=BLOCK_BUFFER_MEM;
314 module_param(file, charp, 0);
315 module_param(preload, uint, 0);
316 module_param(cloop_max, uint, 0);
317 MODULE_PARM_DESC(file, "Initial cloop image file (full path) for /dev/cloop");
318 MODULE_PARM_DESC(preload, "Preload n blocks of cloop data into memory");
319 MODULE_PARM_DESC(cloop_max, "Maximum number of cloop devices (default 8)");
320 +MODULE_PARM_DESC(buffers, "Size of buffer to keep uncompressed blocks in memory in MiB (default 1)");
322 static struct file *initial_file=NULL;
323 static int cloop_major=MAJOR_NR;
325 -/* Number of buffered decompressed blocks */
326 -#define BUFFERED_BLOCKS 8
327 struct cloop_device
328 {
329 - /* Copied straight from the file */
330 + /* Header filled from the file */
331 struct cloop_head head;
332 + int header_first;
333 + int file_format;
335 - /* An array of offsets of compressed blocks within the file */
336 - loff_t *offsets;
337 + /* An or'd sum of all flags of each compressed block (v3) */
338 + u_int32_t allflags;
339 +
340 + /* An array of cloop_ptr flags/offset for compressed blocks within the file */
341 + struct block_info *block_ptrs;
343 /* We buffer some uncompressed blocks for performance */
344 - int buffered_blocknum[BUFFERED_BLOCKS];
345 - int current_bufnum;
346 - void *buffer[BUFFERED_BLOCKS];
347 - void *compressed_buffer;
348 - size_t preload_array_size; /* Size of pointer array in blocks */
349 - size_t preload_size; /* Number of successfully allocated blocks */
350 - char **preload_cache; /* Pointers to preloaded blocks */
351 + size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */
352 + int *buffered_blocknum; /* list of numbers of uncompressed blocks in buffer */
353 + int current_bufnum; /* which block is current */
354 + unsigned char **buffer; /* cache space for num_buffered_blocks uncompressed blocks */
355 + void *compressed_buffer; /* space for the largest compressed block */
356 + size_t preload_array_size; /* Size of pointer array in blocks */
357 + size_t preload_size; /* Number of successfully allocated blocks */
358 + char **preload_cache; /* Pointers to preloaded blocks */
360 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
361 z_stream zstream;
362 +#endif
363 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
364 + struct xz_dec *xzdecoderstate;
365 + struct xz_buf xz_buffer;
366 +#endif
368 struct file *backing_file; /* associated file */
369 struct inode *backing_inode; /* for bmap */
371 + unsigned char *underlying_filename;
372 unsigned long largest_block;
373 unsigned int underlying_blksize;
374 + loff_t underlying_total_size;
375 int clo_number;
376 int refcnt;
377 struct block_device *bdev;
378 @@ -147,7 +180,6 @@
379 struct request_queue *clo_queue;
380 struct gendisk *clo_disk;
381 int suspended;
382 - char clo_file_name[LO_NAME_SIZE];
383 };
385 /* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers,
386 @@ -156,52 +188,113 @@
387 static const char *cloop_name=CLOOP_NAME;
388 static int cloop_count = 0;
390 -#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))) /* Must be compiled into kernel. */
391 -#error "Invalid Kernel configuration. CONFIG_ZLIB_INFLATE support is needed for cloop."
392 -#endif
393 -
394 -/* Use __get_free_pages instead of vmalloc, allows up to 32 pages,
395 - * 2MB in one piece */
396 static void *cloop_malloc(size_t size)
397 {
398 - int order = get_order(size);
399 - if(order <= KMALLOC_MAX_ORDER)
400 - return (void *)kmalloc(size, GFP_KERNEL);
401 - else if(order < MAX_ORDER)
402 - return (void *)__get_free_pages(GFP_KERNEL, order);
403 + /* kmalloc will fail after the system is running for a while, */
404 + /* when large orders can't return contiguous memory. */
405 + /* Let's just use vmalloc for now. :-/ */
406 + /* int order = get_order(size); */
407 + /* if(order <= KMALLOC_MAX_ORDER) */
408 + /* return (void *)kmalloc(size, GFP_KERNEL); */
409 + /* else if(order < MAX_ORDER) */
410 + /* return (void *)__get_free_pages(GFP_KERNEL, order); */
411 return (void *)vmalloc(size);
412 }
414 static void cloop_free(void *mem, size_t size)
415 {
416 - int order = get_order(size);
417 - if(order <= KMALLOC_MAX_ORDER)
418 - kfree(mem);
419 - else if(order < MAX_ORDER)
420 - free_pages((unsigned long)mem, order);
421 - else vfree(mem);
422 + /* int order = get_order(size); */
423 + /* if(order <= KMALLOC_MAX_ORDER) */
424 + /* kfree(mem); */
425 + /* else if(order < MAX_ORDER) */
426 + /* free_pages((unsigned long)mem, order); */
427 + /* else */
428 + vfree(mem);
429 }
431 -static int uncompress(struct cloop_device *clo,
432 - unsigned char *dest, unsigned long *destLen,
433 - unsigned char *source, unsigned long sourceLen)
434 +static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags)
435 {
436 - /* Most of this code can be found in fs/cramfs/uncompress.c */
437 - int err;
438 - clo->zstream.next_in = source;
439 - clo->zstream.avail_in = sourceLen;
440 - clo->zstream.next_out = dest;
441 - clo->zstream.avail_out = *destLen;
442 - err = zlib_inflateReset(&clo->zstream);
443 - if (err != Z_OK)
444 - {
445 - printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
446 - zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
447 - }
448 - err = zlib_inflate(&clo->zstream, Z_FINISH);
449 - *destLen = clo->zstream.total_out;
450 - if (err != Z_STREAM_END) return err;
451 - return Z_OK;
452 + int err = -1;
453 + switch(flags)
454 + {
455 + case CLOOP_COMPRESSOR_NONE:
456 + memcpy(dest, source, *destLen = sourceLen);
457 + err = Z_OK;
458 + break;
459 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
460 + case CLOOP_COMPRESSOR_ZLIB:
461 + clo->zstream.next_in = source;
462 + clo->zstream.avail_in = sourceLen;
463 + clo->zstream.next_out = dest;
464 + clo->zstream.avail_out = *destLen;
465 + err = zlib_inflateReset(&clo->zstream);
466 + if (err != Z_OK)
467 + {
468 + printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
469 + zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
470 + }
471 + err = zlib_inflate(&clo->zstream, Z_FINISH);
472 + *destLen = clo->zstream.total_out;
473 + if (err == Z_STREAM_END) err = 0;
474 + DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
475 + break;
476 +#endif
477 +#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
478 + case CLOOP_COMPRESSOR_LZO1X:
479 + {
480 + size_t tmp = (size_t) clo->head.block_size;
481 + err = lzo1x_decompress_safe(source, sourceLen,
482 + dest, &tmp);
483 + if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
484 + }
485 + break;
486 +#endif
487 +#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
488 + case CLOOP_COMPRESSOR_LZ4:
489 + {
490 + size_t outputSize = *destLen;
491 + /* We should adjust outputSize here, in case the last block is smaller than block_size */
492 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
493 + err = lz4_decompress(source, (size_t *) &sourceLen,
494 + dest, outputSize);
495 +#else
496 + err = LZ4_decompress_safe(source,
497 + dest,
498 + sourceLen, outputSize);
499 +#endif
500 + if (err >= 0)
501 + {
502 + err = 0;
503 + *destLen = outputSize;
504 + }
505 + }
506 + break;
507 +#endif
508 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
509 + case CLOOP_COMPRESSOR_XZ:
510 + clo->xz_buffer.in = source;
511 + clo->xz_buffer.in_pos = 0;
512 + clo->xz_buffer.in_size = sourceLen;
513 + clo->xz_buffer.out = dest;
514 + clo->xz_buffer.out_pos = 0;
515 + clo->xz_buffer.out_size = *destLen;
516 + xz_dec_reset(clo->xzdecoderstate);
517 + err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
518 + if (err == XZ_STREAM_END || err == XZ_OK)
519 + {
520 + err = 0;
521 + }
522 + else
523 + {
524 + printk(KERN_ERR "%s: xz_dec_run error %d\n", cloop_name, err);
525 + err = 1;
526 + }
527 + break;
528 +#endif
529 + default:
530 + printk(KERN_ERR "%s: compression method is not supported!\n", cloop_name);
531 + }
532 + return err;
533 }
535 static ssize_t cloop_read_from_file(struct cloop_device *clo, struct file *f, char *buf,
536 @@ -220,7 +313,7 @@
538 if(size_read <= 0)
539 {
540 - printk(KERN_ERR "%s: Read error %d at pos %Lu in file %s, "
541 + printk(KERN_ERR "%s: Read error %d at pos %llu in file %s, "
542 "%d bytes lost.\n", cloop_name, (int)size_read, pos,
543 file, (int)size);
544 memset(buf + buf_len - size, 0, size);
545 @@ -232,72 +325,84 @@
546 }
548 /* This looks more complicated than it is */
549 -/* Returns number of block buffer to use for this request */
550 +/* Returns number of cache block buffer to use for this request */
551 static int cloop_load_buffer(struct cloop_device *clo, int blocknum)
552 {
553 - unsigned int buf_done = 0;
554 - unsigned long buflen;
555 - unsigned int buf_length;
556 + loff_t compressed_block_offset;
557 + long compressed_block_len;
558 + long uncompressed_block_len=0;
559 int ret;
560 int i;
561 - if(blocknum > ntohl(clo->head.num_blocks) || blocknum < 0)
562 - {
563 - printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
564 - cloop_name, blocknum);
565 - return -1;
566 - }
567 + if(blocknum > clo->head.num_blocks || blocknum < 0)
568 + {
569 + printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
570 + cloop_name, blocknum);
571 + return -1;
572 + }
574 /* Quick return if the block we seek is already in one of the buffers. */
575 /* Return number of buffer */
576 - for(i=0; i<BUFFERED_BLOCKS; i++)
577 + for(i=0; i<clo->num_buffered_blocks; i++)
578 if (blocknum == clo->buffered_blocknum[i])
579 - {
580 - DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
581 - return i;
582 - }
583 -
584 - buf_length = be64_to_cpu(clo->offsets[blocknum+1]) - be64_to_cpu(clo->offsets[blocknum]);
585 -
586 -/* Load one compressed block from the file. */
587 - cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
588 - be64_to_cpu(clo->offsets[blocknum]), buf_length);
589 + {
590 + DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
591 + return i;
592 + }
594 - buflen = ntohl(clo->head.block_size);
595 + compressed_block_offset = clo->block_ptrs[blocknum].offset;
596 + compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;
598 - /* Go to next position in the block ring buffer */
599 - clo->current_bufnum++;
600 - if(clo->current_bufnum >= BUFFERED_BLOCKS) clo->current_bufnum = 0;
601 + /* Load one compressed block from the file. */
602 + if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
603 + {
604 + size_t n = cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
605 + compressed_block_offset, compressed_block_len);
606 + if (n!= compressed_block_len)
607 + {
608 + printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
609 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
610 + /* return -1; */
611 + }
612 + } else {
613 + printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
614 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
615 + return -1;
616 + }
617 +
618 + /* Go to next position in the cache block buffer (which is used as a cyclic buffer) */
619 + if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
621 /* Do the uncompression */
622 - ret = uncompress(clo, clo->buffer[clo->current_bufnum], &buflen, clo->compressed_buffer,
623 - buf_length);
624 + uncompressed_block_len = clo->head.block_size;
625 + ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
626 + clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
627 /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
628 if (ret != 0)
629 - {
630 - printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u "
631 - "%Lu-%Lu\n", cloop_name, ret, blocknum,
632 - ntohl(clo->head.block_size), buflen, buf_length, buf_done,
633 - be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1]));
634 - clo->buffered_blocknum[clo->current_bufnum] = -1;
635 - return -1;
636 - }
637 + {
638 + printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
639 + cloop_name, ret, blocknum,
640 + compressed_block_len, clo->block_ptrs[blocknum].offset,
641 + clo->block_ptrs[blocknum].flags);
642 + clo->buffered_blocknum[clo->current_bufnum] = -1;
643 + return -1;
644 + }
645 clo->buffered_blocknum[clo->current_bufnum] = blocknum;
646 return clo->current_bufnum;
647 }
649 /* This function does all the real work. */
650 -/* returns "uptodate" */
651 +/* returns "uptodate" */
652 static int cloop_handle_request(struct cloop_device *clo, struct request *req)
653 {
654 int buffered_blocknum = -1;
655 int preloaded = 0;
656 loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
657 - struct bio_vec *bvec;
658 + struct bio_vec bvec;
659 struct req_iterator iter;
660 rq_for_each_segment(bvec, req, iter)
661 {
662 - unsigned long len = bvec->bv_len;
663 - char *to_ptr = kmap(bvec->bv_page) + bvec->bv_offset;
664 + unsigned long len = bvec.bv_len;
665 + char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset;
666 while(len > 0)
667 {
668 u_int32_t length_in_buffer;
669 @@ -308,7 +413,7 @@
670 /* puts the result in the first argument, i.e. block_offset */
671 /* becomes the blocknumber to load, and offset_in_buffer the */
672 /* position in the buffer */
673 - offset_in_buffer = do_div(block_offset, ntohl(clo->head.block_size));
674 + offset_in_buffer = do_div(block_offset, clo->head.block_size);
675 /* Lookup preload cache */
676 if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
677 clo->preload_cache[block_offset] != NULL)
678 @@ -325,7 +430,7 @@
679 from_ptr = clo->buffer[buffered_blocknum];
680 }
681 /* Now, at least part of what we want will be in the buffer. */
682 - length_in_buffer = ntohl(clo->head.block_size) - offset_in_buffer;
683 + length_in_buffer = clo->head.block_size - offset_in_buffer;
684 if(length_in_buffer > len)
685 {
686 /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
687 @@ -337,18 +442,19 @@
688 len -= length_in_buffer;
689 offset += length_in_buffer;
690 } /* while inner loop */
691 - kunmap(bvec->bv_page);
692 + kunmap(bvec.bv_page);
693 + cond_resched();
694 } /* end rq_for_each_segment*/
695 return ((buffered_blocknum!=-1) || preloaded);
696 }
698 /* Adopted from loop.c, a kernel thread to handle physical reads and
699 - * decompression. */
700 + decompression. */
701 static int cloop_thread(void *data)
702 {
703 struct cloop_device *clo = data;
704 current->flags |= PF_NOFREEZE;
705 - set_user_nice(current, -15);
706 + set_user_nice(current, 10);
707 while (!kthread_should_stop()||!list_empty(&clo->clo_list))
708 {
709 int err;
710 @@ -390,10 +496,18 @@
711 int rw;
712 /* quick sanity checks */
713 /* blk_fs_request() was removed in 2.6.36 */
714 - if (unlikely(req == NULL || (req->cmd_type != REQ_TYPE_FS)))
715 + if (unlikely(req == NULL
716 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
717 + || (req->cmd_type != REQ_TYPE_FS)
718 +#endif
719 + ))
720 goto error_continue;
721 rw = rq_data_dir(req);
722 - if (unlikely(rw != READ && rw != READA))
723 + if (unlikely(rw != READ
724 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
725 + && rw != READA
726 +#endif
727 + ))
728 {
729 DEBUGP("cloop_do_request: bad command\n");
730 goto error_continue;
731 @@ -409,40 +523,51 @@
732 continue; /* next request */
733 error_continue:
734 DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
735 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
736 req->errors++;
737 +#else
738 + req->error_count++;
739 +#endif
740 __blk_end_request_all(req, -EIO);
741 }
742 }
744 -/* Read header and offsets from already opened file */
745 -static int cloop_set_file(int cloop_num, struct file *file, char *filename)
746 +/* Read header, flags and offsets from already opened file */
747 +static int cloop_set_file(int cloop_num, struct file *file)
748 {
749 struct cloop_device *clo = cloop_dev[cloop_num];
750 struct inode *inode;
751 char *bbuf=NULL;
752 - unsigned int i, offsets_read, total_offsets;
753 - int isblkdev;
754 - int error = 0;
755 + unsigned int bbuf_size = 0;
756 + const unsigned int header_size = sizeof(struct cloop_head);
757 + unsigned int i, total_offsets=0;
758 + loff_t fs_read_position = 0, header_pos[2];
759 + int flags, isblkdev, bytes_read, error = 0;
760 + if (clo->suspended) return error;
761 + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
762 inode = file->f_dentry->d_inode;
763 + clo->underlying_filename = kstrdup(file->f_dentry->d_name.name ? file->f_dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
764 + #else
765 + inode = file->f_path.dentry->d_inode;
766 + clo->underlying_filename = kstrdup(file->f_path.dentry->d_name.name ? file->f_path.dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
767 + #endif
768 isblkdev=S_ISBLK(inode->i_mode)?1:0;
769 if(!isblkdev&&!S_ISREG(inode->i_mode))
770 {
771 printk(KERN_ERR "%s: %s not a regular file or block device\n",
772 - cloop_name, filename);
773 + cloop_name, clo->underlying_filename);
774 error=-EBADF; goto error_release;
775 }
776 clo->backing_file = file;
777 clo->backing_inode= inode ;
778 - if(!isblkdev&&inode->i_size<sizeof(struct cloop_head))
779 + clo->underlying_total_size = (isblkdev) ? inode->i_bdev->bd_inode->i_size : inode->i_size;
780 + if(clo->underlying_total_size < header_size)
781 {
782 - printk(KERN_ERR "%s: %lu bytes (must be >= %u bytes)\n",
783 - cloop_name, (unsigned long)inode->i_size,
784 - (unsigned)sizeof(struct cloop_head));
785 + printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n",
786 + cloop_name, clo->underlying_total_size,
787 + (unsigned int)header_size);
788 error=-EBADF; goto error_release;
789 }
790 - /* In suspended mode, we have done all checks necessary - FF */
791 - if (clo->suspended)
792 - return error;
793 if(isblkdev)
794 {
795 struct request_queue *q = bdev_get_queue(inode->i_bdev);
796 @@ -451,104 +576,225 @@
797 /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */
798 blk_queue_max_segment_size(clo->clo_queue, queue_max_segment_size(q));
799 blk_queue_segment_boundary(clo->clo_queue, queue_segment_boundary(q));
800 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
801 blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn);
802 +#endif
803 clo->underlying_blksize = block_size(inode->i_bdev);
804 }
805 else
806 clo->underlying_blksize = PAGE_SIZE;
807 - DEBUGP("Underlying blocksize is %u\n", clo->underlying_blksize);
808 - bbuf = cloop_malloc(clo->underlying_blksize);
809 +
810 + DEBUGP(KERN_INFO "Underlying blocksize of %s is %u\n", clo->underlying_filename, clo->underlying_blksize);
811 + DEBUGP(KERN_INFO "Underlying total size of %s is %llu\n", clo->underlying_filename, clo->underlying_total_size);
812 +
813 + /* clo->underlying_blksize should be larger than header_size, even if it's only PAGE_SIZE */
814 + bbuf_size = clo->underlying_blksize;
815 + bbuf = cloop_malloc(bbuf_size);
816 if(!bbuf)
817 {
818 - printk(KERN_ERR "%s: out of kernel mem for block buffer (%lu bytes)\n",
819 - cloop_name, (unsigned long)clo->underlying_blksize);
820 + printk(KERN_ERR "%s: out of kernel mem for buffer (%u bytes)\n",
821 + cloop_name, (unsigned int) bbuf_size);
822 + error=-ENOMEM; goto error_release;
823 + }
824 +
825 + header_pos[0] = 0; /* header first */
826 + header_pos[1] = clo->underlying_total_size - sizeof(struct cloop_head); /* header last */
827 + for(i=0; i<2; i++)
828 + {
829 + /* Check for header */
830 + size_t bytes_readable = MIN(clo->underlying_blksize, clo->underlying_total_size - header_pos[i]);
831 + size_t bytes_read = cloop_read_from_file(clo, file, bbuf, header_pos[i], bytes_readable);
832 + if(bytes_read != bytes_readable)
833 + {
834 + printk(KERN_ERR "%s: Bad file %s, read() of %s %u bytes returned %d.\n",
835 + cloop_name, clo->underlying_filename, (i==0)?"first":"last",
836 + (unsigned int)bytes_readable, (int)bytes_read);
837 + error=-EBADF;
838 + goto error_release;
839 + }
840 + memcpy(&clo->head, bbuf, header_size);
841 + if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
842 + {
843 + clo->file_format++;
844 + clo->head.block_size=ntohl(clo->head.block_size);
845 + clo->head.num_blocks=ntohl(clo->head.num_blocks);
846 + clo->header_first = (i==0) ? 1 : 0;
847 + printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
848 + break;
849 + }
850 + }
851 + if (clo->file_format == 0)
852 + {
853 + printk(KERN_ERR "%s: Cannot detect %s format.\n",
854 + cloop_name, cloop_name);
855 + error=-EBADF; goto error_release;
856 + }
857 + if (clo->head.block_size % 512 != 0)
858 + {
859 + printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
860 + cloop_name, clo->head.block_size);
861 + error=-EBADF; goto error_release;
862 + }
863 + total_offsets=clo->head.num_blocks;
864 + if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
865 + total_offsets > inode->i_size))
866 + {
867 + printk(KERN_ERR "%s: file %s too small for %u blocks\n",
868 + cloop_name, clo->underlying_filename, clo->head.num_blocks);
869 + error=-EBADF; goto error_release;
870 + }
871 + /* Allocate Memory for decompressors */
872 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
873 + clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
874 + if(!clo->zstream.workspace)
875 + {
876 + printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
877 + cloop_name, zlib_inflate_workspacesize());
878 error=-ENOMEM; goto error_release;
879 }
880 - total_offsets = 1; /* Dummy total_offsets: will be filled in first time around */
881 - for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++)
882 + zlib_inflateInit(&clo->zstream);
883 +#endif
884 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
885 +#if XZ_INTERNAL_CRC32
886 + /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
887 + xz_crc32_init();
888 +#endif
889 + clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
890 +#endif
891 + if (total_offsets + 1 == 0) /* Version 3 */
892 {
893 - unsigned int offset = 0, num_readable;
894 - size_t bytes_read = cloop_read_from_file(clo, file, bbuf,
895 - i*clo->underlying_blksize,
896 - clo->underlying_blksize);
897 - if(bytes_read != clo->underlying_blksize)
898 + struct cloop_tail tail;
899 + if (isblkdev)
900 {
901 - printk(KERN_ERR "%s: Bad file, read() of first %lu bytes returned %d.\n",
902 - cloop_name, (unsigned long)clo->underlying_blksize, (int)bytes_read);
903 - error=-EBADF;
904 - goto error_release;
905 + /* No end of file: can't find index */
906 + printk(KERN_ERR "%s: no V3 support for block device\n",
907 + cloop_name);
908 + error=-EBADF; goto error_release;
909 }
910 - /* Header will be in block zero */
911 - if(i==0)
912 + bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
913 + inode->i_size - sizeof(struct cloop_tail),
914 + sizeof(struct cloop_tail));
915 + if (bytes_read == sizeof(struct cloop_tail))
916 {
917 - memcpy(&clo->head, bbuf, sizeof(struct cloop_head));
918 - offset = sizeof(struct cloop_head);
919 - if (ntohl(clo->head.block_size) % 512 != 0)
920 + unsigned long len, zlen;
921 + int ret;
922 + void *zbuf;
923 + clo->head.num_blocks = ntohl(tail.num_blocks);
924 + total_offsets = clo->head.num_blocks;
925 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
926 + zlen = ntohl(tail.table_size);
927 + zbuf = cloop_malloc(zlen);
928 + if (!clo->block_ptrs || !zbuf)
929 {
930 - printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
931 - cloop_name, ntohl(clo->head.block_size));
932 - error=-EBADF; goto error_release;
933 - }
934 - if (clo->head.preamble[0x0B]!='V'||clo->head.preamble[0x0C]<'1')
935 - {
936 - printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
937 - "please use an older version of %s for this file.\n",
938 - cloop_name, cloop_name);
939 - error=-EBADF; goto error_release;
940 + printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
941 + error=-ENOMEM; goto error_release;
942 }
943 - if (clo->head.preamble[0x0C]<'2')
944 + bytes_read = cloop_read_from_file(clo, file, zbuf,
945 + inode->i_size - zlen - sizeof(struct cloop_tail),
946 + zlen);
947 + if (bytes_read != zlen)
948 {
949 - printk(KERN_ERR "%s: Cannot read old architecture-dependent "
950 - "(format <= 1.0) images, please use an older "
951 - "version of %s for this file.\n",
952 - cloop_name, cloop_name);
953 + printk(KERN_ERR "%s: can't read index\n", cloop_name);
954 error=-EBADF; goto error_release;
955 }
956 - total_offsets=ntohl(clo->head.num_blocks)+1;
957 - if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
958 - total_offsets > inode->i_size))
959 + len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
960 + flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
961 +// May 3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
962 +printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name,
963 + clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
964 + ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
965 +// May 3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at (null)
966 +printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
967 + cloop_free(zbuf, zlen);
968 + if (ret != 0)
969 {
970 - printk(KERN_ERR "%s: file too small for %u blocks\n",
971 - cloop_name, ntohl(clo->head.num_blocks));
972 + printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
973 + cloop_name, ret, flags);
974 error=-EBADF; goto error_release;
975 }
976 - clo->offsets = cloop_malloc(sizeof(loff_t) * total_offsets);
977 - if (!clo->offsets)
978 - {
979 - printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
980 - error=-ENOMEM; goto error_release;
981 - }
982 }
983 - num_readable = MIN(total_offsets - offsets_read,
984 - (clo->underlying_blksize - offset)
985 - / sizeof(loff_t));
986 - memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(loff_t));
987 - offsets_read += num_readable;
988 - }
989 - { /* Search for largest block rather than estimate. KK. */
990 - int i;
991 - for(i=0;i<total_offsets-1;i++)
992 + else
993 + {
994 + printk(KERN_ERR "%s: can't find index\n", cloop_name);
995 + error=-ENOMEM; goto error_release;
996 + }
997 + }
998 + else
999 + {
1000 + unsigned int n, total_bytes;
1001 + flags = 0;
1002 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
1003 + if (!clo->block_ptrs)
1004 + {
1005 + printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
1006 + error=-ENOMEM; goto error_release;
1007 + }
1008 + /* Read them offsets! */
1009 + if(clo->header_first)
1010 + {
1011 + total_bytes = total_offsets * sizeof(struct block_info);
1012 + fs_read_position = sizeof(struct cloop_head);
1013 + }
1014 + else
1016 - loff_t d=be64_to_cpu(clo->offsets[i+1]) - be64_to_cpu(clo->offsets[i]);
1017 - clo->largest_block=MAX(clo->largest_block,d);
1018 + total_bytes = total_offsets * sizeof(loff_t);
1019 + fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
1020 + }
1021 + for(n=0;n<total_bytes;)
1022 + {
1023 + size_t bytes_readable;
1024 + bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
1025 + if(bytes_readable == 0) break; /* Done */
1026 + bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
1027 + if(bytes_read != bytes_readable)
1028 + {
1029 + printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
1030 + cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
1031 + error=-EBADF;
1032 + goto error_release;
1033 + }
1034 + memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
1035 + /* remember where to read the next blk from file */
1036 + fs_read_position += bytes_read;
1037 + n += bytes_read;
1039 - printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
1040 - cloop_name, filename, ntohl(clo->head.num_blocks),
1041 - ntohl(clo->head.block_size), clo->largest_block);
1043 -/* Combo kmalloc used too large chunks (>130000). */
1045 int i;
1046 - for(i=0;i<BUFFERED_BLOCKS;i++)
1047 - {
1048 - clo->buffer[i] = cloop_malloc(ntohl(clo->head.block_size));
1049 - if(!clo->buffer[i])
1050 - {
1051 - printk(KERN_ERR "%s: out of memory for buffer %lu\n",
1052 - cloop_name, (unsigned long) ntohl(clo->head.block_size));
1053 - error=-ENOMEM; goto error_release_free;
1054 - }
1055 - }
1056 + char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
1057 + clo->largest_block = 0;
1058 + for (i = 0; i < clo->head.num_blocks; i++)
1059 + if (clo->block_ptrs[i].size > clo->largest_block)
1060 + clo->largest_block = clo->block_ptrs[i].size;
1061 + printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
1062 + cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
1063 + clo->head.block_size, clo->largest_block);
1064 + }
1065 + {
1066 + int i;
1067 + clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
1068 + (buffers / clo->head.block_size) : 1;
1069 + clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
1070 + clo->buffer = cloop_malloc(clo->num_buffered_blocks * sizeof (char*));
1071 + if (!clo->buffered_blocknum || !clo->buffer)
1072 + {
1073 + printk(KERN_ERR "%s: out of memory for index of cache buffer (%lu bytes)\n",
1074 + cloop_name, (unsigned long)clo->num_buffered_blocks * sizeof (u_int32_t) + sizeof(char*) );
1075 + error=-ENOMEM; goto error_release;
1076 + }
1077 + memset(clo->buffer, 0, clo->num_buffered_blocks * sizeof (char*));
1078 + for(i=0;i<clo->num_buffered_blocks;i++)
1079 + {
1080 + clo->buffered_blocknum[i] = -1;
1081 + clo->buffer[i] = cloop_malloc(clo->head.block_size);
1082 + if(!clo->buffer[i])
1083 + {
1084 + printk(KERN_ERR "%s: out of memory for cache buffer %lu\n",
1085 + cloop_name, (unsigned long) clo->head.block_size);
1086 + error=-ENOMEM; goto error_release_free;
1087 + }
1088 + }
1089 + clo->current_bufnum = 0;
1091 clo->compressed_buffer = cloop_malloc(clo->largest_block);
1092 if(!clo->compressed_buffer)
1093 @@ -557,31 +803,7 @@
1094 cloop_name, clo->largest_block);
1095 error=-ENOMEM; goto error_release_free_buffer;
1097 - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
1098 - if(!clo->zstream.workspace)
1099 - {
1100 - printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
1101 - cloop_name, zlib_inflate_workspacesize());
1102 - error=-ENOMEM; goto error_release_free_all;
1103 - }
1104 - zlib_inflateInit(&clo->zstream);
1105 - if(!isblkdev &&
1106 - be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]) != inode->i_size)
1107 - {
1108 - printk(KERN_ERR "%s: final offset wrong (%Lu not %Lu)\n",
1109 - cloop_name,
1110 - be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]),
1111 - inode->i_size);
1112 - cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
1113 - goto error_release_free_all;
1114 - }
1115 - {
1116 - int i;
1117 - for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1;
1118 - clo->current_bufnum=0;
1119 - }
1120 - set_capacity(clo->clo_disk, (sector_t)(ntohl(clo->head.num_blocks)*
1121 - (ntohl(clo->head.block_size)>>9)));
1122 + set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
1123 clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
1124 if(IS_ERR(clo->clo_thread))
1126 @@ -591,17 +813,17 @@
1128 if(preload > 0)
1130 - clo->preload_array_size = ((preload<=ntohl(clo->head.num_blocks))?preload:ntohl(clo->head.num_blocks));
1131 + clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
1132 clo->preload_size = 0;
1133 if((clo->preload_cache = cloop_malloc(clo->preload_array_size * sizeof(char *))) != NULL)
1135 int i;
1136 for(i=0; i<clo->preload_array_size; i++)
1138 - if((clo->preload_cache[i] = cloop_malloc(ntohl(clo->head.block_size))) == NULL)
1139 + if((clo->preload_cache[i] = cloop_malloc(clo->head.block_size)) == NULL)
1140 { /* Out of memory */
1141 printk(KERN_WARNING "%s: cloop_malloc(%d) failed for preload_cache[%d] (ignored).\n",
1142 - cloop_name, ntohl(clo->head.block_size), i);
1143 + cloop_name, clo->head.block_size, i);
1144 break;
1147 @@ -612,13 +834,13 @@
1148 if(buffered_blocknum >= 0)
1150 memcpy(clo->preload_cache[i], clo->buffer[buffered_blocknum],
1151 - ntohl(clo->head.block_size));
1152 + clo->head.block_size);
1154 else
1156 printk(KERN_WARNING "%s: can't read block %d into preload cache, set to zero.\n",
1157 cloop_name, i);
1158 - memset(clo->preload_cache[i], 0, ntohl(clo->head.block_size));
1159 + memset(clo->preload_cache[i], 0, clo->head.block_size);
1162 printk(KERN_INFO "%s: preloaded %d blocks into cache.\n", cloop_name,
1163 @@ -641,22 +863,19 @@
1164 cloop_free(clo->compressed_buffer, clo->largest_block);
1165 clo->compressed_buffer=NULL;
1166 error_release_free_buffer:
1167 + if(clo->buffer)
1169 int i;
1170 - for(i=0; i<BUFFERED_BLOCKS; i++)
1171 - {
1172 - if(clo->buffer[i])
1173 - {
1174 - cloop_free(clo->buffer[i], ntohl(clo->head.block_size));
1175 - clo->buffer[i]=NULL;
1176 - }
1177 - }
1178 + for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) { cloop_free(clo->buffer[i], clo->head.block_size); clo->buffer[i]=NULL; }}
1179 + cloop_free(clo->buffer, clo->num_buffered_blocks*sizeof(char*)); clo->buffer=NULL;
1181 + if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
1182 error_release_free:
1183 - cloop_free(clo->offsets, sizeof(loff_t) * total_offsets);
1184 - clo->offsets=NULL;
1185 + cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
1186 + clo->block_ptrs=NULL;
1187 error_release:
1188 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
1189 + if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
1190 clo->backing_file=NULL;
1191 return error;
1193 @@ -673,7 +892,7 @@
1194 if(clo->backing_file) return -EBUSY;
1195 file = fget(arg); /* get filp struct from ioctl arg fd */
1196 if(!file) return -EBADF;
1197 - error=cloop_set_file(cloop_num,file,"losetup_file");
1198 + error=cloop_set_file(cloop_num,file);
1199 set_device_ro(bdev, 1);
1200 if(error) fput(file);
1201 return error;
1202 @@ -684,29 +903,48 @@
1204 struct cloop_device *clo = cloop_dev[cloop_num];
1205 struct file *filp = clo->backing_file;
1206 - int i;
1207 if(clo->refcnt > 1) /* we needed one fd for the ioctl */
1208 return -EBUSY;
1209 if(filp==NULL) return -EINVAL;
1210 if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
1211 - if(filp!=initial_file) fput(filp);
1212 - else { filp_close(initial_file,0); initial_file=NULL; }
1213 + if(filp!=initial_file)
1214 + fput(filp);
1215 + else
1216 + {
1217 + filp_close(initial_file,0);
1218 + initial_file=NULL;
1219 + }
1220 clo->backing_file = NULL;
1221 clo->backing_inode = NULL;
1222 - if(clo->offsets) { cloop_free(clo->offsets, clo->underlying_blksize); clo->offsets = NULL; }
1223 + if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
1224 + if(clo->block_ptrs) { cloop_free(clo->block_ptrs, sizeof(struct block_info) * clo->head.num_blocks); clo->block_ptrs = NULL; }
1225 if(clo->preload_cache)
1226 - {
1227 - for(i=0; i < clo->preload_size; i++)
1228 - cloop_free(clo->preload_cache[i], ntohl(clo->head.block_size));
1229 - cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
1230 - clo->preload_cache = NULL;
1231 - clo->preload_size = clo->preload_array_size = 0;
1232 - }
1233 - for(i=0; i<BUFFERED_BLOCKS; i++)
1234 - if(clo->buffer[i]) { cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); clo->buffer[i]=NULL; }
1235 + {
1236 + int i;
1237 + for(i=0; i < clo->preload_size; i++)
1238 + cloop_free(clo->preload_cache[i], clo->head.block_size);
1239 + cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
1240 + clo->preload_cache = NULL;
1241 + clo->preload_size = clo->preload_array_size = 0;
1242 + }
1243 + if (clo->buffered_blocknum)
1244 + {
1245 + cloop_free(clo->buffered_blocknum, sizeof(int) * clo->num_buffered_blocks); clo->buffered_blocknum = NULL;
1246 + }
1247 + if (clo->buffer)
1248 + {
1249 + int i;
1250 + for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) cloop_free(clo->buffer[i], clo->head.block_size); }
1251 + cloop_free(clo->buffer, sizeof(char*) * clo->num_buffered_blocks); clo->buffer = NULL;
1252 + }
1253 if(clo->compressed_buffer) { cloop_free(clo->compressed_buffer, clo->largest_block); clo->compressed_buffer = NULL; }
1254 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
1255 zlib_inflateEnd(&clo->zstream);
1256 if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
1257 +#endif
1258 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
1259 + xz_dec_end(clo->xzdecoderstate);
1260 +#endif
1261 if(bdev) invalidate_bdev(bdev);
1262 if(clo->clo_disk) set_capacity(clo->clo_disk, 0);
1263 return 0;
1264 @@ -731,8 +969,8 @@
1265 const struct loop_info64 *info)
1267 if (!clo->backing_file) return -ENXIO;
1268 - memcpy(clo->clo_file_name, info->lo_file_name, LO_NAME_SIZE);
1269 - clo->clo_file_name[LO_NAME_SIZE-1] = 0;
1270 + if(clo->underlying_filename) kfree(clo->underlying_filename);
1271 + clo->underlying_filename = kstrdup(info->lo_file_name, GFP_KERNEL);
1272 return 0;
1275 @@ -743,7 +981,11 @@
1276 struct kstat stat;
1277 int err;
1278 if (!file) return -ENXIO;
1279 - err = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
1280 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
1281 + err = vfs_getattr(&file->f_path, &stat);
1282 +#else
1283 + err = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
1284 +#endif
1285 if (err) return err;
1286 memset(info, 0, sizeof(*info));
1287 info->lo_number = clo->clo_number;
1288 @@ -753,7 +995,8 @@
1289 info->lo_offset = 0;
1290 info->lo_sizelimit = 0;
1291 info->lo_flags = 0;
1292 - memcpy(info->lo_file_name, clo->clo_file_name, LO_NAME_SIZE);
1293 + strncpy(info->lo_file_name, clo->underlying_filename, LO_NAME_SIZE);
1294 + info->lo_file_name[LO_NAME_SIZE-1]=0;
1295 return 0;
1298 @@ -833,8 +1076,6 @@
1299 if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT;
1300 return err;
1302 -/* EOF get/set_status */
1305 static int cloop_ioctl(struct block_device *bdev, fmode_t mode,
1306 unsigned int cmd, unsigned long arg)
1307 @@ -914,21 +1155,20 @@
1308 /* losetup uses write-open and flags=0x8002 to set a new file */
1309 if(mode & FMODE_WRITE)
1311 - printk(KERN_WARNING "%s: Can't open device read-write in mode 0x%x\n", cloop_name, mode);
1312 + printk(KERN_INFO "%s: Open in read-write mode 0x%x requested, ignored.\n", cloop_name, mode);
1313 return -EROFS;
1315 cloop_dev[cloop_num]->refcnt+=1;
1316 return 0;
1319 -static int cloop_close(struct gendisk *disk, fmode_t mode)
1320 +static void cloop_close(struct gendisk *disk, fmode_t mode)
1322 - int cloop_num, err=0;
1323 - if(!disk) return 0;
1324 + int cloop_num;
1325 + if(!disk) return;
1326 cloop_num=((struct cloop_device *)disk->private_data)->clo_number;
1327 - if(cloop_num < 0 || cloop_num > (cloop_count-1)) return 0;
1328 + if(cloop_num < 0 || cloop_num > (cloop_count-1)) return;
1329 cloop_dev[cloop_num]->refcnt-=1;
1330 - return err;
1333 static struct block_device_operations clo_fops =
1334 @@ -973,6 +1213,10 @@
1335 goto error_out;
1337 clo->clo_queue->queuedata = clo;
1338 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
1339 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
1340 + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
1341 +#endif
1342 clo->clo_disk = alloc_disk(1);
1343 if(!clo->clo_disk)
1345 @@ -1004,6 +1248,11 @@
1346 cloop_dev[cloop_num] = NULL;
1349 +/* LZ4 Stuff */
1350 +#if (defined USE_LZ4_INTERNAL)
1351 +#include "lz4_kmod.c"
1352 +#endif
1354 static int __init cloop_init(void)
1356 int error=0;
1357 @@ -1044,7 +1293,7 @@
1358 initial_file=NULL; /* if IS_ERR, it's NOT open. */
1360 else
1361 - error=cloop_set_file(0,initial_file,file);
1362 + error=cloop_set_file(0,initial_file);
1363 if(error)
1365 printk(KERN_ERR
1366 @@ -1052,9 +1301,6 @@
1367 cloop_name, file, error);
1368 goto init_out_dealloc;
1370 - if(namelen >= LO_NAME_SIZE) namelen = LO_NAME_SIZE-1;
1371 - memcpy(cloop_dev[0]->clo_file_name, file, namelen);
1372 - cloop_dev[0]->clo_file_name[namelen] = 0;
1374 return 0;
1375 init_out_dealloc:
1376 --- cloop.h
1377 +++ cloop.h
1378 @@ -86,11 +86,8 @@
1379 struct cloop_tail
1381 u_int32_t table_size;
1382 - u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
1383 + u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
1384 #define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
1385 -#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4)
1386 -#define CLOOP3_TRUNCATED(x) ((unsigned int)((x) & 0x80) >> 7)
1387 -#define CLOOP3_LASTLEN(x) (unsigned int)((x) >> 8)
1388 u_int32_t num_blocks;
1389 };
1391 @@ -104,8 +101,10 @@
1392 };
1394 static inline char *build_index(struct block_info *offsets, unsigned long n,
1395 - unsigned long block_size, unsigned global_flags)
1396 + unsigned long block_size)
1398 + static char v[11];
1399 + u_int32_t flags = 0;
1400 u_int32_t *ofs32 = (u_int32_t *) offsets;
1401 loff_t *ofs64 = (loff_t *) offsets;
1403 @@ -130,8 +129,6 @@
1405 else { /* V2.0/V4.0 */
1406 loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
1407 - u_int32_t flags;
1408 - static char v4[11];
1409 unsigned long i = n;
1411 for (flags = 0; n-- ;) {
1412 @@ -149,12 +146,7 @@
1413 offsets[i] = offsets[offsets[i].offset];
1416 - strcpy(v4, (char *) "64BE v4.0a");
1417 - v4[10] = 'a' + ((flags-1) & 0xF); // compressors used
1418 - if (flags > 0x10) { // with links ?
1419 - v4[10] += 'A' - 'a';
1420 - }
1421 - return v4;
1422 + strcpy(v, (char *) "64BE v4.0a");
1425 else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
1426 @@ -170,7 +162,6 @@
1427 else { /* V3.0 or V0.68 */
1428 unsigned long i;
1429 loff_t j;
1430 - static char v3[11];
1432 for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
1433 if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
1434 @@ -185,28 +176,33 @@
1437 v3_64 = (ofs32[1] == 0);
1438 - for (i = n; i-- != 0; )
1439 + for (i = n; i-- != 0; ) {
1440 offsets[i].size = ntohl(ofs32[i << v3_64]);
1441 - for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
1442 - offsets[i].offset = j;
1443 - offsets[i].flags = global_flags;
1444 if (offsets[i].size == 0xFFFFFFFF) {
1445 - offsets[i].flags = CLOOP_COMPRESSOR_NONE;
1446 - offsets[i].size = block_size;
1447 + offsets[i].size = 0x10000000 | block_size;
1449 - if ((offsets[i].size & 0x80000000) == 0) {
1450 + offsets[i].flags = (offsets[i].size >> 28);
1451 + offsets[i].size &= 0x0FFFFFFF;
1452 + }
1453 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
1454 + offsets[i].offset = j;
1455 + if (offsets[i].flags < 8) {
1456 j += offsets[i].size;
1459 for (i = 0; i < n; i++) {
1460 - if (offsets[i].size & 0x80000000) {
1461 - offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
1462 + flags |= 1 << offsets[i].flags;
1463 + if (offsets[i].flags >= 8) {
1464 + offsets[i] = offsets[offsets[i].size];
1467 - strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
1468 - v3[10] += global_flags;
1469 - return v3;
1470 + strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
1471 + }
1472 + v[10] = 'a' + ((flags-1) & 0xF); // compressors used
1473 + if (flags > 0x10) { // with links ?
1474 + v[10] += 'A' - 'a';
1476 + return v;
1479 /* Cloop suspend IOCTL */
1480 --- cloop.c
1481 +++ cloop.c
1482 @@ -542,7 +542,7 @@
1483 const unsigned int header_size = sizeof(struct cloop_head);
1484 unsigned int i, total_offsets=0;
1485 loff_t fs_read_position = 0, header_pos[2];
1486 - int flags, isblkdev, bytes_read, error = 0;
1487 + int isblkdev, bytes_read, error = 0;
1488 if (clo->suspended) return error;
1489 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
1490 inode = file->f_dentry->d_inode;
1491 @@ -698,18 +698,12 @@
1492 error=-EBADF; goto error_release;
1494 len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
1495 - flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
1496 -// May 3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
1497 -printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name,
1498 - clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
1499 - ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
1500 -// May 3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at (null)
1501 -printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
1502 + ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
1503 cloop_free(zbuf, zlen);
1504 if (ret != 0)
1506 - printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
1507 - cloop_name, ret, flags);
1508 + printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
1509 + cloop_name, ret);
1510 error=-EBADF; goto error_release;
1513 @@ -722,7 +716,6 @@
1514 else
1516 unsigned int n, total_bytes;
1517 - flags = 0;
1518 clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
1519 if (!clo->block_ptrs)
1521 @@ -761,7 +754,7 @@
1524 int i;
1525 - char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
1526 + char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
1527 clo->largest_block = 0;
1528 for (i = 0; i < clo->head.num_blocks; i++)
1529 if (clo->block_ptrs[i].size > clo->largest_block)
1530 @@ -769,9 +762,6 @@
1531 printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
1532 cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
1533 clo->head.block_size, clo->largest_block);
1534 - }
1535 - {
1536 - int i;
1537 clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
1538 (buffers / clo->head.block_size) : 1;
1539 clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
1540 @@ -874,6 +864,10 @@
1541 cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
1542 clo->block_ptrs=NULL;
1543 error_release:
1544 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
1545 + zlib_inflateEnd(&clo->zstream);
1546 + if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
1547 +#endif
1548 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
1549 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
1550 clo->backing_file=NULL;