wok-current view linux64-cloop/stuff/cloop.u @ rev 25278

updated reptyr (0.7.0 -> 0.9.0)
author Hans-Günter Theisgen
date Mon Jul 18 14:55:21 2022 +0100 (2022-07-18)
parents cea6e929d21e
children c81179c4b106
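In short, the patch below makes three sets of changes: cloop.h gains a generic "#!/bin/sh" signature (replacing the separate CLOOP2/CLOOP4 signatures), a struct cloop_tail and struct block_info for the compressed v3 tail index, and a build_index() helper that recognises the v0.68, v1.0, v2.0, v3.0 and v4.0 index layouts; cloop.c is moved off the blk-mq infrastructure back to the legacy request queue serviced by a per-device kernel thread (cloop_do_request feeding cloop_thread), with CLOOP_VERSION set back from 5.3 to 4.12; cloop_suspend.c only gains an include of <stdint.h>.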
line source
--- cloop.h
+++ cloop.h
@@ -1,3 +1,7 @@
+#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */
+#define CLOOP_SIGNATURE_SIZE 9
+#define CLOOP_SIGNATURE_OFFSET 0x0
+
 #ifndef _COMPRESSED_LOOP_H
 #define _COMPRESSED_LOOP_H

@@ -38,10 +42,6 @@

 #include <linux/types.h> /* u_int32_t */

-#ifndef __KERNEL__
-#include <stdint.h> /* regular uint64_t */
-#endif
-
 #define CLOOP_HEADROOM 128

 /* Header of fixed length, can be located at beginning or end of file */
@@ -52,13 +52,6 @@
 u_int32_t num_blocks;
 };

-#define CLOOP2_SIGNATURE "V2.0" /* @ offset 0x0b */
-#define CLOOP2_SIGNATURE_SIZE 4
-#define CLOOP2_SIGNATURE_OFFSET 0x0b
-#define CLOOP4_SIGNATURE "V4.0" /* @ offset 0x0b */
-#define CLOOP4_SIGNATURE_SIZE 4
-#define CLOOP4_SIGNATURE_OFFSET 0x0b
-
 /************************************************************************\
 * CLOOP4 flags for each compressed block *
 * Value Meaning *
@@ -84,6 +77,134 @@

 #define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)

+#define CLOOP_COMPRESSOR_LINK 0xF
+
+
+/* data_index (num_blocks 64bit pointers, network order)... */
+/* compressed data (gzip block compressed format)... */
+
+struct cloop_tail
+{
+ u_int32_t table_size;
+ u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
+#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
+ u_int32_t num_blocks;
+};
+
+#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12)
+
+struct block_info
+{
+ loff_t offset; /* 64-bit offsets of compressed block */
+ u_int32_t size; /* 32-bit compressed block size */
+ u_int32_t flags; /* 32-bit compression flags */
+};
+
+static inline char *build_index(struct block_info *offsets, unsigned long n,
+ unsigned long block_size)
+{
+ static char v[12]; /* 10-char version tag + compressor letter + NUL */
+ u_int32_t flags = 0;
+ u_int32_t *ofs32 = (u_int32_t *) offsets;
+ loff_t *ofs64 = (loff_t *) offsets;
+
+ /* v3 64bits bug: v1 assumed */
+ unsigned long v3_64 = (n+1)/2;
+ loff_t prev;
+
+ if (ofs32[0] != 0 && ofs32[1] == 0) {
+ for (prev=__le64_to_cpu(ofs64[v3_64]);
+ v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev;
+ prev=__le64_to_cpu(ofs64[v3_64]));
+ }
+
+ if (ofs32[0] == 0) {
+ if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
+ while (n--) {
+ offsets[n].offset = __be64_to_cpu(offsets[n].offset);
+ offsets[n].size = ntohl(offsets[n].size);
+ offsets[n].flags = 0;
+ }
+ return (char *) "128BE accelerated knoppix 1.0";
+ }
+ else { /* V2.0/V4.0 */
+ loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
+ unsigned long i = n;
+
+ for (flags = 0; n-- ;) {
+ loff_t data = __be64_to_cpu(ofs64[n]);
+
+ offsets[n].size = last -
+ (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
+ last = offsets[n].offset;
+ offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
+ flags |= 1 << offsets[n].flags;
+ }
+ if (flags < 2) return (char *) "64BE v2.0";
+ while (i--) {
+ if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
+ offsets[i] = offsets[offsets[i].offset];
+ }
+ }
+ strcpy(v, (char *) "64BE v4.0a");
+ }
+ }
+ else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
+ loff_t last = __le64_to_cpu(ofs64[n]);
+ while (n--) {
+ offsets[n].size = last -
+ (offsets[n].offset = __le64_to_cpu(ofs64[n]));
+ last = offsets[n].offset;
+ offsets[n].flags = 0;
+ }
+ return (char *) "64LE v1.0";
+ }
+ else { /* V3.0 or V0.68 */
+ unsigned long i;
+ loff_t j;
+
+ for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
+ if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
+ loff_t last = ntohl(ofs32[n]);
+ while (n--) {
+ offsets[n].size = last -
+ (offsets[n].offset = ntohl(ofs32[n]));
+ last = offsets[n].offset;
+ offsets[n].flags = 0;
+ }
+ return (char *) "32BE v0.68";
+ }
+
+ v3_64 = (ofs32[1] == 0);
+ for (i = n; i-- != 0; ) {
+ offsets[i].size = ntohl(ofs32[i << v3_64]);
+ if (offsets[i].size == 0xFFFFFFFF) {
+ offsets[i].size = 0x10000000 | block_size;
+ }
+ offsets[i].flags = (offsets[i].size >> 28);
+ offsets[i].size &= 0x0FFFFFFF;
+ }
+ for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
+ offsets[i].offset = j;
+ if (offsets[i].flags < 8) {
+ j += offsets[i].size;
+ }
+ }
+ for (i = 0; i < n; i++) {
+ flags |= 1 << offsets[i].flags;
+ if (offsets[i].flags >= 8) {
+ offsets[i] = offsets[offsets[i].size];
+ }
+ }
+ strcpy(v, v3_64 ? "64BE v3.0a" : "32BE v3.0a");
+ }
+ v[10] = 'a' + ((flags-1) & 0xF); // compressors used
+ if (flags > 0x10) { // with links ?
+ v[10] += 'A' - 'a';
+ }
+ return v;
+}
+
 /* Cloop suspend IOCTL */
 #define CLOOP_SUSPEND 0x4C07

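As an aside (illustrative only, not part of the patch): a minimal user-space sketch of how the struct cloop_tail defined above can be located at the end of a v3 image. The field layout and network byte order follow the header; the image path is a placeholder and error handling is kept to the essentials.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl() */

/* Mirrors struct cloop_tail above: three 32-bit fields, stored in
   network byte order at the very end of the image file. */
struct cloop_tail {
    uint32_t table_size;  /* compressed size of the index */
    uint32_t index_size;  /* entry size in the low 4 bits, cf. CLOOP3_INDEX_SIZE() */
    uint32_t num_blocks;
};

int main(int argc, char **argv)
{
    struct cloop_tail tail;
    FILE *f = fopen(argc > 1 ? argv[1] : "image.cloop", "rb"); /* placeholder path */
    if (!f) { perror("fopen"); return 1; }
    if (fseek(f, -(long)sizeof(tail), SEEK_END) != 0 ||
        fread(&tail, sizeof(tail), 1, f) != 1) { fclose(f); return 1; }
    printf("%u blocks, %u bytes of compressed index, %u bytes/entry\n",
           ntohl(tail.num_blocks), ntohl(tail.table_size),
           ntohl(tail.index_size) & 0xF);
    fclose(f);
    return 0;
}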
--- cloop.c
+++ cloop.c
@@ -17,7 +17,7 @@
 \************************************************************************/

 #define CLOOP_NAME "cloop"
-#define CLOOP_VERSION "5.3"
+#define CLOOP_VERSION "4.12"
 #define CLOOP_MAX 8

 #ifndef KBUILD_MODNAME
@@ -68,7 +68,6 @@
 #include <linux/loop.h>
 #include <linux/kthread.h>
 #include <linux/compat.h>
-#include <linux/blk-mq.h> /* new multiqueue infrastructure */
 #include "cloop.h"

 /* New License scheme */
@@ -93,10 +92,7 @@
 /* Use experimental major for now */
 #define MAJOR_NR 240

-#ifndef DEVICE_NAME
-#define DEVICE_NAME CLOOP_NAME
-#endif
-
+/* #define DEVICE_NAME CLOOP_NAME */
 /* #define DEVICE_NR(device) (MINOR(device)) */
 /* #define DEVICE_ON(device) */
 /* #define DEVICE_OFF(device) */
@@ -143,7 +139,7 @@
 u_int32_t allflags;

 /* An array of cloop_ptr flags/offset for compressed blocks within the file */
- cloop_block_ptr *block_ptrs;
+ struct block_info *block_ptrs;

 /* We buffer some uncompressed blocks for performance */
 size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */
@@ -178,14 +174,16 @@
 spinlock_t queue_lock;
 /* mutex for ioctl() */
 struct mutex clo_ctl_mutex;
- /* mutex for request */
- struct mutex clo_rq_mutex;
+ struct list_head clo_list;
+ struct task_struct *clo_thread;
+ wait_queue_head_t clo_event;
 struct request_queue *clo_queue;
 struct gendisk *clo_disk;
- struct blk_mq_tag_set tag_set;
 int suspended;
 };

+/* Changed in 2.639: cloop_dev is now an array of cloop_dev pointers,
+ so we can specify how many devices we need via parameters. */
 static struct cloop_device **cloop_dev;
 static const char *cloop_name=CLOOP_NAME;
 static int cloop_count = 0;
@@ -214,24 +212,21 @@
 vfree(mem);
 }

-/* static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen) */
-static int uncompress(struct cloop_device *clo, u_int32_t block_num, u_int32_t compressed_length, unsigned long *uncompressed_length)
+static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags)
 {
 int err = -1;
- int flags = CLOOP_BLOCK_FLAGS(clo->block_ptrs[block_num]);
 switch(flags)
 {
 case CLOOP_COMPRESSOR_NONE:
- /* block is umcompressed, swap pointers only! */
- { char *tmp = clo->compressed_buffer; clo->compressed_buffer = clo->buffer[clo->current_bufnum]; clo->buffer[clo->current_bufnum] = tmp; }
- DEBUGP("cloop: block %d is uncompressed (flags=%d), just swapping %u bytes\n", block_num, flags, compressed_length);
+ memcpy(dest, source, *destLen = sourceLen);
+ err = Z_OK;
 break;
 #if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
 case CLOOP_COMPRESSOR_ZLIB:
- clo->zstream.next_in = clo->compressed_buffer;
- clo->zstream.avail_in = compressed_length;
- clo->zstream.next_out = clo->buffer[clo->current_bufnum];
- clo->zstream.avail_out = clo->head.block_size;
+ clo->zstream.next_in = source;
+ clo->zstream.avail_in = sourceLen;
+ clo->zstream.next_out = dest;
+ clo->zstream.avail_out = *destLen;
 err = zlib_inflateReset(&clo->zstream);
 if (err != Z_OK)
 {
@@ -239,50 +234,50 @@
 zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
 }
 err = zlib_inflate(&clo->zstream, Z_FINISH);
- *uncompressed_length = clo->zstream.total_out;
+ *destLen = clo->zstream.total_out;
 if (err == Z_STREAM_END) err = 0;
- DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *uncompressed_length);
+ DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
 break;
 #endif
 #if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
 case CLOOP_COMPRESSOR_LZO1X:
 {
 size_t tmp = (size_t) clo->head.block_size;
- err = lzo1x_decompress_safe(clo->compressed_buffer, compressed_length,
- clo->buffer[clo->current_bufnum], &tmp);
- if (err == LZO_E_OK) *uncompressed_length = (u_int32_t) tmp;
+ err = lzo1x_decompress_safe(source, sourceLen,
+ dest, &tmp);
+ if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
 }
 break;
 #endif
 #if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
 case CLOOP_COMPRESSOR_LZ4:
 {
- size_t outputSize = clo->head.block_size;
+ size_t outputSize = *destLen;
 /* We should adjust outputSize here, in case the last block is smaller than block_size */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
- err = lz4_decompress(clo->compressed_buffer, (size_t *) &compressed_length,
- clo->buffer[clo->current_bufnum], outputSize);
+ err = lz4_decompress(source, (size_t *) &sourceLen,
+ dest, outputSize);
 #else
- err = LZ4_decompress_safe(clo->compressed_buffer,
- clo->buffer[clo->current_bufnum],
- compressed_length, outputSize);
+ err = LZ4_decompress_safe(source,
+ dest,
+ sourceLen, outputSize);
 #endif
 if (err >= 0)
 {
 err = 0;
- *uncompressed_length = outputSize;
+ *destLen = outputSize;
 }
 }
 break;
 #endif
 #if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
 case CLOOP_COMPRESSOR_XZ:
- clo->xz_buffer.in = clo->compressed_buffer;
+ clo->xz_buffer.in = source;
 clo->xz_buffer.in_pos = 0;
- clo->xz_buffer.in_size = compressed_length;
- clo->xz_buffer.out = clo->buffer[clo->current_bufnum];
+ clo->xz_buffer.in_size = sourceLen;
+ clo->xz_buffer.out = dest;
 clo->xz_buffer.out_pos = 0;
- clo->xz_buffer.out_size = clo->head.block_size;
+ clo->xz_buffer.out_size = *destLen;
 xz_dec_reset(clo->xzdecoderstate);
 err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
 if (err == XZ_STREAM_END || err == XZ_OK)
@@ -309,16 +304,12 @@
 while (buf_done < buf_len)
 {
 size_t size = buf_len - buf_done, size_read;
- mm_segment_t old_fs;
 /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
 /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
-
- // mutex_lock(&clo->clo_rq_mutex);
- old_fs = get_fs();
- set_fs(KERNEL_DS);
+ mm_segment_t old_fs = get_fs();
+ set_fs(get_ds());
 size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
 set_fs(old_fs);
- // mutex_unlock(&clo->clo_rq_mutex);

 if(size_read <= 0)
 {
@@ -358,8 +349,8 @@
 return i;
 }

- compressed_block_offset = CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]);
- compressed_block_len = (long) (CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum+1]) - compressed_block_offset) ;
+ compressed_block_offset = clo->block_ptrs[blocknum].offset;
+ compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;

 /* Load one compressed block from the file. */
 if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
@@ -369,12 +360,12 @@
 if (n!= compressed_block_len)
 {
 printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
- cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
+ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
 /* return -1; */
 }
 } else {
 printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
- cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
+ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
 return -1;
 }

@@ -382,14 +373,16 @@
 if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;

 /* Do the uncompression */
- ret = uncompress(clo, blocknum, compressed_block_len, &uncompressed_block_len);
+ uncompressed_block_len = clo->head.block_size;
+ ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
+ clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
 /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
 if (ret != 0)
 {
 printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
 cloop_name, ret, blocknum,
- compressed_block_len, CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]),
- CLOOP_BLOCK_FLAGS(clo->block_ptrs[blocknum]));
+ compressed_block_len, clo->block_ptrs[blocknum].offset,
+ clo->block_ptrs[blocknum].flags);
 clo->buffered_blocknum[clo->current_bufnum] = -1;
 return -1;
 }
@@ -397,107 +390,146 @@
 return clo->current_bufnum;
 }

-static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
+/* This function does all the real work. */
+/* returns "uptodate" */
+static int cloop_handle_request(struct cloop_device *clo, struct request *req)
 {
 int buffered_blocknum = -1;
 int preloaded = 0;
- loff_t offset = (loff_t) blk_rq_pos(req)<<9;
+ loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
 struct bio_vec bvec;
 struct req_iterator iter;
- blk_status_t ret = BLK_STS_OK;
-
- if (unlikely(req_op(req) != REQ_OP_READ ))
- {
- blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- return BLK_STS_IOERR;
- }
-
- if (unlikely(!clo->backing_file && !clo->suspended))
- {
- DEBUGP("cloop_handle_request: not connected to a file\n");
- return BLK_STS_IOERR;
- }
-
 rq_for_each_segment(bvec, req, iter)
- {
- unsigned long len = bvec.bv_len;
- loff_t to_offset = bvec.bv_offset;
-
- while(len > 0)
 {
- u_int32_t length_in_buffer;
- loff_t block_offset = offset;
- u_int32_t offset_in_buffer;
- char *from_ptr, *to_ptr;
- /* do_div (div64.h) returns the 64bit division remainder and */
- /* puts the result in the first argument, i.e. block_offset */
- /* becomes the blocknumber to load, and offset_in_buffer the */
- /* position in the buffer */
- offset_in_buffer = do_div(block_offset, clo->head.block_size);
- /* Lookup preload cache */
- if(block_offset < clo->preload_size && clo->preload_cache != NULL && clo->preload_cache[block_offset] != NULL)
- { /* Copy from cache */
- preloaded = 1;
- from_ptr = clo->preload_cache[block_offset];
- }
- else
- {
- preloaded = 0;
- buffered_blocknum = cloop_load_buffer(clo,block_offset);
- if(buffered_blocknum == -1)
+ unsigned long len = bvec.bv_len;
+ char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset;
+ while(len > 0)
 {
- ret = BLK_STS_IOERR;
- break; /* invalid data, leave inner loop */
+ u_int32_t length_in_buffer;
+ loff_t block_offset = offset;
+ u_int32_t offset_in_buffer;
+ char *from_ptr;
+ /* do_div (div64.h) returns the 64bit division remainder and */
+ /* puts the result in the first argument, i.e. block_offset */
+ /* becomes the blocknumber to load, and offset_in_buffer the */
+ /* position in the buffer */
+ offset_in_buffer = do_div(block_offset, clo->head.block_size);
+ /* Lookup preload cache */
+ if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
+ clo->preload_cache[block_offset] != NULL)
+ { /* Copy from cache */
+ preloaded = 1;
+ from_ptr = clo->preload_cache[block_offset];
+ }
+ else
+ {
+ preloaded = 0;
+ buffered_blocknum = cloop_load_buffer(clo,block_offset);
+ if(buffered_blocknum == -1) break; /* invalid data, leave inner loop */
+ /* Copy from buffer */
+ from_ptr = clo->buffer[buffered_blocknum];
+ }
+ /* Now, at least part of what we want will be in the buffer. */
+ length_in_buffer = clo->head.block_size - offset_in_buffer;
+ if(length_in_buffer > len)
+ {
+/* DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
+ length_in_buffer,len); */
+ length_in_buffer = len;
+ }
+ memcpy(to_ptr, from_ptr + offset_in_buffer, length_in_buffer);
+ to_ptr += length_in_buffer;
+ len -= length_in_buffer;
+ offset += length_in_buffer;
+ } /* while inner loop */
+ kunmap(bvec.bv_page);
+ cond_resched();
+ } /* end rq_for_each_segment*/
+ return ((buffered_blocknum!=-1) || preloaded);
+}
+
+/* Adopted from loop.c, a kernel thread to handle physical reads and
+ decompression. */
+static int cloop_thread(void *data)
+{
+ struct cloop_device *clo = data;
+ current->flags |= PF_NOFREEZE;
+ set_user_nice(current, 10);
+ while (!kthread_should_stop()||!list_empty(&clo->clo_list))
+ {
 int err;
 err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) ||
 kthread_should_stop());
 if(unlikely(err))
 {
 DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
 continue;
 }
- /* Copy from buffer */
- from_ptr = clo->buffer[buffered_blocknum];
- }
- /* Now, at least part of what we want will be in the buffer. */
- length_in_buffer = clo->head.block_size - offset_in_buffer;
- if(length_in_buffer > len)
- {
- /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", length_in_buffer,len); */
- length_in_buffer = len;
- }
- to_ptr = kmap_atomic(bvec.bv_page);
- memcpy(to_ptr + to_offset, from_ptr + offset_in_buffer, length_in_buffer);
- kunmap_atomic(to_ptr);
- to_offset += length_in_buffer;
- len -= length_in_buffer;
- offset += length_in_buffer;
- } /* while inner loop */
- } /* rq_for_each_segment */
- return ret;
-}
-
-static blk_status_t cloop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
-{
-// struct request_queue *q = hctx->queue;
-// struct cloop_device *clo = q->queuedata;
- struct request *req = bd->rq;
- struct cloop_device *clo = req->rq_disk->private_data;
- blk_status_t ret = BLK_STS_OK;
-
-#if 1 /* Does it work when loading libraries? */
- /* Since we have a buffered block list as well as data to read */
- /* from disk (slow), and are (probably) never called from an */
- /* interrupt, we use a simple mutex lock right here to ensure */
- /* consistency. */
- mutex_lock(&clo->clo_rq_mutex);
- #else
- spin_lock_irq(&clo->queue_lock);
- #endif
- blk_mq_start_request(req);
- do {
- ret = cloop_handle_request(clo, req);
- } while(blk_update_request(req, ret, blk_rq_cur_bytes(req)));
- blk_mq_end_request(req, ret);
- #if 1 /* See above */
- mutex_unlock(&clo->clo_rq_mutex);
- #else
- spin_unlock_irq(&clo->queue_lock);
- #endif
- return ret;
+ if(!list_empty(&clo->clo_list))
+ {
+ struct request *req;
+ unsigned long flags;
+ int uptodate;
+ spin_lock_irq(&clo->queue_lock);
+ req = list_entry(clo->clo_list.next, struct request, queuelist);
+ list_del_init(&req->queuelist);
+ spin_unlock_irq(&clo->queue_lock);
+ uptodate = cloop_handle_request(clo, req);
+ spin_lock_irqsave(&clo->queue_lock, flags);
+ __blk_end_request_all(req, uptodate ? 0 : -EIO);
+ spin_unlock_irqrestore(&clo->queue_lock, flags);
+ }
+ }
+ DEBUGP(KERN_ERR "cloop_thread exited.\n");
+ return 0;
+}
+
+/* This is called by the kernel block queue management every now and then,
+ * with successive read requests queued and sorted in a (hopefully)
587 + * "most efficient way". spin_lock_irq() is being held by the kernel. */
588 +static void cloop_do_request(struct request_queue *q)
589 +{
590 + struct request *req;
591 + while((req = blk_fetch_request(q)) != NULL)
592 + {
593 + struct cloop_device *clo;
594 + int rw;
595 + /* quick sanity checks */
596 + /* blk_fs_request() was removed in 2.6.36 */
597 + if (unlikely(req == NULL
598 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
599 + || (req->cmd_type != REQ_TYPE_FS)
600 +#endif
601 + ))
602 + goto error_continue;
603 + rw = rq_data_dir(req);
604 + if (unlikely(rw != READ
605 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
606 + && rw != READA
607 +#endif
608 + ))
609 + {
610 + DEBUGP("cloop_do_request: bad command\n");
611 + goto error_continue;
612 + }
613 + clo = req->rq_disk->private_data;
614 + if (unlikely(!clo->backing_file && !clo->suspended))
615 + {
616 + DEBUGP("cloop_do_request: not connected to a file\n");
617 + goto error_continue;
618 + }
619 + list_add_tail(&req->queuelist, &clo->clo_list); /* Add to working list for thread */
620 + wake_up(&clo->clo_event); /* Wake up cloop_thread */
621 + continue; /* next request */
622 + error_continue:
623 + DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
624 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
625 + req->errors++;
626 +#else
627 + req->error_count++;
628 +#endif
629 + __blk_end_request_all(req, -EIO);
630 + }
631 }
633 /* Read header, flags and offsets from already opened file */
@@ -508,7 +540,7 @@
 char *bbuf=NULL;
 unsigned int bbuf_size = 0;
 const unsigned int header_size = sizeof(struct cloop_head);
- unsigned int i, offsets_read=0, total_offsets=0;
+ unsigned int i, total_offsets=0;
 loff_t fs_read_position = 0, header_pos[2];
 int isblkdev, bytes_read, error = 0;
 if (clo->suspended) return error;
@@ -581,29 +613,19 @@
 goto error_release;
 }
 memcpy(&clo->head, bbuf, header_size);
- if (strncmp(bbuf+CLOOP4_SIGNATURE_OFFSET, CLOOP4_SIGNATURE, CLOOP4_SIGNATURE_SIZE)==0)
+ if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
 {
- clo->file_format=4;
+ clo->file_format++;
 clo->head.block_size=ntohl(clo->head.block_size);
 clo->head.num_blocks=ntohl(clo->head.num_blocks);
 clo->header_first = (i==0) ? 1 : 0;
- printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
- break;
- }
- else if (strncmp(bbuf+CLOOP2_SIGNATURE_OFFSET, CLOOP2_SIGNATURE, CLOOP2_SIGNATURE_SIZE)==0)
- {
- clo->file_format=2;
- clo->head.block_size=ntohl(clo->head.block_size);
- clo->head.num_blocks=ntohl(clo->head.num_blocks);
- clo->header_first = (i==0) ? 1 : 0;
- printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
+ printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
 break;
 }
 }
 if (clo->file_format == 0)
 {
- printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
- "please use an older version of %s for this file.\n",
+ printk(KERN_ERR "%s: Cannot detect %s format.\n",
 cloop_name, cloop_name);
 error=-EBADF; goto error_release;
 }
@@ -613,67 +635,133 @@
 cloop_name, clo->head.block_size);
 error=-EBADF; goto error_release;
 }
- total_offsets=clo->head.num_blocks+1;
- if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
+ total_offsets=clo->head.num_blocks;
+ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
 total_offsets > inode->i_size))
 {
 printk(KERN_ERR "%s: file %s too small for %u blocks\n",
 cloop_name, clo->underlying_filename, clo->head.num_blocks);
 error=-EBADF; goto error_release;
 }
- clo->block_ptrs = cloop_malloc(sizeof(cloop_block_ptr) * total_offsets);
- if (!clo->block_ptrs)
+ /* Allocate Memory for decompressors */
+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
+ clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
+ if(!clo->zstream.workspace)
 {
- printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
+ printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
+ cloop_name, zlib_inflate_workspacesize());
 error=-ENOMEM; goto error_release;
 }
- /* Read them offsets! */
- if(clo->header_first)
+ zlib_inflateInit(&clo->zstream);
+#endif
+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
+#if XZ_INTERNAL_CRC32
+ /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
+ xz_crc32_init(void);
+#endif
+ clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
+#endif
+ if (total_offsets + 1 == 0) /* Version 3 */
 {
- fs_read_position = sizeof(struct cloop_head);
+ struct cloop_tail tail;
+ if (isblkdev)
+ {
+ /* No end of file: can't find index */
+ printk(KERN_ERR "%s: no V3 support for block device\n",
+ cloop_name);
+ error=-EBADF; goto error_release;
+ }
+ bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
+ inode->i_size - sizeof(struct cloop_tail),
+ sizeof(struct cloop_tail));
+ if (bytes_read == sizeof(struct cloop_tail))
+ {
+ unsigned long len, zlen;
+ int ret;
+ void *zbuf;
+ clo->head.num_blocks = ntohl(tail.num_blocks);
+ total_offsets = clo->head.num_blocks;
+ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
+ zlen = ntohl(tail.table_size);
+ zbuf = cloop_malloc(zlen);
+ if (!clo->block_ptrs || !zbuf)
+ {
+ printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
+ error=-ENOMEM; goto error_release;
+ }
+ bytes_read = cloop_read_from_file(clo, file, zbuf,
+ inode->i_size - zlen - sizeof(struct cloop_tail),
+ zlen);
+ if (bytes_read != zlen)
+ {
+ printk(KERN_ERR "%s: can't read index\n", cloop_name);
+ error=-EBADF; goto error_release;
+ }
+ len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
+ ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
+ cloop_free(zbuf, zlen);
+ if (ret != 0)
+ {
+ printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
+ cloop_name, ret);
+ error=-EBADF; goto error_release;
+ }
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't find index\n", cloop_name);
+ error=-ENOMEM; goto error_release;
+ }
 }
 else
 {
- fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_offsets * sizeof(loff_t);
- }
- for(offsets_read=0;offsets_read<total_offsets;)
- {
- size_t bytes_readable;
- unsigned int num_readable, offset = 0;
- bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
- if(bytes_readable <= 0) break; /* Done */
- bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
- if(bytes_read != bytes_readable)
+ unsigned int n, total_bytes;
+ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
+ if (!clo->block_ptrs)
+ {
+ printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
+ error=-ENOMEM; goto error_release;
+ }
+ /* Read them offsets! */
+ if(clo->header_first)
 {
- printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
- cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
- error=-EBADF;
- goto error_release;
+ total_bytes = total_offsets * sizeof(struct block_info);
+ fs_read_position = sizeof(struct cloop_head);
 }
- /* remember where to read the next blk from file */
- fs_read_position += bytes_read;
- /* calculate how many offsets can be taken from current bbuf */
- num_readable = MIN(total_offsets - offsets_read,
- bytes_read / sizeof(loff_t));
- DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
- for (i=0,offset=0; i<num_readable; i++)
+ else
 {
- loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
- if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
- if(offsets_read > 0)
+ total_bytes = total_offsets * sizeof(loff_t);
+ fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
+ }
+ for(n=0;n<total_bytes;)
+ {
+ size_t bytes_readable;
+ bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
+ if(bytes_readable <= 0) break; /* Done */
+ bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
+ if(bytes_read != bytes_readable)
 {
- loff_t d = CLOOP_BLOCK_OFFSET(tmp) - CLOOP_BLOCK_OFFSET(clo->block_ptrs[offsets_read-1]);
- if(d > clo->largest_block) clo->largest_block = d;
+ printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
+ cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
+ error=-EBADF;
+ goto error_release;
 }
- clo->block_ptrs[offsets_read++] = tmp;
- offset += sizeof(loff_t);
+ memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
+ /* remember where to read the next blk from file */
+ fs_read_position += bytes_read;
+ n += bytes_read;
 }
 }
- printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
- cloop_name, clo->underlying_filename, clo->head.num_blocks,
- clo->head.block_size, clo->largest_block);
 {
 int i;
+ char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
+ clo->largest_block = 0;
+ for (i = 0; i < clo->head.num_blocks; i++)
+ if (clo->block_ptrs[i].size > clo->largest_block)
+ clo->largest_block = clo->block_ptrs[i].size;
+ printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
+ cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
+ clo->head.block_size, clo->largest_block);
 clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
 (buffers / clo->head.block_size) : 1;
 clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
@@ -705,36 +793,14 @@
 cloop_name, clo->largest_block);
 error=-ENOMEM; goto error_release_free_buffer;
 }
- /* Allocate Memory for decompressors */
-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
- clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
- if(!clo->zstream.workspace)
- {
- printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
- cloop_name, zlib_inflate_workspacesize());
- error=-ENOMEM; goto error_release_free_all;
- }
- zlib_inflateInit(&clo->zstream);
-#endif
-#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
-#if XZ_INTERNAL_CRC32
- /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
- xz_crc32_init(void);
-#endif
- clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
-#endif
- if(CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]) > clo->underlying_total_size)
+ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
+ clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
+ if(IS_ERR(clo->clo_thread))
 {
- printk(KERN_ERR "%s: final offset wrong (%llu > %llu)\n",
- cloop_name,
- CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]),
- clo->underlying_total_size);
-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
- cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
-#endif
+ error = PTR_ERR(clo->clo_thread);
+ clo->clo_thread=NULL;
 goto error_release_free_all;
 }
- set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
 if(preload > 0)
 {
 clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
@@ -780,6 +846,7 @@
 clo->preload_array_size = clo->preload_size = 0;
 }
 }
+ wake_up_process(clo->clo_thread);
 /* Uncheck */
 return error;
 error_release_free_all:
@@ -794,9 +861,13 @@
 }
 if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
 error_release_free:
- cloop_free(clo->block_ptrs, sizeof(cloop_block_ptr) * total_offsets);
+ cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
 clo->block_ptrs=NULL;
 error_release:
+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
+ zlib_inflateEnd(&clo->zstream);
+ if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
+#endif
 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
 clo->backing_file=NULL;
@@ -829,6 +900,7 @@
 if(clo->refcnt > 1) /* we needed one fd for the ioctl */
 return -EBUSY;
 if(filp==NULL) return -EINVAL;
+ if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
 if(filp!=initial_file)
 fput(filp);
 else
@@ -839,7 +911,7 @@
 clo->backing_file = NULL;
 clo->backing_inode = NULL;
 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
- if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks+1); clo->block_ptrs = NULL; }
+ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
 if(clo->preload_cache)
 {
 int i;
@@ -1054,15 +1126,15 @@
 case LOOP_CLR_FD: /* Change arg */
 case LOOP_GET_STATUS64: /* Change arg */
 case LOOP_SET_STATUS64: /* Change arg */
- return cloop_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+ arg = (unsigned long) compat_ptr(arg);
 case LOOP_SET_STATUS: /* unchanged */
 case LOOP_GET_STATUS: /* unchanged */
 case LOOP_SET_FD: /* unchanged */
 case LOOP_CHANGE_FD: /* unchanged */
- return cloop_ioctl(bdev, mode, cmd, arg);
- default:
- return -ENOIOCTLCMD;
+ return cloop_ioctl(bdev, mode, cmd, arg);
+ break;
 }
+ return -ENOIOCTLCMD;
 }
 #endif

@@ -1093,7 +1165,7 @@
 cloop_dev[cloop_num]->refcnt-=1;
 }

-static const struct block_device_operations clo_fops =
+static struct block_device_operations clo_fops =
 {
 owner: THIS_MODULE,
 open: cloop_open,
@@ -1105,12 +1177,6 @@
 /* locked_ioctl ceased to exist in 2.6.36 */
 };

-static const struct blk_mq_ops cloop_mq_ops = {
- .queue_rq = cloop_queue_rq,
-/* .init_request = cloop_init_request, */
-/* .complete = cloop_complete_rq, */
-};
-
 static int cloop_register_blkdev(int major_nr)
 {
 return register_blkdev(major_nr, cloop_name);
@@ -1124,37 +1190,33 @@

 static int cloop_alloc(int cloop_num)
 {
- struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
+ struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
979 + struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));;
980 if(clo == NULL) goto error_out;
981 cloop_dev[cloop_num] = clo;
982 memset(clo, 0, sizeof(struct cloop_device));
983 clo->clo_number = cloop_num;
984 - clo->tag_set.ops = &cloop_mq_ops;
985 - clo->tag_set.nr_hw_queues = 1;
986 - clo->tag_set.queue_depth = 128;
987 - clo->tag_set.numa_node = NUMA_NO_NODE;
988 - clo->tag_set.cmd_size = 0; /* No extra data needed */
989 - /* BLK_MQ_F_BLOCKING is extremely important if we want to call blocking functions like vfs_read */
990 - clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
991 - clo->tag_set.driver_data = clo;
992 - if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
993 - clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
994 - if(IS_ERR(clo->clo_queue))
995 + clo->clo_thread = NULL;
996 + init_waitqueue_head(&clo->clo_event);
997 + spin_lock_init(&clo->queue_lock);
998 + mutex_init(&clo->clo_ctl_mutex);
999 + INIT_LIST_HEAD(&clo->clo_list);
1000 + clo->clo_queue = blk_init_queue(cloop_do_request, &clo->queue_lock);
1001 + if(!clo->clo_queue)
1003 printk(KERN_ERR "%s: Unable to alloc queue[%d]\n", cloop_name, cloop_num);
1004 - goto error_out_free_tags;
1005 + goto error_out;
1007 clo->clo_queue->queuedata = clo;
1008 - blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
1009 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
1010 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
1011 + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
1012 +#endif
1013 clo->clo_disk = alloc_disk(1);
1014 if(!clo->clo_disk)
1016 printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
1017 - goto error_out_free_queue;
1018 + goto error_disk;
1020 - spin_lock_init(&clo->queue_lock);
1021 - mutex_init(&clo->clo_ctl_mutex);
1022 - mutex_init(&clo->clo_rq_mutex);
1023 clo->clo_disk->major = cloop_major;
1024 clo->clo_disk->first_minor = cloop_num;
1025 clo->clo_disk->fops = &clo_fops;
1026 @@ -1163,12 +1225,8 @@
1027 sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
1028 add_disk(clo->clo_disk);
1029 return 0;
1030 -error_out_free_queue:
1031 +error_disk:
1032 blk_cleanup_queue(clo->clo_queue);
1033 -error_out_free_tags:
1034 - blk_mq_free_tag_set(&clo->tag_set);
1035 -error_out_free_clo:
1036 - cloop_free(clo, sizeof(struct cloop_device));
1037 error_out:
1038 return -ENOMEM;
1040 @@ -1179,7 +1237,6 @@
1041 if(clo == NULL) return;
1042 del_gendisk(clo->clo_disk);
1043 blk_cleanup_queue(clo->clo_queue);
1044 - blk_mq_free_tag_set(&clo->tag_set);
1045 put_disk(clo->clo_disk);
1046 cloop_free(clo, sizeof(struct cloop_device));
1047 cloop_dev[cloop_num] = NULL;
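For illustration only (not part of the patch): build_index() in cloop.h reconstructs the v3 index from 32-bit words whose top nibble carries the compressor flags and whose low 28 bits carry the compressed size, with flag values of 8 and above marking link entries that reuse another block's data. A tiny user-space decoder of one such word, mirroring the shifts in build_index():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* ntohl(), htonl() */

int main(void)
{
    /* Made-up on-disk value: compressor code 2 in bits 31..28, size 4096 below. */
    uint32_t word = htonl((2u << 28) | 4096u);
    uint32_t v = ntohl(word);       /* index words are stored big-endian */
    uint32_t flags = v >> 28;       /* same shift as in build_index() */
    uint32_t size = v & 0x0FFFFFFF; /* same mask as in build_index() */
    printf("flags=%u size=%u link=%s\n", flags, size, flags >= 8 ? "yes" : "no");
    return 0;
}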
--- cloop_suspend.c
+++ cloop_suspend.c
@@ -14,6 +14,7 @@
 #include <fcntl.h>
 #include <unistd.h>
 #include <stdio.h>
+#include <stdint.h>

 /* We don't use the structure, so that define does not hurt */
 #define dev_t int
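Finally, a hedged sketch of how the CLOOP_SUSPEND ioctl (0x4C07, declared in cloop.h above) can be issued from user space; /dev/cloop0 is assumed to be an already configured device node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define CLOOP_SUSPEND 0x4C07 /* from cloop.h */

int main(void)
{
    int fd = open("/dev/cloop0", O_RDONLY); /* assumed device node */
    if (fd < 0) { perror("open"); return 1; }
    if (ioctl(fd, CLOOP_SUSPEND) < 0) { perror("ioctl"); close(fd); return 1; }
    close(fd);
    return 0;
}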