wok view linux64-cloop/stuff/cloop.u @ rev 25543

linux-cloop: update cloop.u
author Pascal Bellard <pascal.bellard@slitaz.org>
date Sat Mar 11 12:33:53 2023 +0000
parents 5a92a26adcc1
children
line source
1 --- cloop.h
2 +++ cloop.h
3 @@ -1,3 +1,7 @@
4 +#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */
5 +#define CLOOP_SIGNATURE_SIZE 9
6 +#define CLOOP_SIGNATURE_OFFSET 0x0
7 +
8 #ifndef _COMPRESSED_LOOP_H
9 #define _COMPRESSED_LOOP_H
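
The four added lines above replace the old per-version magics: every cloop image begins with a shell extraction script, so the shebang at offset 0 is a version-independent signature. A minimal user-space sketch of the same check (helper name and I/O wrapper are ours; only the constants come from the patched cloop.h):

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if the file starts with the cloop shebang signature. */
    static int is_cloop_image(FILE *f)
    {
        char buf[9];                      /* CLOOP_SIGNATURE_SIZE */
        if (fseek(f, 0x0, SEEK_SET) != 0  /* CLOOP_SIGNATURE_OFFSET */
         || fread(buf, 1, sizeof(buf), f) != sizeof(buf))
            return 0;
        return memcmp(buf, "#!/bin/sh", sizeof(buf)) == 0;
    }
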
11 @@ -38,10 +42,6 @@
13 #include <linux/types.h> /* u_int32_t */
15 -#ifndef __KERNEL__
16 -#include <stdint.h> /* regular uint64_t */
17 -#endif
18 -
19 #define CLOOP_HEADROOM 128
21 /* Header of fixed length, can be located at beginning or end of file */
22 @@ -52,13 +52,6 @@
23 u_int32_t num_blocks;
24 };
26 -#define CLOOP2_SIGNATURE "V2.0" /* @ offset 0x0b */
27 -#define CLOOP2_SIGNATURE_SIZE 4
28 -#define CLOOP2_SIGNATURE_OFFSET 0x0b
29 -#define CLOOP4_SIGNATURE "V4.0" /* @ offset 0x0b */
30 -#define CLOOP4_SIGNATURE_SIZE 4
31 -#define CLOOP4_SIGNATURE_OFFSET 0x0b
32 -
33 /************************************************************************\
34 * CLOOP4 flags for each compressed block *
35 * Value Meaning *
36 @@ -84,6 +77,134 @@
38 #define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
40 +#define CLOOP_COMPRESSOR_LINK 0xF
41 +
42 +
43 +/* data_index (num_blocks 64bit pointers, network order)... */
44 +/* compressed data (gzip block compressed format)... */
45 +
46 +struct cloop_tail
47 +{
48 + u_int32_t table_size;
49 + u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
50 +#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
51 + u_int32_t num_blocks;
52 +};
53 +
54 +#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12)
55 +
56 +struct block_info
57 +{
58 + loff_t offset; /* 64-bit offset of compressed block */
59 + u_int32_t size; /* 32-bit compressed block size */
60 + u_int32_t flags; /* 32-bit compression flags */
61 +};
62 +
63 +static inline char *build_index(struct block_info *offsets, unsigned long n,
64 + unsigned long block_size)
65 +{
66 + static char v[12]; /* "64BE v4.0a" + compressor letter + NUL */
67 + u_int32_t flags = 0;
68 + u_int32_t *ofs32 = (u_int32_t *) offsets;
69 + loff_t *ofs64 = (loff_t *) offsets;
70 +
71 + /* v3 64bits bug: v1 assumed */
72 + unsigned long v3_64 = (n+1)/2;
73 + loff_t prev;
74 +
75 + if (ofs32[0] != 0 && ofs32[1] == 0) {
76 + for (prev=__le64_to_cpu(ofs64[v3_64]);
77 + v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev;
78 + prev=__le64_to_cpu(ofs64[v3_64]));
79 + }
80 +
81 + if (ofs32[0] == 0) {
82 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
83 + while (n--) {
84 + offsets[n].offset = __be64_to_cpu(offsets[n].offset);
85 + offsets[n].size = ntohl(offsets[n].size);
86 + offsets[n].flags = 0;
87 + }
88 + return (char *) "128BE accelerated knoppix 1.0";
89 + }
90 + else { /* V2.0/V4.0 */
91 + loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
92 + unsigned long i = n;
93 +
94 + for (flags = 0; n-- ;) {
95 + loff_t data = __be64_to_cpu(ofs64[n]);
96 +
97 + offsets[n].size = last -
98 + (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
99 + last = offsets[n].offset;
100 + offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
101 + flags |= 1 << offsets[n].flags;
102 + }
103 + if (flags < 2) return (char *) "64BE v2.0";
104 + while (i--) {
105 + if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
106 + offsets[i] = offsets[offsets[i].offset];
107 + }
108 + }
109 + strcpy(v, (char *) "64BE v4.0a");
110 + }
111 + }
112 + else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
113 + loff_t last = __le64_to_cpu(ofs64[n]);
114 + while (n--) {
115 + offsets[n].size = last -
116 + (offsets[n].offset = __le64_to_cpu(ofs64[n]));
117 + last = offsets[n].offset;
118 + offsets[n].flags = 0;
119 + }
120 + return (char *) "64LE v1.0";
121 + }
122 + else { /* V3.0 or V0.68 */
123 + unsigned long i;
124 + loff_t j;
125 +
126 + for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
127 + if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
128 + loff_t last = ntohl(ofs32[n]);
129 + while (n--) {
130 + offsets[n].size = last -
131 + (offsets[n].offset = ntohl(ofs32[n]));
132 + last = offsets[n].offset;
133 + offsets[n].flags = 0;
134 + }
135 + return (char *) "32BE v0.68";
136 + }
137 +
138 + v3_64 = (ofs32[1] == 0);
139 + for (i = n; i-- != 0; ) {
140 + offsets[i].size = ntohl(ofs32[i << v3_64]);
141 + if (offsets[i].size == 0xFFFFFFFF) {
142 + offsets[i].size = 0x10000000 | block_size;
143 + }
144 + offsets[i].flags = (offsets[i].size >> 28);
145 + offsets[i].size &= 0x0FFFFFFF;
146 + }
147 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
148 + offsets[i].offset = j;
149 + if (offsets[i].flags < 8) {
150 + j += offsets[i].size;
151 + }
152 + }
153 + for (i = 0; i < n; i++) {
154 + flags |= 1 << offsets[i].flags;
155 + if (offsets[i].flags >= 8) {
156 + offsets[i] = offsets[offsets[i].size];
157 + }
158 + }
159 + strcpy(v, v3_64 ? "64BE v3.0a" : "32BE v3.0a");
160 + }
161 + v[10] = 'a' + ((flags-1) & 0xF); // compressors used
162 + if (flags > 0x10) { // with links ?
163 + v[10] += 'A' - 'a';
164 + }
165 + return v;
166 +}
167 +
168 /* Cloop suspend IOCTL */
169 #define CLOOP_SUSPEND 0x4C07
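
The build_index() helper added above normalizes all historical index layouts into one in-memory array of struct block_info and returns a short version string ("128BE accelerated knoppix 1.0", "64LE v1.0", "64BE v2.0", "32BE v0.68", or the v3.0a/v4.0a forms), where the final letter encodes which compressors were seen and is shifted to uppercase when block links occur. In the v3.0 path, each 32-bit table entry packs the compressor into its top nibble and the compressed size into the low 28 bits. A hedged sketch of just that unpacking step (names are ours; constants mirror the loop above):

    #include <stdint.h>

    struct v3_entry { uint32_t size, flags; };

    /* Unpack one v3.0 size-table entry, already byte-swapped to host order. */
    static struct v3_entry v3_unpack(uint32_t raw, uint32_t block_size)
    {
        struct v3_entry e;
        if (raw == 0xFFFFFFFF)            /* special case, as in build_index() */
            raw = 0x10000000 | block_size;
        e.flags = raw >> 28;              /* values >= 8 mark links to other blocks */
        e.size  = raw & 0x0FFFFFFF;
        return e;
    }
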
171 --- cloop.c
172 +++ cloop.c
173 @@ -17,7 +17,7 @@
174 \************************************************************************/
176 #define CLOOP_NAME "cloop"
177 -#define CLOOP_VERSION "5.3"
178 +#define CLOOP_VERSION "4.12"
179 #define CLOOP_MAX 8
181 #ifndef KBUILD_MODNAME
182 @@ -68,7 +68,6 @@
183 #include <linux/loop.h>
184 #include <linux/kthread.h>
185 #include <linux/compat.h>
186 -#include <linux/blk-mq.h> /* new multiqueue infrastructure */
187 #include "cloop.h"
189 /* New License scheme */
190 @@ -93,10 +92,7 @@
191 /* Use experimental major for now */
192 #define MAJOR_NR 240
194 -#ifndef DEVICE_NAME
195 -#define DEVICE_NAME CLOOP_NAME
196 -#endif
197 -
198 +/* #define DEVICE_NAME CLOOP_NAME */
199 /* #define DEVICE_NR(device) (MINOR(device)) */
200 /* #define DEVICE_ON(device) */
201 /* #define DEVICE_OFF(device) */
202 @@ -143,7 +139,7 @@
203 u_int32_t allflags;
205 /* An array of cloop_ptr flags/offset for compressed blocks within the file */
206 - cloop_block_ptr *block_ptrs;
207 + struct block_info *block_ptrs;
209 /* We buffer some uncompressed blocks for performance */
210 size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */
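
Mainline cloop 4.x stores each block pointer as a single 64-bit word with the compressor flags folded into the high bits, unpacked on every access via CLOOP_BLOCK_OFFSET()/CLOOP_BLOCK_FLAGS(); this patch keeps offset, size, and flags in separate struct block_info fields instead, so the I/O path needs no unpacking. For reference, the packed form being replaced looks roughly like this (layout assumed from mainline cloop, not shown in this hunk):

    typedef u_int64_t cloop_block_ptr;
    /* Assumed layout: 4 flag bits on top, 60 offset bits below. */
    #define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffULL)
    #define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)((x) >> 60))
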
211 @@ -178,14 +174,16 @@
212 spinlock_t queue_lock;
213 /* mutex for ioctl() */
214 struct mutex clo_ctl_mutex;
215 - /* mutex for request */
216 - struct mutex clo_rq_mutex;
217 + struct list_head clo_list;
218 + struct task_struct *clo_thread;
219 + wait_queue_head_t clo_event;
220 struct request_queue *clo_queue;
221 struct gendisk *clo_disk;
222 - struct blk_mq_tag_set tag_set;
223 int suspended;
224 };
226 +/* Changed in 2.639: cloop_dev is now an array of cloop_dev pointers,
227 + so we can specify how many devices we need via parameters. */
228 static struct cloop_device **cloop_dev;
229 static const char *cloop_name=CLOOP_NAME;
230 static int cloop_count = 0;
231 @@ -214,24 +212,21 @@
232 vfree(mem);
233 }
235 -/* static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen) */
236 -static int uncompress(struct cloop_device *clo, u_int32_t block_num, u_int32_t compressed_length, unsigned long *uncompressed_length)
237 +static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags)
238 {
239 int err = -1;
240 - int flags = CLOOP_BLOCK_FLAGS(clo->block_ptrs[block_num]);
241 switch(flags)
242 {
243 case CLOOP_COMPRESSOR_NONE:
244 - /* block is umcompressed, swap pointers only! */
245 - { char *tmp = clo->compressed_buffer; clo->compressed_buffer = clo->buffer[clo->current_bufnum]; clo->buffer[clo->current_bufnum] = tmp; }
246 - DEBUGP("cloop: block %d is uncompressed (flags=%d), just swapping %u bytes\n", block_num, flags, compressed_length);
247 + memcpy(dest, source, *destLen = sourceLen);
248 + err = Z_OK;
249 break;
250 #if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
251 case CLOOP_COMPRESSOR_ZLIB:
252 - clo->zstream.next_in = clo->compressed_buffer;
253 - clo->zstream.avail_in = compressed_length;
254 - clo->zstream.next_out = clo->buffer[clo->current_bufnum];
255 - clo->zstream.avail_out = clo->head.block_size;
256 + clo->zstream.next_in = source;
257 + clo->zstream.avail_in = sourceLen;
258 + clo->zstream.next_out = dest;
259 + clo->zstream.avail_out = *destLen;
260 err = zlib_inflateReset(&clo->zstream);
261 if (err != Z_OK)
262 {
263 @@ -239,50 +234,50 @@
264 zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
265 }
266 err = zlib_inflate(&clo->zstream, Z_FINISH);
267 - *uncompressed_length = clo->zstream.total_out;
268 + *destLen = clo->zstream.total_out;
269 if (err == Z_STREAM_END) err = 0;
270 - DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *uncompressed_length);
271 + DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
272 break;
273 #endif
274 #if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
275 case CLOOP_COMPRESSOR_LZO1X:
276 {
277 size_t tmp = (size_t) clo->head.block_size;
278 - err = lzo1x_decompress_safe(clo->compressed_buffer, compressed_length,
279 - clo->buffer[clo->current_bufnum], &tmp);
280 - if (err == LZO_E_OK) *uncompressed_length = (u_int32_t) tmp;
281 + err = lzo1x_decompress_safe(source, sourceLen,
282 + dest, &tmp);
283 + if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
284 }
285 break;
286 #endif
287 #if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
288 case CLOOP_COMPRESSOR_LZ4:
289 {
290 - size_t outputSize = clo->head.block_size;
291 + size_t outputSize = *destLen;
292 /* We should adjust outputSize here, in case the last block is smaller than block_size */
293 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
294 - err = lz4_decompress(clo->compressed_buffer, (size_t *) &compressed_length,
295 - clo->buffer[clo->current_bufnum], outputSize);
296 + err = lz4_decompress(source, (size_t *) &sourceLen,
297 + dest, outputSize);
298 #else
299 - err = LZ4_decompress_safe(clo->compressed_buffer,
300 - clo->buffer[clo->current_bufnum],
301 - compressed_length, outputSize);
302 + err = LZ4_decompress_safe(source,
303 + dest,
304 + sourceLen, outputSize);
305 #endif
306 if (err >= 0)
307 {
308 err = 0;
309 - *uncompressed_length = outputSize;
310 + *destLen = outputSize;
311 }
312 }
313 break;
314 #endif
315 #if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
316 case CLOOP_COMPRESSOR_XZ:
317 - clo->xz_buffer.in = clo->compressed_buffer;
318 + clo->xz_buffer.in = source;
319 clo->xz_buffer.in_pos = 0;
320 - clo->xz_buffer.in_size = compressed_length;
321 - clo->xz_buffer.out = clo->buffer[clo->current_bufnum];
322 + clo->xz_buffer.in_size = sourceLen;
323 + clo->xz_buffer.out = dest;
324 clo->xz_buffer.out_pos = 0;
325 - clo->xz_buffer.out_size = clo->head.block_size;
326 + clo->xz_buffer.out_size = *destLen;
327 xz_dec_reset(clo->xzdecoderstate);
328 err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
329 if (err == XZ_STREAM_END || err == XZ_OK)
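
The refactored uncompress() above takes explicit source/destination buffers plus a flags argument instead of dereferencing clo->block_ptrs by block number. Besides simplifying cloop_load_buffer(), this lets cloop_set_file() reuse the same routine to inflate the compressed v3 index (see the cloop_tail handling further down). The call shape, as it appears in the cloop_load_buffer() hunk below:

    /* Capacity of dest goes in via *destLen; actual output size comes back out. */
    unsigned long out_len = clo->head.block_size;
    int ret = uncompress(clo,
                         clo->buffer[clo->current_bufnum], &out_len,
                         clo->compressed_buffer, compressed_block_len,
                         clo->block_ptrs[blocknum].flags);
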
330 @@ -309,9 +304,12 @@
331 while (buf_done < buf_len)
332 {
333 size_t size = buf_len - buf_done, size_read;
334 - // mutex_lock(&clo->clo_rq_mutex);
335 - size_read = kernel_read(f, buf + buf_done, size, &pos);
336 - // mutex_unlock(&clo->clo_rq_mutex);
337 + /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
338 + /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
339 + mm_segment_t old_fs = get_fs();
340 + set_fs(get_ds());
341 + size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
342 + set_fs(old_fs);
344 if(size_read <= 0)
345 {
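
This hunk swaps the modern four-argument kernel_read() for vfs_read() under set_fs(get_ds()), since the driver targets kernels where vfs_read() rejects kernel-space buffers and the four-argument kernel_read() may not exist yet (it appeared in 4.14; set_fs()/get_ds() were removed much later). A version-switched wrapper in the same spirit, offered only as a sketch of the two approaches shown here:

    /* Sketch: read at an explicit offset on either side of the 4.14 API change. */
    static ssize_t cloop_pread(struct file *f, void *buf, size_t count, loff_t *pos)
    {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
        return kernel_read(f, buf, count, pos);     /* new 4-argument form */
    #else
        mm_segment_t old_fs = get_fs();
        ssize_t n;
        set_fs(get_ds());                           /* permit a kernel buffer */
        n = vfs_read(f, (char __user *)buf, count, pos);
        set_fs(old_fs);
        return n;
    #endif
    }
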
346 @@ -351,8 +349,8 @@
347 return i;
348 }
350 - compressed_block_offset = CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]);
351 - compressed_block_len = (long) (CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum+1]) - compressed_block_offset) ;
352 + compressed_block_offset = clo->block_ptrs[blocknum].offset;
353 + compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;
355 /* Load one compressed block from the file. */
356 if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
357 @@ -362,12 +360,12 @@
358 if (n!= compressed_block_len)
359 {
360 printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
361 - cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
362 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
363 /* return -1; */
364 }
365 } else {
366 printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
367 - cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
368 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
369 return -1;
370 }
372 @@ -375,14 +373,16 @@
373 if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
375 /* Do the uncompression */
376 - ret = uncompress(clo, blocknum, compressed_block_len, &uncompressed_block_len);
377 + uncompressed_block_len = clo->head.block_size;
378 + ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
379 + clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
380 /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
381 if (ret != 0)
382 {
383 printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
384 cloop_name, ret, blocknum,
385 - compressed_block_len, CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]),
386 - CLOOP_BLOCK_FLAGS(clo->block_ptrs[blocknum]));
387 + compressed_block_len, clo->block_ptrs[blocknum].offset,
388 + clo->block_ptrs[blocknum].flags);
389 clo->buffered_blocknum[clo->current_bufnum] = -1;
390 return -1;
391 }
392 @@ -390,107 +390,146 @@
393 return clo->current_bufnum;
394 }
396 -static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
397 +/* This function does all the real work. */
398 +/* returns "uptodate" */
399 +static int cloop_handle_request(struct cloop_device *clo, struct request *req)
400 {
401 int buffered_blocknum = -1;
402 int preloaded = 0;
403 - loff_t offset = (loff_t) blk_rq_pos(req)<<9;
404 + loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
405 struct bio_vec bvec;
406 struct req_iterator iter;
407 - blk_status_t ret = BLK_STS_OK;
408 -
409 - if (unlikely(req_op(req) != REQ_OP_READ ))
410 - {
411 - blk_dump_rq_flags(req, DEVICE_NAME " bad request");
412 - return BLK_STS_IOERR;
413 - }
414 -
415 - if (unlikely(!clo->backing_file && !clo->suspended))
416 - {
417 - DEBUGP("cloop_handle_request: not connected to a file\n");
418 - return BLK_STS_IOERR;
419 - }
420 -
421 rq_for_each_segment(bvec, req, iter)
422 - {
423 - unsigned long len = bvec.bv_len;
424 - loff_t to_offset = bvec.bv_offset;
425 -
426 - while(len > 0)
427 {
428 - u_int32_t length_in_buffer;
429 - loff_t block_offset = offset;
430 - u_int32_t offset_in_buffer;
431 - char *from_ptr, *to_ptr;
432 - /* do_div (div64.h) returns the 64bit division remainder and */
433 - /* puts the result in the first argument, i.e. block_offset */
434 - /* becomes the blocknumber to load, and offset_in_buffer the */
435 - /* position in the buffer */
436 - offset_in_buffer = do_div(block_offset, clo->head.block_size);
437 - /* Lookup preload cache */
438 - if(block_offset < clo->preload_size && clo->preload_cache != NULL && clo->preload_cache[block_offset] != NULL)
439 - { /* Copy from cache */
440 - preloaded = 1;
441 - from_ptr = clo->preload_cache[block_offset];
442 - }
443 - else
444 - {
445 - preloaded = 0;
446 - buffered_blocknum = cloop_load_buffer(clo,block_offset);
447 - if(buffered_blocknum == -1)
448 + unsigned long len = bvec.bv_len;
449 + char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset;
450 + while(len > 0)
451 + {
452 + u_int32_t length_in_buffer;
453 + loff_t block_offset = offset;
454 + u_int32_t offset_in_buffer;
455 + char *from_ptr;
456 + /* do_div (div64.h) returns the 64bit division remainder and */
457 + /* puts the result in the first argument, i.e. block_offset */
458 + /* becomes the blocknumber to load, and offset_in_buffer the */
459 + /* position in the buffer */
460 + offset_in_buffer = do_div(block_offset, clo->head.block_size);
461 + /* Lookup preload cache */
462 + if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
463 + clo->preload_cache[block_offset] != NULL)
464 + { /* Copy from cache */
465 + preloaded = 1;
466 + from_ptr = clo->preload_cache[block_offset];
467 + }
468 + else
469 + {
470 + preloaded = 0;
471 + buffered_blocknum = cloop_load_buffer(clo,block_offset);
472 + if(buffered_blocknum == -1) break; /* invalid data, leave inner loop */
473 + /* Copy from buffer */
474 + from_ptr = clo->buffer[buffered_blocknum];
475 + }
476 + /* Now, at least part of what we want will be in the buffer. */
477 + length_in_buffer = clo->head.block_size - offset_in_buffer;
478 + if(length_in_buffer > len)
479 + {
480 +/* DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
481 + length_in_buffer,len); */
482 + length_in_buffer = len;
483 + }
484 + memcpy(to_ptr, from_ptr + offset_in_buffer, length_in_buffer);
485 + to_ptr += length_in_buffer;
486 + len -= length_in_buffer;
487 + offset += length_in_buffer;
488 + } /* while inner loop */
489 + kunmap(bvec.bv_page);
490 + cond_resched();
491 + } /* end rq_for_each_segment*/
492 + return ((buffered_blocknum!=-1) || preloaded);
493 +}
494 +
495 +/* Adopted from loop.c, a kernel thread to handle physical reads and
496 + decompression. */
497 +static int cloop_thread(void *data)
498 +{
499 + struct cloop_device *clo = data;
500 + current->flags |= PF_NOFREEZE;
501 + set_user_nice(current, 10);
502 + while (!kthread_should_stop()||!list_empty(&clo->clo_list))
503 + {
504 + int err;
505 + err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) ||
506 + kthread_should_stop());
507 + if(unlikely(err))
508 {
509 - ret = BLK_STS_IOERR;
510 - break; /* invalid data, leave inner loop */
511 + DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
512 + continue;
513 }
514 - /* Copy from buffer */
515 - from_ptr = clo->buffer[buffered_blocknum];
516 - }
517 - /* Now, at least part of what we want will be in the buffer. */
518 - length_in_buffer = clo->head.block_size - offset_in_buffer;
519 - if(length_in_buffer > len)
520 - {
521 - /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", length_in_buffer,len); */
522 - length_in_buffer = len;
523 - }
524 - to_ptr = kmap_atomic(bvec.bv_page);
525 - memcpy(to_ptr + to_offset, from_ptr + offset_in_buffer, length_in_buffer);
526 - kunmap_atomic(to_ptr);
527 - to_offset += length_in_buffer;
528 - len -= length_in_buffer;
529 - offset += length_in_buffer;
530 - } /* while inner loop */
531 - } /* rq_for_each_segment */
532 - return ret;
533 -}
534 -
535 -static blk_status_t cloop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
536 -{
537 -// struct request_queue *q = hctx->queue;
538 -// struct cloop_device *clo = q->queuedata;
539 - struct request *req = bd->rq;
540 - struct cloop_device *clo = req->rq_disk->private_data;
541 - blk_status_t ret = BLK_STS_OK;
542 -
543 -#if 1 /* Does it work when loading libraries? */
544 - /* Since we have a buffered block list as well as data to read */
545 - /* from disk (slow), and are (probably) never called from an */
546 - /* interrupt, we use a simple mutex lock right here to ensure */
547 - /* consistency. */
548 - mutex_lock(&clo->clo_rq_mutex);
549 - #else
550 - spin_lock_irq(&clo->queue_lock);
551 - #endif
552 - blk_mq_start_request(req);
553 - do {
554 - ret = cloop_handle_request(clo, req);
555 - } while(blk_update_request(req, ret, blk_rq_cur_bytes(req)));
556 - blk_mq_end_request(req, ret);
557 - #if 1 /* See above */
558 - mutex_unlock(&clo->clo_rq_mutex);
559 - #else
560 - spin_unlock_irq(&clo->queue_lock);
561 - #endif
562 - return ret;
563 + if(!list_empty(&clo->clo_list))
564 + {
565 + struct request *req;
566 + unsigned long flags;
567 + int uptodate;
568 + spin_lock_irq(&clo->queue_lock);
569 + req = list_entry(clo->clo_list.next, struct request, queuelist);
570 + list_del_init(&req->queuelist);
571 + spin_unlock_irq(&clo->queue_lock);
572 + uptodate = cloop_handle_request(clo, req);
573 + spin_lock_irqsave(&clo->queue_lock, flags);
574 + __blk_end_request_all(req, uptodate ? 0 : -EIO);
575 + spin_unlock_irqrestore(&clo->queue_lock, flags);
576 + }
577 + }
578 + DEBUGP(KERN_ERR "cloop_thread exited.\n");
579 + return 0;
580 +}
581 +
582 +/* This is called by the kernel block queue management every now and then,
583 + * with successive read requests queued and sorted in a (hopefully)
584 + * "most efficient way". spin_lock_irq() is being held by the kernel. */
585 +static void cloop_do_request(struct request_queue *q)
586 +{
587 + struct request *req;
588 + while((req = blk_fetch_request(q)) != NULL)
589 + {
590 + struct cloop_device *clo;
591 + int rw;
592 + /* quick sanity checks */
593 + /* blk_fs_request() was removed in 2.6.36 */
594 + if (unlikely(req == NULL
595 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
596 + || (req->cmd_type != REQ_TYPE_FS)
597 +#endif
598 + ))
599 + goto error_continue;
600 + rw = rq_data_dir(req);
601 + if (unlikely(rw != READ
602 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
603 + && rw != READA
604 +#endif
605 + ))
606 + {
607 + DEBUGP("cloop_do_request: bad command\n");
608 + goto error_continue;
609 + }
610 + clo = req->rq_disk->private_data;
611 + if (unlikely(!clo->backing_file && !clo->suspended))
612 + {
613 + DEBUGP("cloop_do_request: not connected to a file\n");
614 + goto error_continue;
615 + }
616 + list_add_tail(&req->queuelist, &clo->clo_list); /* Add to working list for thread */
617 + wake_up(&clo->clo_event); /* Wake up cloop_thread */
618 + continue; /* next request */
619 + error_continue:
620 + DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
621 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
622 + req->errors++;
623 +#else
624 + req->error_count++;
625 +#endif
626 + __blk_end_request_all(req, -EIO);
627 + }
628 }
630 /* Read header, flags and offsets from already opened file */
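
Taken together, the two functions above restore the pre-blk-mq design of the old loop driver: cloop_do_request() runs with queue_lock held and only validates requests and parks them on clo_list, while cloop_thread does the blocking file reads and decompression outside the lock. Stripped to its skeleton (error paths and locking details omitted; names from the patch):

    /* Producer: called by the block layer, must not sleep. */
    static void cloop_do_request(struct request_queue *q)
    {
        struct request *req;
        while ((req = blk_fetch_request(q)) != NULL) {
            struct cloop_device *clo = req->rq_disk->private_data;
            list_add_tail(&req->queuelist, &clo->clo_list);  /* hand off */
            wake_up(&clo->clo_event);
        }
    }

    /* Consumer: kernel thread, free to sleep in vfs_read() and zlib. */
    static int cloop_thread(void *data)
    {
        struct cloop_device *clo = data;
        while (!kthread_should_stop() || !list_empty(&clo->clo_list)) {
            wait_event_interruptible(clo->clo_event,
                !list_empty(&clo->clo_list) || kthread_should_stop());
            /* pop one request under queue_lock, serve it, __blk_end_request_all() */
        }
        return 0;
    }
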
631 @@ -501,7 +540,7 @@
632 char *bbuf=NULL;
633 unsigned int bbuf_size = 0;
634 const unsigned int header_size = sizeof(struct cloop_head);
635 - unsigned int i, offsets_read=0, total_offsets=0;
636 + unsigned int i, total_offsets=0;
637 loff_t fs_read_position = 0, header_pos[2];
638 int isblkdev, bytes_read, error = 0;
639 if (clo->suspended) return error;
640 @@ -521,7 +560,7 @@
641 }
642 clo->backing_file = file;
643 clo->backing_inode= inode ;
644 - clo->underlying_total_size = (isblkdev) ? file->f_mapping->host->i_size : inode->i_size;
645 + clo->underlying_total_size = (isblkdev) ? inode->i_bdev->bd_inode->i_size : inode->i_size;
646 if(clo->underlying_total_size < header_size)
647 {
648 printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n",
649 @@ -531,7 +570,7 @@
650 }
651 if(isblkdev)
652 {
653 - struct request_queue *q = bdev_get_queue(I_BDEV(file->f_mapping->host));
654 + struct request_queue *q = bdev_get_queue(inode->i_bdev);
655 blk_queue_max_hw_sectors(clo->clo_queue, queue_max_hw_sectors(q)); /* Renamed in 2.6.34 */
656 blk_queue_max_segments(clo->clo_queue, queue_max_segments(q)); /* Renamed in 2.6.34 */
657 /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */
658 @@ -540,7 +579,7 @@
659 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
660 blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn);
661 #endif
662 - clo->underlying_blksize = block_size(I_BDEV(file->f_mapping->host));
663 + clo->underlying_blksize = block_size(inode->i_bdev);
664 }
665 else
666 clo->underlying_blksize = PAGE_SIZE;
667 @@ -574,29 +613,19 @@
668 goto error_release;
669 }
670 memcpy(&clo->head, bbuf, header_size);
671 - if (strncmp(bbuf+CLOOP4_SIGNATURE_OFFSET, CLOOP4_SIGNATURE, CLOOP4_SIGNATURE_SIZE)==0)
672 + if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
673 {
674 - clo->file_format=4;
675 + clo->file_format++;
676 clo->head.block_size=ntohl(clo->head.block_size);
677 clo->head.num_blocks=ntohl(clo->head.num_blocks);
678 clo->header_first = (i==0) ? 1 : 0;
679 - printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
680 - break;
681 - }
682 - else if (strncmp(bbuf+CLOOP2_SIGNATURE_OFFSET, CLOOP2_SIGNATURE, CLOOP2_SIGNATURE_SIZE)==0)
683 - {
684 - clo->file_format=2;
685 - clo->head.block_size=ntohl(clo->head.block_size);
686 - clo->head.num_blocks=ntohl(clo->head.num_blocks);
687 - clo->header_first = (i==0) ? 1 : 0;
688 - printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
689 + printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
690 break;
691 }
692 }
693 if (clo->file_format == 0)
694 {
695 - printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
696 - "please use an older version of %s for this file.\n",
697 + printk(KERN_ERR "%s: Cannot detect %s format.\n",
698 cloop_name, cloop_name);
699 error=-EBADF; goto error_release;
700 }
701 @@ -606,67 +635,133 @@
702 cloop_name, clo->head.block_size);
703 error=-EBADF; goto error_release;
704 }
705 - total_offsets=clo->head.num_blocks+1;
706 - if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
707 + total_offsets=clo->head.num_blocks;
708 + if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
709 total_offsets > inode->i_size))
710 {
711 printk(KERN_ERR "%s: file %s too small for %u blocks\n",
712 cloop_name, clo->underlying_filename, clo->head.num_blocks);
713 error=-EBADF; goto error_release;
714 }
715 - clo->block_ptrs = cloop_malloc(sizeof(cloop_block_ptr) * total_offsets);
716 - if (!clo->block_ptrs)
717 + /* Allocate Memory for decompressors */
718 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
719 + clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
720 + if(!clo->zstream.workspace)
721 {
722 - printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
723 + printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
724 + cloop_name, zlib_inflate_workspacesize());
725 error=-ENOMEM; goto error_release;
726 }
727 - /* Read them offsets! */
728 - if(clo->header_first)
729 + zlib_inflateInit(&clo->zstream);
730 +#endif
731 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
732 +#if XZ_INTERNAL_CRC32
733 + /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
734 + xz_crc32_init();
735 +#endif
736 + clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
737 +#endif
738 + if (total_offsets + 1 == 0) /* Version 3 */
739 {
740 - fs_read_position = sizeof(struct cloop_head);
741 + struct cloop_tail tail;
742 + if (isblkdev)
743 + {
744 + /* No end of file: can't find index */
745 + printk(KERN_ERR "%s: no V3 support for block device\n",
746 + cloop_name);
747 + error=-EBADF; goto error_release;
748 + }
749 + bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
750 + inode->i_size - sizeof(struct cloop_tail),
751 + sizeof(struct cloop_tail));
752 + if (bytes_read == sizeof(struct cloop_tail))
753 + {
754 + unsigned long len, zlen;
755 + int ret;
756 + void *zbuf;
757 + clo->head.num_blocks = ntohl(tail.num_blocks);
758 + total_offsets = clo->head.num_blocks;
759 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
760 + zlen = ntohl(tail.table_size);
761 + zbuf = cloop_malloc(zlen);
762 + if (!clo->block_ptrs || !zbuf)
763 + {
764 + printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
765 + error=-ENOMEM; goto error_release;
766 + }
767 + bytes_read = cloop_read_from_file(clo, file, zbuf,
768 + inode->i_size - zlen - sizeof(struct cloop_tail),
769 + zlen);
770 + if (bytes_read != zlen)
771 + {
772 + printk(KERN_ERR "%s: can't read index\n", cloop_name);
773 + error=-EBADF; goto error_release;
774 + }
775 + len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
776 + ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
777 + cloop_free(zbuf, zlen);
778 + if (ret != 0)
779 + {
780 + printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
781 + cloop_name, ret);
782 + error=-EBADF; goto error_release;
783 + }
784 + }
785 + else
786 + {
787 + printk(KERN_ERR "%s: can't find index\n", cloop_name);
788 + error=-ENOMEM; goto error_release;
789 + }
790 }
791 else
792 {
793 - fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_offsets * sizeof(loff_t);
794 - }
795 - for(offsets_read=0;offsets_read<total_offsets;)
796 - {
797 - size_t bytes_readable;
798 - unsigned int num_readable, offset = 0;
799 - bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
800 - if(bytes_readable <= 0) break; /* Done */
801 - bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
802 - if(bytes_read != bytes_readable)
803 + unsigned int n, total_bytes;
804 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
805 + if (!clo->block_ptrs)
806 + {
807 + printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
808 + error=-ENOMEM; goto error_release;
809 + }
810 + /* Read them offsets! */
811 + if(clo->header_first)
812 + {
813 + total_bytes = total_offsets * sizeof(struct block_info);
814 + fs_read_position = sizeof(struct cloop_head);
815 + }
816 + else
817 {
818 - printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
819 - cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
820 - error=-EBADF;
821 - goto error_release;
822 + total_bytes = total_offsets * sizeof(loff_t);
823 + fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
824 }
825 - /* remember where to read the next blk from file */
826 - fs_read_position += bytes_read;
827 - /* calculate how many offsets can be taken from current bbuf */
828 - num_readable = MIN(total_offsets - offsets_read,
829 - bytes_read / sizeof(loff_t));
830 - DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
831 - for (i=0,offset=0; i<num_readable; i++)
832 + for(n=0;n<total_bytes;)
833 {
834 - loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
835 - if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
836 - if(offsets_read > 0)
837 + size_t bytes_readable;
838 + bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
839 + if(bytes_readable <= 0) break; /* Done */
840 + bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
841 + if(bytes_read != bytes_readable)
842 {
843 - loff_t d = CLOOP_BLOCK_OFFSET(tmp) - CLOOP_BLOCK_OFFSET(clo->block_ptrs[offsets_read-1]);
844 - if(d > clo->largest_block) clo->largest_block = d;
845 + printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
846 + cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
847 + error=-EBADF;
848 + goto error_release;
849 }
850 - clo->block_ptrs[offsets_read++] = tmp;
851 - offset += sizeof(loff_t);
852 + memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
853 + /* remember where to read the next blk from file */
854 + fs_read_position += bytes_read;
855 + n += bytes_read;
856 }
857 }
858 - printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
859 - cloop_name, clo->underlying_filename, clo->head.num_blocks,
860 - clo->head.block_size, clo->largest_block);
861 {
862 int i;
863 + char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
864 + clo->largest_block = 0;
865 + for (i = 0; i < clo->head.num_blocks; i++)
866 + if (clo->block_ptrs[i].size > clo->largest_block)
867 + clo->largest_block = clo->block_ptrs[i].size;
868 + printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
869 + cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
870 + clo->head.block_size, clo->largest_block);
871 clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
872 (buffers / clo->head.block_size) : 1;
873 clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
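
For v3 images the index is not an array following the header but a zlib-compressed table at the end of the file, terminated by struct cloop_tail; the kernel detects this via the all-ones num_blocks field (the total_offsets + 1 == 0 test above), reads the tail, inflates the table through the shared uncompress() helper, and hands the result to build_index(). A user-space sketch of locating that table (tool-side code, ours; struct layout from the patched cloop.h):

    #include <arpa/inet.h>   /* ntohl */
    #include <stdint.h>
    #include <stdio.h>

    struct cloop_tail { uint32_t table_size, index_size, num_blocks; };

    /* Return the offset of the compressed index table, or -1 on error. */
    static long v3_table_offset(FILE *f, struct cloop_tail *t)
    {
        if (fseek(f, -(long)sizeof(*t), SEEK_END) != 0 ||
            fread(t, sizeof(*t), 1, f) != 1)
            return -1;
        /* The compressed table sits immediately before the tail. */
        return ftell(f) - (long)sizeof(*t) - (long)ntohl(t->table_size);
    }
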
874 @@ -698,36 +793,14 @@
875 cloop_name, clo->largest_block);
876 error=-ENOMEM; goto error_release_free_buffer;
877 }
878 - /* Allocate Memory for decompressors */
879 -#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
880 - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
881 - if(!clo->zstream.workspace)
882 - {
883 - printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
884 - cloop_name, zlib_inflate_workspacesize());
885 - error=-ENOMEM; goto error_release_free_all;
886 - }
887 - zlib_inflateInit(&clo->zstream);
888 -#endif
889 -#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
890 -#if XZ_INTERNAL_CRC32
891 - /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
892 - xz_crc32_init(void);
893 -#endif
894 - clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
895 -#endif
896 - if(CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]) > clo->underlying_total_size)
897 + set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
898 + clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
899 + if(IS_ERR(clo->clo_thread))
900 {
901 - printk(KERN_ERR "%s: final offset wrong (%llu > %llu)\n",
902 - cloop_name,
903 - CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]),
904 - clo->underlying_total_size);
905 -#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
906 - cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
907 -#endif
908 + error = PTR_ERR(clo->clo_thread);
909 + clo->clo_thread=NULL;
910 goto error_release_free_all;
911 }
912 - set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
913 if(preload > 0)
914 {
915 clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
916 @@ -773,6 +846,7 @@
917 clo->preload_array_size = clo->preload_size = 0;
918 }
919 }
920 + wake_up_process(clo->clo_thread);
921 /* Uncheck */
922 return error;
923 error_release_free_all:
924 @@ -787,9 +861,13 @@
925 }
926 if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
927 error_release_free:
928 - cloop_free(clo->block_ptrs, sizeof(cloop_block_ptr) * total_offsets);
929 + cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
930 clo->block_ptrs=NULL;
931 error_release:
932 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
933 + zlib_inflateEnd(&clo->zstream);
934 + if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
935 +#endif
936 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
937 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
938 clo->backing_file=NULL;
939 @@ -809,7 +887,7 @@
940 file = fget(arg); /* get filp struct from ioctl arg fd */
941 if(!file) return -EBADF;
942 error=cloop_set_file(cloop_num,file);
943 - set_disk_ro(clo->clo_disk, true);
944 + set_device_ro(bdev, 1);
945 if(error) fput(file);
946 return error;
947 }
948 @@ -822,6 +900,7 @@
949 if(clo->refcnt > 1) /* we needed one fd for the ioctl */
950 return -EBUSY;
951 if(filp==NULL) return -EINVAL;
952 + if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
953 if(filp!=initial_file)
954 fput(filp);
955 else
956 @@ -832,7 +911,7 @@
957 clo->backing_file = NULL;
958 clo->backing_inode = NULL;
959 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
960 - if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks+1); clo->block_ptrs = NULL; }
961 + if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
962 if(clo->preload_cache)
963 {
964 int i;
965 @@ -1047,15 +1126,15 @@
966 case LOOP_CLR_FD: /* Change arg */
967 case LOOP_GET_STATUS64: /* Change arg */
968 case LOOP_SET_STATUS64: /* Change arg */
969 - return cloop_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
970 + arg = (unsigned long) compat_ptr(arg);
971 case LOOP_SET_STATUS: /* unchanged */
972 case LOOP_GET_STATUS: /* unchanged */
973 case LOOP_SET_FD: /* unchanged */
974 case LOOP_CHANGE_FD: /* unchanged */
975 - return cloop_ioctl(bdev, mode, cmd, arg);
976 - default:
977 - return -ENOIOCTLCMD;
978 + return cloop_ioctl(bdev, mode, cmd, arg);
979 + break;
980 }
981 + return -ENOIOCTLCMD;
982 }
983 #endif
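
The rewritten compat handler first converts the argument with compat_ptr() for the pointer-carrying commands, then deliberately falls through into the shared cases so every supported command reaches the single cloop_ioctl() call; anything else drops out of the switch to the -ENOIOCTLCMD return. Condensed to its shape:

    switch (cmd) {
    case LOOP_GET_STATUS64:                 /* pointer-carrying: convert... */
        arg = (unsigned long)compat_ptr(arg);
        /* fall through */
    case LOOP_SET_FD:                       /* ...plain commands join here */
        return cloop_ioctl(bdev, mode, cmd, arg);
    }
    return -ENOIOCTLCMD;
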
985 @@ -1086,7 +1165,7 @@
986 cloop_dev[cloop_num]->refcnt-=1;
987 }
989 -static const struct block_device_operations clo_fops =
990 +static struct block_device_operations clo_fops =
991 {
992 owner: THIS_MODULE,
993 open: cloop_open,
994 @@ -1098,12 +1177,6 @@
995 /* locked_ioctl ceased to exist in 2.6.36 */
996 };
998 -static const struct blk_mq_ops cloop_mq_ops = {
999 - .queue_rq = cloop_queue_rq,
1000 -/* .init_request = cloop_init_request, */
1001 -/* .complete = cloop_complete_rq, */
1002 -};
1004 static int cloop_register_blkdev(int major_nr)
1006 return register_blkdev(major_nr, cloop_name);
1007 @@ -1117,73 +1190,45 @@
1009 static int cloop_alloc(int cloop_num)
1011 - struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
1012 - int error = -ENOMEM;
1013 + struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
1014 if(clo == NULL) goto error_out;
1015 cloop_dev[cloop_num] = clo;
1016 memset(clo, 0, sizeof(struct cloop_device));
1017 clo->clo_number = cloop_num;
1018 - clo->tag_set.ops = &cloop_mq_ops;
1019 - clo->tag_set.nr_hw_queues = 1;
1020 - clo->tag_set.queue_depth = 128;
1021 - clo->tag_set.numa_node = NUMA_NO_NODE;
1022 - clo->tag_set.cmd_size = 0; /* No extra data needed */
1023 - /* BLK_MQ_F_BLOCKING is extremely important if we want to call blocking functions like vfs_read */
1024 - clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
1025 - clo->tag_set.driver_data = clo;
1026 - if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
1027 -#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
1028 - clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
1029 - if(IS_ERR(clo->clo_queue))
1030 + clo->clo_thread = NULL;
1031 + init_waitqueue_head(&clo->clo_event);
1032 + spin_lock_init(&clo->queue_lock);
1033 + mutex_init(&clo->clo_ctl_mutex);
1034 + INIT_LIST_HEAD(&clo->clo_list);
1035 + clo->clo_queue = blk_init_queue(cloop_do_request, &clo->queue_lock);
1036 + if(!clo->clo_queue)
1038 printk(KERN_ERR "%s: Unable to alloc queue[%d]\n", cloop_name, cloop_num);
1039 - goto error_out_free_tags;
1040 + goto error_out;
1042 - clo->clo_disk = alloc_disk(1);
1043 -#else
1044 - clo->clo_disk = blk_mq_alloc_disk(&clo->tag_set, NULL);
1045 + clo->clo_queue->queuedata = clo;
1046 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
1047 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
1048 + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
1049 #endif
1050 + clo->clo_disk = alloc_disk(1);
1051 if(!clo->clo_disk)
1053 printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
1054 - goto error_out_free_queue;
1055 + goto error_disk;
1057 -#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)
1058 - clo->clo_disk->queue = clo->clo_queue;
1059 -#else
1060 - clo->clo_disk->minors = 1;
1061 - clo->clo_queue = clo->clo_disk->queue;
1062 -#endif
1063 - clo->clo_queue->queuedata = clo;
1064 - blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
1065 - spin_lock_init(&clo->queue_lock);
1066 - mutex_init(&clo->clo_ctl_mutex);
1067 - mutex_init(&clo->clo_rq_mutex);
1068 clo->clo_disk->major = cloop_major;
1069 clo->clo_disk->first_minor = cloop_num;
1070 clo->clo_disk->fops = &clo_fops;
1071 + clo->clo_disk->queue = clo->clo_queue;
1072 clo->clo_disk->private_data = clo;
1073 sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
1074 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
1075 - error = add_disk(clo->clo_disk);
1076 - if (error)
1077 - goto error_out_free_disk;
1078 -#endif
1079 + add_disk(clo->clo_disk);
1080 return 0;
1081 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
1082 -error_out_free_disk:
1083 - blk_cleanup_disk(clo->clo_disk);
1084 -#endif
1085 -error_out_free_queue:
1086 -#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)
1087 +error_disk:
1088 blk_cleanup_queue(clo->clo_queue);
1089 -error_out_free_tags:
1090 -#endif
1091 - blk_mq_free_tag_set(&clo->tag_set);
1092 -error_out_free_clo:
1093 - cloop_free(clo, sizeof(struct cloop_device));
1094 error_out:
1095 - return error;
1096 + return -ENOMEM;
1099 static void cloop_dealloc(int cloop_num)
1100 @@ -1191,13 +1236,8 @@
1101 struct cloop_device *clo = cloop_dev[cloop_num];
1102 if(clo == NULL) return;
1103 del_gendisk(clo->clo_disk);
1104 -#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
1105 - blk_cleanup_disk(clo->clo_disk);
1106 -#else
1107 blk_cleanup_queue(clo->clo_queue);
1108 put_disk(clo->clo_disk);
1109 -#endif
1110 - blk_mq_free_tag_set(&clo->tag_set);
1111 cloop_free(clo, sizeof(struct cloop_device));
1112 cloop_dev[cloop_num] = NULL;
1114 @@ -1286,3 +1326,8 @@
1115 /* The cloop init and exit function registration (especially needed for Kernel 2.6) */
1116 module_init(cloop_init);
1117 module_exit(cloop_exit);
1119 +#include <linux/vermagic.h>
1120 +#include <linux/compiler.h>
1122 +MODULE_INFO(vermagic, VERMAGIC_STRING);
1123 --- cloop_suspend.c
1124 +++ cloop_suspend.c
1125 @@ -14,6 +14,7 @@
1126 #include <fcntl.h>
1127 #include <unistd.h>
1128 #include <stdio.h>
1129 +#include <stdint.h>
1131 /* We don't use the structure, so that define does not hurt */
1132 #define dev_t int