wok-current annotate linux64-cloop/stuff/cloop.u @ rev 25278

updated reptyr (0.7.0 -> 0.9.0)
author Hans-Günter Theisgen
date Mon Jul 18 14:55:21 2022 +0100 (2022-07-18)
parents cea6e929d21e
children c81179c4b106
rev   line source
pascal@17214 1 --- cloop.h
pascal@17214 2 +++ cloop.h
pascal@24983 3 @@ -1,3 +1,7 @@
pascal@23761 4 +#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */
pascal@23761 5 +#define CLOOP_SIGNATURE_SIZE 9
pascal@23761 6 +#define CLOOP_SIGNATURE_OFFSET 0x0
pascal@23761 7 +
pascal@23761 8 #ifndef _COMPRESSED_LOOP_H
pascal@23761 9 #define _COMPRESSED_LOOP_H
pascal@23761 10
pascal@24983 11 @@ -38,10 +42,6 @@
pascal@23761 12
pascal@24983 13 #include <linux/types.h> /* u_int32_t */
pascal@23761 14
pascal@24983 15 -#ifndef __KERNEL__
pascal@24983 16 -#include <stdint.h> /* regular uint64_t */
pascal@24983 17 -#endif
pascal@24983 18 -
pascal@24983 19 #define CLOOP_HEADROOM 128
pascal@24983 20
pascal@24983 21 /* Header of fixed length, can be located at beginning or end of file */
pascal@24983 22 @@ -52,13 +52,6 @@
pascal@23761 23 u_int32_t num_blocks;
pascal@23761 24 };
pascal@23761 25
pascal@24983 26 -#define CLOOP2_SIGNATURE "V2.0" /* @ offset 0x0b */
pascal@24983 27 -#define CLOOP2_SIGNATURE_SIZE 4
pascal@24983 28 -#define CLOOP2_SIGNATURE_OFFSET 0x0b
pascal@24983 29 -#define CLOOP4_SIGNATURE "V4.0" /* @ offset 0x0b */
pascal@24983 30 -#define CLOOP4_SIGNATURE_SIZE 4
pascal@24983 31 -#define CLOOP4_SIGNATURE_OFFSET 0x0b
pascal@24983 32 -
pascal@24983 33 /************************************************************************\
pascal@24983 34 * CLOOP4 flags for each compressed block *
pascal@24983 35 * Value Meaning *
pascal@24983 36 @@ -84,6 +77,134 @@
pascal@24983 37
pascal@24983 38 #define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
pascal@24983 39
pascal@23761 40 +#define CLOOP_COMPRESSOR_LINK 0xF
pascal@23761 41 +
pascal@23761 42 +
pascal@24983 43 +/* data_index (num_blocks 64bit pointers, network order)... */
pascal@24983 44 +/* compressed data (gzip block compressed format)... */
pascal@24983 45 +
pascal@17214 46 +struct cloop_tail
pascal@17214 47 +{
pascal@23761 48 + u_int32_t table_size;
pascal@24983 49 + u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
pascal@23761 50 +#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
pascal@17214 51 + u_int32_t num_blocks;
pascal@17214 52 +};
pascal@17214 53 +
pascal@23761 54 +#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12)
pascal@23761 55 +
pascal@17214 56 +struct block_info
pascal@17214 57 +{
pascal@17214 58 + loff_t offset; /* 64-bit offsets of compressed block */
pascal@17214 59 + u_int32_t size; /* 32-bit compressed block size */
pascal@23761 60 + u_int32_t flags; /* 32-bit compression flags */
pascal@17214 61 +};
pascal@17214 62 +
pascal@23761 63 +static inline char *build_index(struct block_info *offsets, unsigned long n,
pascal@24983 64 + unsigned long block_size)
pascal@17214 65 +{
pascal@24983 66 + static char v[11];
pascal@24983 67 + u_int32_t flags = 0;
pascal@17214 68 + u_int32_t *ofs32 = (u_int32_t *) offsets;
pascal@17214 69 + loff_t *ofs64 = (loff_t *) offsets;
pascal@23761 70 +
pascal@23761 71 + /* v3 64bits bug: v1 assumed */
pascal@23761 72 + unsigned long v3_64 = (n+1)/2;
pascal@23761 73 + loff_t prev;
pascal@23761 74 +
pascal@23761 75 + if (ofs32[0] != 0 && ofs32[1] == 0) {
pascal@23761 76 + for (prev=__le64_to_cpu(ofs64[v3_64]);
pascal@23761 77 + v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev;
pascal@23761 78 + prev=__le64_to_cpu(ofs64[v3_64]));
pascal@23761 79 + }
pascal@23761 80 +
pascal@17214 81 + if (ofs32[0] == 0) {
pascal@17214 82 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
pascal@17214 83 + while (n--) {
pascal@17214 84 + offsets[n].offset = __be64_to_cpu(offsets[n].offset);
pascal@17214 85 + offsets[n].size = ntohl(offsets[n].size);
pascal@23761 86 + offsets[n].flags = 0;
pascal@17214 87 + }
pascal@17214 88 + return (char *) "128BE accelerated knoppix 1.0";
pascal@17214 89 + }
pascal@23761 90 + else { /* V2.0/V4.0 */
pascal@23761 91 + loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
pascal@23761 92 + unsigned long i = n;
pascal@23761 93 +
pascal@23761 94 + for (flags = 0; n-- ;) {
pascal@23761 95 + loff_t data = __be64_to_cpu(ofs64[n]);
pascal@23761 96 +
pascal@17214 97 + offsets[n].size = last -
pascal@23761 98 + (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
pascal@17214 99 + last = offsets[n].offset;
pascal@23761 100 + offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
pascal@23761 101 + flags |= 1 << offsets[n].flags;
pascal@17214 102 + }
pascal@23761 103 + if (flags < 2) return (char *) "64BE v2.0";
pascal@23761 104 + while (i--) {
pascal@23761 105 + if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
pascal@23761 106 + offsets[i] = offsets[offsets[i].offset];
pascal@23761 107 + }
pascal@23761 108 + }
pascal@24983 109 + strcpy(v, (char *) "64BE v4.0a");
pascal@17214 110 + }
pascal@17214 111 + }
pascal@23761 112 + else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
pascal@23761 113 + loff_t last = __le64_to_cpu(ofs64[n]);
pascal@17214 114 + while (n--) {
pascal@17214 115 + offsets[n].size = last -
pascal@17214 116 + (offsets[n].offset = __le64_to_cpu(ofs64[n]));
pascal@17214 117 + last = offsets[n].offset;
pascal@23761 118 + offsets[n].flags = 0;
pascal@17214 119 + }
pascal@17214 120 + return (char *) "64LE v1.0";
pascal@17214 121 + }
pascal@23761 122 + else { /* V3.0 or V0.68 */
pascal@17214 123 + unsigned long i;
pascal@17214 124 + loff_t j;
pascal@17214 125 +
pascal@23761 126 + for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
pascal@23761 127 + if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
pascal@23761 128 + loff_t last = ntohl(ofs32[n]);
pascal@23761 129 + while (n--) {
pascal@23761 130 + offsets[n].size = last -
pascal@23761 131 + (offsets[n].offset = ntohl(ofs32[n]));
pascal@23761 132 + last = offsets[n].offset;
pascal@23761 133 + offsets[n].flags = 0;
pascal@23761 134 + }
pascal@23761 135 + return (char *) "32BE v0.68";
pascal@23761 136 + }
pascal@23761 137 +
pascal@23761 138 + v3_64 = (ofs32[1] == 0);
pascal@24983 139 + for (i = n; i-- != 0; ) {
pascal@23761 140 + offsets[i].size = ntohl(ofs32[i << v3_64]);
pascal@24983 141 + if (offsets[i].size == 0xFFFFFFFF) {
pascal@24983 142 + offsets[i].size = 0x10000000 | block_size;
pascal@24983 143 + }
pascal@24983 144 + offsets[i].flags = (offsets[i].size >> 28);
pascal@24983 145 + offsets[i].size &= 0x0FFFFFFF;
pascal@24983 146 + }
pascal@17214 147 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
pascal@17214 148 + offsets[i].offset = j;
pascal@24983 149 + if (offsets[i].flags < 8) {
pascal@23761 150 + j += offsets[i].size;
pascal@23761 151 + }
pascal@23761 152 + }
pascal@23761 153 + for (i = 0; i < n; i++) {
pascal@24983 154 + flags |= 1 << offsets[i].flags;
pascal@24983 155 + if (offsets[i].flags >= 8) {
pascal@24983 156 + offsets[i] = offsets[offsets[i].size];
pascal@18828 157 + }
pascal@17214 158 + }
pascal@24983 159 + strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
pascal@17214 160 + }
pascal@24983 161 + v[10] = 'a' + ((flags-1) & 0xF); // compressors used
pascal@24983 162 + if (flags > 0x10) { // with links ?
pascal@24983 163 + v[10] += 'A' - 'a';
pascal@24983 164 + }
pascal@24983 165 + return v;
pascal@17214 166 +}
pascal@17214 167 +
pascal@17214 168 /* Cloop suspend IOCTL */
pascal@17214 169 #define CLOOP_SUSPEND 0x4C07
pascal@17214 170
pascal@17214 171 --- cloop.c
pascal@17214 172 +++ cloop.c
pascal@24983 173 @@ -17,7 +17,7 @@
pascal@24983 174 \************************************************************************/
pascal@23761 175
pascal@23761 176 #define CLOOP_NAME "cloop"
pascal@24983 177 -#define CLOOP_VERSION "5.3"
pascal@23761 178 +#define CLOOP_VERSION "4.12"
pascal@23761 179 #define CLOOP_MAX 8
pascal@23761 180
pascal@23761 181 #ifndef KBUILD_MODNAME
pascal@24983 182 @@ -68,7 +68,6 @@
pascal@23761 183 #include <linux/loop.h>
pascal@23761 184 #include <linux/kthread.h>
pascal@23761 185 #include <linux/compat.h>
pascal@24983 186 -#include <linux/blk-mq.h> /* new multiqueue infrastructure */
pascal@24983 187 #include "cloop.h"
pascal@23761 188
pascal@24983 189 /* New License scheme */
pascal@24983 190 @@ -93,10 +92,7 @@
pascal@24983 191 /* Use experimental major for now */
pascal@24983 192 #define MAJOR_NR 240
pascal@23761 193
pascal@24983 194 -#ifndef DEVICE_NAME
pascal@24983 195 -#define DEVICE_NAME CLOOP_NAME
pascal@24983 196 -#endif
pascal@24983 197 -
pascal@24983 198 +/* #define DEVICE_NAME CLOOP_NAME */
pascal@24983 199 /* #define DEVICE_NR(device) (MINOR(device)) */
pascal@24983 200 /* #define DEVICE_ON(device) */
pascal@24983 201 /* #define DEVICE_OFF(device) */
pascal@24983 202 @@ -143,7 +139,7 @@
pascal@24983 203 u_int32_t allflags;
pascal@23761 204
pascal@24983 205 /* An array of cloop_ptr flags/offset for compressed blocks within the file */
pascal@24983 206 - cloop_block_ptr *block_ptrs;
pascal@23761 207 + struct block_info *block_ptrs;
pascal@17214 208
pascal@17214 209 /* We buffer some uncompressed blocks for performance */
pascal@24983 210 size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */
pascal@24983 211 @@ -178,14 +174,16 @@
pascal@24983 212 spinlock_t queue_lock;
pascal@24983 213 /* mutex for ioctl() */
pascal@24983 214 struct mutex clo_ctl_mutex;
pascal@24983 215 - /* mutex for request */
pascal@24983 216 - struct mutex clo_rq_mutex;
pascal@24983 217 + struct list_head clo_list;
pascal@24983 218 + struct task_struct *clo_thread;
pascal@24983 219 + wait_queue_head_t clo_event;
pascal@23761 220 struct request_queue *clo_queue;
pascal@23761 221 struct gendisk *clo_disk;
pascal@24983 222 - struct blk_mq_tag_set tag_set;
pascal@23761 223 int suspended;
pascal@23761 224 };
pascal@23761 225
pascal@24983 226 +/* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers,
pascal@24983 227 + so we can specify how many devices we need via parameters. */
pascal@24983 228 static struct cloop_device **cloop_dev;
pascal@23761 229 static const char *cloop_name=CLOOP_NAME;
pascal@23761 230 static int cloop_count = 0;
pascal@24983 231 @@ -214,24 +212,21 @@
pascal@24983 232 vfree(mem);
pascal@23761 233 }
pascal@23761 234
pascal@24983 235 -/* static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen) */
pascal@24983 236 -static int uncompress(struct cloop_device *clo, u_int32_t block_num, u_int32_t compressed_length, unsigned long *uncompressed_length)
pascal@23761 237 +static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags)
pascal@23761 238 {
pascal@24983 239 int err = -1;
pascal@24983 240 - int flags = CLOOP_BLOCK_FLAGS(clo->block_ptrs[block_num]);
pascal@24983 241 switch(flags)
pascal@24983 242 {
pascal@24983 243 case CLOOP_COMPRESSOR_NONE:
pascal@24983 244 - /* block is umcompressed, swap pointers only! */
pascal@24983 245 - { char *tmp = clo->compressed_buffer; clo->compressed_buffer = clo->buffer[clo->current_bufnum]; clo->buffer[clo->current_bufnum] = tmp; }
pascal@24983 246 - DEBUGP("cloop: block %d is uncompressed (flags=%d), just swapping %u bytes\n", block_num, flags, compressed_length);
pascal@23761 247 + memcpy(dest, source, *destLen = sourceLen);
pascal@23761 248 + err = Z_OK;
pascal@24983 249 break;
pascal@24983 250 #if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
pascal@24983 251 case CLOOP_COMPRESSOR_ZLIB:
pascal@24983 252 - clo->zstream.next_in = clo->compressed_buffer;
pascal@24983 253 - clo->zstream.avail_in = compressed_length;
pascal@24983 254 - clo->zstream.next_out = clo->buffer[clo->current_bufnum];
pascal@24983 255 - clo->zstream.avail_out = clo->head.block_size;
pascal@23761 256 + clo->zstream.next_in = source;
pascal@23761 257 + clo->zstream.avail_in = sourceLen;
pascal@23761 258 + clo->zstream.next_out = dest;
pascal@23761 259 + clo->zstream.avail_out = *destLen;
pascal@24983 260 err = zlib_inflateReset(&clo->zstream);
pascal@24983 261 if (err != Z_OK)
pascal@24983 262 {
pascal@24983 263 @@ -239,50 +234,50 @@
pascal@24983 264 zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
pascal@24983 265 }
pascal@24983 266 err = zlib_inflate(&clo->zstream, Z_FINISH);
pascal@24983 267 - *uncompressed_length = clo->zstream.total_out;
pascal@23761 268 + *destLen = clo->zstream.total_out;
pascal@24983 269 if (err == Z_STREAM_END) err = 0;
pascal@24983 270 - DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *uncompressed_length);
pascal@23761 271 + DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
pascal@24983 272 break;
pascal@24983 273 #endif
pascal@24983 274 #if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
pascal@24983 275 case CLOOP_COMPRESSOR_LZO1X:
pascal@24983 276 {
pascal@24983 277 size_t tmp = (size_t) clo->head.block_size;
pascal@24983 278 - err = lzo1x_decompress_safe(clo->compressed_buffer, compressed_length,
pascal@24983 279 - clo->buffer[clo->current_bufnum], &tmp);
pascal@24983 280 - if (err == LZO_E_OK) *uncompressed_length = (u_int32_t) tmp;
pascal@23761 281 + err = lzo1x_decompress_safe(source, sourceLen,
pascal@23761 282 + dest, &tmp);
pascal@23761 283 + if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
pascal@24983 284 }
pascal@24983 285 break;
pascal@24983 286 #endif
pascal@24983 287 #if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
pascal@24983 288 case CLOOP_COMPRESSOR_LZ4:
pascal@24983 289 {
pascal@24983 290 - size_t outputSize = clo->head.block_size;
pascal@23761 291 + size_t outputSize = *destLen;
pascal@24983 292 /* We should adjust outputSize here, in case the last block is smaller than block_size */
pascal@24983 293 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
pascal@24983 294 - err = lz4_decompress(clo->compressed_buffer, (size_t *) &compressed_length,
pascal@24983 295 - clo->buffer[clo->current_bufnum], outputSize);
pascal@23761 296 + err = lz4_decompress(source, (size_t *) &sourceLen,
pascal@23761 297 + dest, outputSize);
pascal@24983 298 #else
pascal@24983 299 - err = LZ4_decompress_safe(clo->compressed_buffer,
pascal@24983 300 - clo->buffer[clo->current_bufnum],
pascal@24983 301 - compressed_length, outputSize);
pascal@23761 302 + err = LZ4_decompress_safe(source,
pascal@23761 303 + dest,
pascal@23761 304 + sourceLen, outputSize);
pascal@24983 305 #endif
pascal@24983 306 if (err >= 0)
pascal@24983 307 {
pascal@24983 308 err = 0;
pascal@24983 309 - *uncompressed_length = outputSize;
pascal@23761 310 + *destLen = outputSize;
pascal@24983 311 }
pascal@24983 312 }
pascal@24983 313 break;
pascal@24983 314 #endif
pascal@24983 315 #if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
pascal@24983 316 case CLOOP_COMPRESSOR_XZ:
pascal@24983 317 - clo->xz_buffer.in = clo->compressed_buffer;
pascal@23761 318 + clo->xz_buffer.in = source;
pascal@24983 319 clo->xz_buffer.in_pos = 0;
pascal@24983 320 - clo->xz_buffer.in_size = compressed_length;
pascal@24983 321 - clo->xz_buffer.out = clo->buffer[clo->current_bufnum];
pascal@23761 322 + clo->xz_buffer.in_size = sourceLen;
pascal@23761 323 + clo->xz_buffer.out = dest;
pascal@24983 324 clo->xz_buffer.out_pos = 0;
pascal@24983 325 - clo->xz_buffer.out_size = clo->head.block_size;
pascal@23761 326 + clo->xz_buffer.out_size = *destLen;
pascal@24983 327 xz_dec_reset(clo->xzdecoderstate);
pascal@24983 328 err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
pascal@24983 329 if (err == XZ_STREAM_END || err == XZ_OK)
pascal@24983 330 @@ -309,16 +304,12 @@
pascal@24983 331 while (buf_done < buf_len)
pascal@24983 332 {
pascal@24983 333 size_t size = buf_len - buf_done, size_read;
pascal@24983 334 - mm_segment_t old_fs;
pascal@24983 335 /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
pascal@24983 336 /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
pascal@24983 337 -
pascal@24983 338 - // mutex_lock(&clo->clo_rq_mutex);
pascal@24983 339 - old_fs = get_fs();
pascal@24983 340 - set_fs(KERNEL_DS);
pascal@24983 341 + mm_segment_t old_fs = get_fs();
pascal@24983 342 + set_fs(get_ds());
pascal@24983 343 size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
pascal@24983 344 set_fs(old_fs);
pascal@24983 345 - // mutex_unlock(&clo->clo_rq_mutex);
pascal@23761 346
pascal@23761 347 if(size_read <= 0)
pascal@23761 348 {
pascal@24983 349 @@ -358,8 +349,8 @@
pascal@24983 350 return i;
pascal@24983 351 }
pascal@23761 352
pascal@24983 353 - compressed_block_offset = CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]);
pascal@24983 354 - compressed_block_len = (long) (CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum+1]) - compressed_block_offset) ;
pascal@23761 355 + compressed_block_offset = clo->block_ptrs[blocknum].offset;
pascal@23761 356 + compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;
pascal@17214 357
pascal@24983 358 /* Load one compressed block from the file. */
pascal@24983 359 if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
pascal@24983 360 @@ -369,12 +360,12 @@
pascal@24983 361 if (n!= compressed_block_len)
pascal@24983 362 {
pascal@24983 363 printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
pascal@24983 364 - cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
pascal@23761 365 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
pascal@24983 366 /* return -1; */
pascal@24983 367 }
pascal@24983 368 } else {
pascal@24983 369 printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
pascal@24983 370 - cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
pascal@23761 371 + cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
pascal@24983 372 return -1;
pascal@24983 373 }
pascal@24983 374
pascal@24983 375 @@ -382,14 +373,16 @@
pascal@24983 376 if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
pascal@17214 377
pascal@23761 378 /* Do the uncompression */
pascal@24983 379 - ret = uncompress(clo, blocknum, compressed_block_len, &uncompressed_block_len);
pascal@23761 380 + uncompressed_block_len = clo->head.block_size;
pascal@23761 381 + ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
pascal@23761 382 + clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
pascal@23761 383 /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
pascal@17214 384 if (ret != 0)
pascal@24983 385 {
pascal@24983 386 printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
pascal@24983 387 cloop_name, ret, blocknum,
pascal@24983 388 - compressed_block_len, CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]),
pascal@24983 389 - CLOOP_BLOCK_FLAGS(clo->block_ptrs[blocknum]));
pascal@23761 390 + compressed_block_len, clo->block_ptrs[blocknum].offset,
pascal@23761 391 + clo->block_ptrs[blocknum].flags);
pascal@24983 392 clo->buffered_blocknum[clo->current_bufnum] = -1;
pascal@24983 393 return -1;
pascal@24983 394 }
pascal@24983 395 @@ -397,107 +390,146 @@
pascal@23761 396 return clo->current_bufnum;
pascal@23761 397 }
pascal@23761 398
pascal@24983 399 -static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
pascal@24983 400 +/* This function does all the real work. */
pascal@23761 401 +/* returns "uptodate" */
pascal@24983 402 +static int cloop_handle_request(struct cloop_device *clo, struct request *req)
pascal@23761 403 {
pascal@23761 404 int buffered_blocknum = -1;
pascal@23761 405 int preloaded = 0;
pascal@24983 406 - loff_t offset = (loff_t) blk_rq_pos(req)<<9;
pascal@24983 407 + loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
pascal@24983 408 struct bio_vec bvec;
pascal@23761 409 struct req_iterator iter;
pascal@24983 410 - blk_status_t ret = BLK_STS_OK;
pascal@24983 411 -
pascal@24983 412 - if (unlikely(req_op(req) != REQ_OP_READ ))
pascal@24983 413 - {
pascal@24983 414 - blk_dump_rq_flags(req, DEVICE_NAME " bad request");
pascal@24983 415 - return BLK_STS_IOERR;
pascal@24983 416 - }
pascal@24983 417 -
pascal@24983 418 - if (unlikely(!clo->backing_file && !clo->suspended))
pascal@24983 419 - {
pascal@24983 420 - DEBUGP("cloop_handle_request: not connected to a file\n");
pascal@24983 421 - return BLK_STS_IOERR;
pascal@24983 422 - }
pascal@24983 423 -
pascal@23761 424 rq_for_each_segment(bvec, req, iter)
pascal@24983 425 - {
pascal@24983 426 - unsigned long len = bvec.bv_len;
pascal@24983 427 - loff_t to_offset = bvec.bv_offset;
pascal@24983 428 -
pascal@24983 429 - while(len > 0)
pascal@17214 430 {
pascal@24983 431 - u_int32_t length_in_buffer;
pascal@24983 432 - loff_t block_offset = offset;
pascal@24983 433 - u_int32_t offset_in_buffer;
pascal@24983 434 - char *from_ptr, *to_ptr;
pascal@24983 435 - /* do_div (div64.h) returns the 64bit division remainder and */
pascal@24983 436 - /* puts the result in the first argument, i.e. block_offset */
pascal@24983 437 - /* becomes the blocknumber to load, and offset_in_buffer the */
pascal@24983 438 - /* position in the buffer */
pascal@24983 439 - offset_in_buffer = do_div(block_offset, clo->head.block_size);
pascal@24983 440 - /* Lookup preload cache */
pascal@24983 441 - if(block_offset < clo->preload_size && clo->preload_cache != NULL && clo->preload_cache[block_offset] != NULL)
pascal@24983 442 - { /* Copy from cache */
pascal@24983 443 - preloaded = 1;
pascal@24983 444 - from_ptr = clo->preload_cache[block_offset];
pascal@24983 445 - }
pascal@24983 446 - else
pascal@24983 447 - {
pascal@24983 448 - preloaded = 0;
pascal@24983 449 - buffered_blocknum = cloop_load_buffer(clo,block_offset);
pascal@24983 450 - if(buffered_blocknum == -1)
pascal@23761 451 + unsigned long len = bvec.bv_len;
pascal@23761 452 + char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset;
pascal@24983 453 + while(len > 0)
pascal@23761 454 {
pascal@24983 455 - ret = BLK_STS_IOERR;
pascal@24983 456 - break; /* invalid data, leave inner loop */
pascal@24983 457 + u_int32_t length_in_buffer;
pascal@24983 458 + loff_t block_offset = offset;
pascal@24983 459 + u_int32_t offset_in_buffer;
pascal@24983 460 + char *from_ptr;
pascal@24983 461 + /* do_div (div64.h) returns the 64bit division remainder and */
pascal@24983 462 + /* puts the result in the first argument, i.e. block_offset */
pascal@24983 463 + /* becomes the blocknumber to load, and offset_in_buffer the */
pascal@24983 464 + /* position in the buffer */
pascal@23761 465 + offset_in_buffer = do_div(block_offset, clo->head.block_size);
pascal@24983 466 + /* Lookup preload cache */
pascal@24983 467 + if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
pascal@24983 468 + clo->preload_cache[block_offset] != NULL)
pascal@24983 469 + { /* Copy from cache */
pascal@24983 470 + preloaded = 1;
pascal@24983 471 + from_ptr = clo->preload_cache[block_offset];
pascal@24983 472 + }
pascal@24983 473 + else
pascal@24983 474 + {
pascal@24983 475 + preloaded = 0;
pascal@24983 476 + buffered_blocknum = cloop_load_buffer(clo,block_offset);
pascal@24983 477 + if(buffered_blocknum == -1) break; /* invalid data, leave inner loop */
pascal@24983 478 + /* Copy from buffer */
pascal@24983 479 + from_ptr = clo->buffer[buffered_blocknum];
pascal@24983 480 + }
pascal@24983 481 + /* Now, at least part of what we want will be in the buffer. */
pascal@23761 482 + length_in_buffer = clo->head.block_size - offset_in_buffer;
pascal@24983 483 + if(length_in_buffer > len)
pascal@24983 484 + {
pascal@24983 485 +/* DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
pascal@24983 486 + length_in_buffer,len); */
pascal@24983 487 + length_in_buffer = len;
pascal@24983 488 + }
pascal@24983 489 + memcpy(to_ptr, from_ptr + offset_in_buffer, length_in_buffer);
pascal@24983 490 + to_ptr += length_in_buffer;
pascal@24983 491 + len -= length_in_buffer;
pascal@24983 492 + offset += length_in_buffer;
pascal@24983 493 + } /* while inner loop */
pascal@23761 494 + kunmap(bvec.bv_page);
pascal@23761 495 + cond_resched();
pascal@24983 496 + } /* end rq_for_each_segment*/
pascal@24983 497 + return ((buffered_blocknum!=-1) || preloaded);
pascal@24983 498 +}
pascal@24983 499 +
pascal@24983 500 +/* Adopted from loop.c, a kernel thread to handle physical reads and
pascal@23761 501 + decompression. */
pascal@24983 502 +static int cloop_thread(void *data)
pascal@24983 503 +{
pascal@24983 504 + struct cloop_device *clo = data;
pascal@24983 505 + current->flags |= PF_NOFREEZE;
pascal@23761 506 + set_user_nice(current, 10);
pascal@24983 507 + while (!kthread_should_stop()||!list_empty(&clo->clo_list))
pascal@24983 508 + {
pascal@24983 509 + int err;
pascal@24983 510 + err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) ||
pascal@24983 511 + kthread_should_stop());
pascal@24983 512 + if(unlikely(err))
pascal@24983 513 + {
pascal@24983 514 + DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
pascal@24983 515 + continue;
pascal@24983 516 }
pascal@24983 517 - /* Copy from buffer */
pascal@24983 518 - from_ptr = clo->buffer[buffered_blocknum];
pascal@24983 519 - }
pascal@24983 520 - /* Now, at least part of what we want will be in the buffer. */
pascal@24983 521 - length_in_buffer = clo->head.block_size - offset_in_buffer;
pascal@24983 522 - if(length_in_buffer > len)
pascal@24983 523 - {
pascal@24983 524 - /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", length_in_buffer,len); */
pascal@24983 525 - length_in_buffer = len;
pascal@24983 526 - }
pascal@24983 527 - to_ptr = kmap_atomic(bvec.bv_page);
pascal@24983 528 - memcpy(to_ptr + to_offset, from_ptr + offset_in_buffer, length_in_buffer);
pascal@24983 529 - kunmap_atomic(to_ptr);
pascal@24983 530 - to_offset += length_in_buffer;
pascal@24983 531 - len -= length_in_buffer;
pascal@24983 532 - offset += length_in_buffer;
pascal@24983 533 - } /* while inner loop */
pascal@24983 534 - } /* rq_for_each_segment */
pascal@24983 535 - return ret;
pascal@24983 536 -}
pascal@24983 537 -
pascal@24983 538 -static blk_status_t cloop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
pascal@24983 539 -{
pascal@24983 540 -// struct request_queue *q = hctx->queue;
pascal@24983 541 -// struct cloop_device *clo = q->queuedata;
pascal@24983 542 - struct request *req = bd->rq;
pascal@24983 543 - struct cloop_device *clo = req->rq_disk->private_data;
pascal@24983 544 - blk_status_t ret = BLK_STS_OK;
pascal@24983 545 -
pascal@24983 546 -#if 1 /* Does it work when loading libraries? */
pascal@24983 547 - /* Since we have a buffered block list as well as data to read */
pascal@24983 548 - /* from disk (slow), and are (probably) never called from an */
pascal@24983 549 - /* interrupt, we use a simple mutex lock right here to ensure */
pascal@24983 550 - /* consistency. */
pascal@24983 551 - mutex_lock(&clo->clo_rq_mutex);
pascal@24983 552 - #else
pascal@24983 553 - spin_lock_irq(&clo->queue_lock);
pascal@24983 554 - #endif
pascal@24983 555 - blk_mq_start_request(req);
pascal@24983 556 - do {
pascal@24983 557 - ret = cloop_handle_request(clo, req);
pascal@24983 558 - } while(blk_update_request(req, ret, blk_rq_cur_bytes(req)));
pascal@24983 559 - blk_mq_end_request(req, ret);
pascal@24983 560 - #if 1 /* See above */
pascal@24983 561 - mutex_unlock(&clo->clo_rq_mutex);
pascal@24983 562 - #else
pascal@24983 563 - spin_unlock_irq(&clo->queue_lock);
pascal@24983 564 - #endif
pascal@24983 565 - return ret;
pascal@24983 566 + if(!list_empty(&clo->clo_list))
pascal@24983 567 + {
pascal@24983 568 + struct request *req;
pascal@24983 569 + unsigned long flags;
pascal@24983 570 + int uptodate;
pascal@24983 571 + spin_lock_irq(&clo->queue_lock);
pascal@24983 572 + req = list_entry(clo->clo_list.next, struct request, queuelist);
pascal@24983 573 + list_del_init(&req->queuelist);
pascal@24983 574 + spin_unlock_irq(&clo->queue_lock);
pascal@24983 575 + uptodate = cloop_handle_request(clo, req);
pascal@24983 576 + spin_lock_irqsave(&clo->queue_lock, flags);
pascal@24983 577 + __blk_end_request_all(req, uptodate ? 0 : -EIO);
pascal@24983 578 + spin_unlock_irqrestore(&clo->queue_lock, flags);
pascal@24983 579 + }
pascal@24983 580 + }
pascal@24983 581 + DEBUGP(KERN_ERR "cloop_thread exited.\n");
pascal@24983 582 + return 0;
pascal@24983 583 +}
pascal@24983 584 +
pascal@24983 585 +/* This is called by the kernel block queue management every now and then,
pascal@24983 586 + * with successive read requests qeued and sorted in a (hopefully)
pascal@24983 587 + * "most efficient way". spin_lock_irq() is being held by the kernel. */
pascal@24983 588 +static void cloop_do_request(struct request_queue *q)
pascal@24983 589 +{
pascal@24983 590 + struct request *req;
pascal@24983 591 + while((req = blk_fetch_request(q)) != NULL)
pascal@24983 592 + {
pascal@24983 593 + struct cloop_device *clo;
pascal@24983 594 + int rw;
pascal@24983 595 + /* quick sanity checks */
pascal@24983 596 + /* blk_fs_request() was removed in 2.6.36 */
pascal@23761 597 + if (unlikely(req == NULL
pascal@23761 598 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
pascal@23761 599 + || (req->cmd_type != REQ_TYPE_FS)
pascal@23761 600 +#endif
pascal@23761 601 + ))
pascal@24983 602 + goto error_continue;
pascal@24983 603 + rw = rq_data_dir(req);
pascal@23761 604 + if (unlikely(rw != READ
pascal@23761 605 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
pascal@23761 606 + && rw != READA
pascal@23761 607 +#endif
pascal@23761 608 + ))
pascal@24983 609 + {
pascal@24983 610 + DEBUGP("cloop_do_request: bad command\n");
pascal@24983 611 + goto error_continue;
pascal@24983 612 + }
pascal@24983 613 + clo = req->rq_disk->private_data;
pascal@24983 614 + if (unlikely(!clo->backing_file && !clo->suspended))
pascal@24983 615 + {
pascal@24983 616 + DEBUGP("cloop_do_request: not connected to a file\n");
pascal@24983 617 + goto error_continue;
pascal@24983 618 + }
pascal@24983 619 + list_add_tail(&req->queuelist, &clo->clo_list); /* Add to working list for thread */
pascal@24983 620 + wake_up(&clo->clo_event); /* Wake up cloop_thread */
pascal@24983 621 + continue; /* next request */
pascal@24983 622 + error_continue:
pascal@24983 623 + DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
pascal@23761 624 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
pascal@24983 625 + req->errors++;
pascal@23761 626 +#else
pascal@23761 627 + req->error_count++;
pascal@23761 628 +#endif
pascal@24983 629 + __blk_end_request_all(req, -EIO);
pascal@24983 630 + }
pascal@23761 631 }
pascal@23761 632
pascal@24983 633 /* Read header, flags and offsets from already opened file */
pascal@24983 634 @@ -508,7 +540,7 @@
pascal@23761 635 char *bbuf=NULL;
pascal@24983 636 unsigned int bbuf_size = 0;
pascal@24983 637 const unsigned int header_size = sizeof(struct cloop_head);
pascal@24983 638 - unsigned int i, offsets_read=0, total_offsets=0;
pascal@23761 639 + unsigned int i, total_offsets=0;
pascal@24983 640 loff_t fs_read_position = 0, header_pos[2];
pascal@24983 641 int isblkdev, bytes_read, error = 0;
pascal@24983 642 if (clo->suspended) return error;
pascal@24983 643 @@ -581,29 +613,19 @@
pascal@24983 644 goto error_release;
pascal@24983 645 }
pascal@24983 646 memcpy(&clo->head, bbuf, header_size);
pascal@24983 647 - if (strncmp(bbuf+CLOOP4_SIGNATURE_OFFSET, CLOOP4_SIGNATURE, CLOOP4_SIGNATURE_SIZE)==0)
pascal@24983 648 + if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
pascal@24983 649 {
pascal@24983 650 - clo->file_format=4;
pascal@24983 651 + clo->file_format++;
pascal@24983 652 clo->head.block_size=ntohl(clo->head.block_size);
pascal@24983 653 clo->head.num_blocks=ntohl(clo->head.num_blocks);
pascal@24983 654 clo->header_first = (i==0) ? 1 : 0;
pascal@24983 655 - printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
pascal@24983 656 - break;
pascal@24983 657 - }
pascal@24983 658 - else if (strncmp(bbuf+CLOOP2_SIGNATURE_OFFSET, CLOOP2_SIGNATURE, CLOOP2_SIGNATURE_SIZE)==0)
pascal@24983 659 - {
pascal@24983 660 - clo->file_format=2;
pascal@24983 661 - clo->head.block_size=ntohl(clo->head.block_size);
pascal@24983 662 - clo->head.num_blocks=ntohl(clo->head.num_blocks);
pascal@24983 663 - clo->header_first = (i==0) ? 1 : 0;
pascal@24983 664 - printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
pascal@24983 665 + printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
pascal@24983 666 break;
pascal@24983 667 }
pascal@24983 668 }
pascal@24983 669 if (clo->file_format == 0)
pascal@23761 670 {
pascal@24983 671 - printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
pascal@24983 672 - "please use an older version of %s for this file.\n",
pascal@24983 673 + printk(KERN_ERR "%s: Cannot detect %s format.\n",
pascal@24983 674 cloop_name, cloop_name);
pascal@24983 675 error=-EBADF; goto error_release;
pascal@24983 676 }
pascal@24983 677 @@ -613,67 +635,133 @@
pascal@24983 678 cloop_name, clo->head.block_size);
pascal@23761 679 error=-EBADF; goto error_release;
pascal@23761 680 }
pascal@24983 681 - total_offsets=clo->head.num_blocks+1;
pascal@24983 682 - if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
pascal@24983 683 + total_offsets=clo->head.num_blocks;
pascal@24983 684 + if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
pascal@24983 685 total_offsets > inode->i_size))
pascal@23761 686 {
pascal@24983 687 printk(KERN_ERR "%s: file %s too small for %u blocks\n",
pascal@24983 688 cloop_name, clo->underlying_filename, clo->head.num_blocks);
pascal@23761 689 error=-EBADF; goto error_release;
pascal@23761 690 }
pascal@24983 691 - clo->block_ptrs = cloop_malloc(sizeof(cloop_block_ptr) * total_offsets);
pascal@24983 692 - if (!clo->block_ptrs)
pascal@23761 693 + /* Allocate Memory for decompressors */
pascal@23761 694 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
pascal@23761 695 + clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
pascal@23761 696 + if(!clo->zstream.workspace)
pascal@24983 697 {
pascal@24983 698 - printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
pascal@23761 699 + printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
pascal@23761 700 + cloop_name, zlib_inflate_workspacesize());
pascal@23761 701 error=-ENOMEM; goto error_release;
pascal@23761 702 }
pascal@24983 703 - /* Read them offsets! */
pascal@24983 704 - if(clo->header_first)
pascal@23761 705 + zlib_inflateInit(&clo->zstream);
pascal@23761 706 +#endif
pascal@23761 707 +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
pascal@23761 708 +#if XZ_INTERNAL_CRC32
pascal@23761 709 + /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
pascal@23761 710 + xz_crc32_init();
pascal@23761 711 +#endif
pascal@23761 712 + clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
pascal@23761 713 +#endif
pascal@23761 714 + if (total_offsets + 1 == 0) /* Version 3 */
pascal@23761 715 {
pascal@24983 716 - fs_read_position = sizeof(struct cloop_head);
pascal@23761 717 + struct cloop_tail tail;
pascal@23761 718 + if (isblkdev)
pascal@24983 719 + {
pascal@23761 720 + /* No end of file: can't find index */
pascal@23761 721 + printk(KERN_ERR "%s: no V3 support for block device\n",
pascal@23761 722 + cloop_name);
pascal@23761 723 + error=-EBADF; goto error_release;
pascal@24983 724 + }
pascal@23761 725 + bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
pascal@23761 726 + inode->i_size - sizeof(struct cloop_tail),
pascal@23761 727 + sizeof(struct cloop_tail));
pascal@23761 728 + if (bytes_read == sizeof(struct cloop_tail))
pascal@24983 729 + {
pascal@23761 730 + unsigned long len, zlen;
pascal@23761 731 + int ret;
pascal@23761 732 + void *zbuf;
pascal@23761 733 + clo->head.num_blocks = ntohl(tail.num_blocks);
pascal@23761 734 + total_offsets = clo->head.num_blocks;
pascal@23761 735 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
pascal@23761 736 + zlen = ntohl(tail.table_size);
pascal@23761 737 + zbuf = cloop_malloc(zlen);
pascal@23761 738 + if (!clo->block_ptrs || !zbuf)
pascal@24983 739 + {
pascal@23761 740 + printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
pascal@23761 741 + if (zbuf) cloop_free(zbuf, zlen); error=-ENOMEM; goto error_release;
pascal@24983 742 + }
pascal@23761 743 + bytes_read = cloop_read_from_file(clo, file, zbuf,
pascal@23761 744 + inode->i_size - zlen - sizeof(struct cloop_tail),
pascal@23761 745 + zlen);
pascal@23761 746 + if (bytes_read != zlen)
pascal@24983 747 + {
pascal@23761 748 + printk(KERN_ERR "%s: can't read index\n", cloop_name);
pascal@24983 749 + cloop_free(zbuf, zlen); error=-EBADF; goto error_release;
pascal@24983 750 + }
pascal@23761 751 + len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
pascal@24983 752 + ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
pascal@23761 753 + cloop_free(zbuf, zlen);
pascal@23761 754 + if (ret != 0)
pascal@24983 755 + {
pascal@24983 756 + printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
pascal@24983 757 + cloop_name, ret);
pascal@24983 758 + error=-EBADF; goto error_release;
pascal@24983 759 + }
pascal@24983 760 + }
pascal@23761 761 + else
pascal@23761 762 + {
pascal@23761 763 + printk(KERN_ERR "%s: can't find index\n", cloop_name);
pascal@23761 764 + error=-ENOMEM; goto error_release;
pascal@23761 765 + }
pascal@24983 766 }
pascal@24983 767 else
pascal@24983 768 {
pascal@24983 769 - fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_offsets * sizeof(loff_t);
pascal@24983 770 - }
pascal@24983 771 - for(offsets_read=0;offsets_read<total_offsets;)
pascal@24983 772 - {
pascal@24983 773 - size_t bytes_readable;
pascal@24983 774 - unsigned int num_readable, offset = 0;
pascal@24983 775 - bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
pascal@24983 776 - if(bytes_readable <= 0) break; /* Done */
pascal@24983 777 - bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
pascal@24983 778 - if(bytes_read != bytes_readable)
pascal@23761 779 + unsigned int n, total_bytes;
pascal@23761 780 + clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
pascal@23761 781 + if (!clo->block_ptrs)
pascal@23761 782 + {
pascal@23761 783 + printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
pascal@23761 784 + error=-ENOMEM; goto error_release;
pascal@23761 785 + }
pascal@23761 786 + /* Read them offsets! */
pascal@23761 787 + if(clo->header_first)
pascal@24983 788 {
pascal@24983 789 - printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
pascal@24983 790 - cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
pascal@24983 791 - error=-EBADF;
pascal@24983 792 - goto error_release;
pascal@23761 793 + total_bytes = total_offsets * sizeof(struct block_info);
pascal@23761 794 + fs_read_position = sizeof(struct cloop_head);
pascal@24983 795 }
pascal@24983 796 - /* remember where to read the next blk from file */
pascal@24983 797 - fs_read_position += bytes_read;
pascal@24983 798 - /* calculate how many offsets can be taken from current bbuf */
pascal@24983 799 - num_readable = MIN(total_offsets - offsets_read,
pascal@24983 800 - bytes_read / sizeof(loff_t));
pascal@24983 801 - DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
pascal@24983 802 - for (i=0,offset=0; i<num_readable; i++)
pascal@23761 803 + else
pascal@17214 804 {
pascal@24983 805 - loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
pascal@24983 806 - if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
pascal@24983 807 - if(offsets_read > 0)
pascal@23761 808 + total_bytes = total_offsets * sizeof(loff_t);
pascal@23761 809 + fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
pascal@23761 810 + }
pascal@23761 811 + for(n=0;n<total_bytes;)
pascal@23761 812 + {
pascal@23761 813 + size_t bytes_readable;
pascal@23761 814 + bytes_readable = MIN(MIN(bbuf_size, clo->underlying_total_size - fs_read_position), total_bytes - n);
pascal@23761 815 + if(bytes_readable <= 0) break; /* Done */
pascal@23761 816 + bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
pascal@23761 817 + if(bytes_read != bytes_readable)
pascal@24983 818 {
pascal@24983 819 - loff_t d = CLOOP_BLOCK_OFFSET(tmp) - CLOOP_BLOCK_OFFSET(clo->block_ptrs[offsets_read-1]);
pascal@24983 820 - if(d > clo->largest_block) clo->largest_block = d;
pascal@23761 821 + printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
pascal@23761 822 + cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
pascal@23761 823 + error=-EBADF;
pascal@23761 824 + goto error_release;
pascal@24983 825 }
pascal@24983 826 - clo->block_ptrs[offsets_read++] = tmp;
pascal@24983 827 - offset += sizeof(loff_t);
pascal@23761 828 + memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
pascal@23761 829 + /* remember where to read the next blk from file */
pascal@23761 830 + fs_read_position += bytes_read;
pascal@23761 831 + n += bytes_read;
pascal@17214 832 }
pascal@17214 833 }
pascal@24983 834 - printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
pascal@24983 835 - cloop_name, clo->underlying_filename, clo->head.num_blocks,
pascal@24983 836 - clo->head.block_size, clo->largest_block);
pascal@23761 837 {
pascal@23761 838 int i;
pascal@24983 839 + char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
pascal@23761 840 + clo->largest_block = 0;
pascal@23761 841 + for (i = 0; i < clo->head.num_blocks; i++)
pascal@23761 842 + if (clo->block_ptrs[i].size > clo->largest_block)
pascal@23761 843 + clo->largest_block = clo->block_ptrs[i].size;
pascal@23761 844 + printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
pascal@23761 845 + cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
pascal@23761 846 + clo->head.block_size, clo->largest_block);
pascal@24983 847 clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
pascal@24983 848 (buffers / clo->head.block_size) : 1;
pascal@24983 849 clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
pascal@24983 850 @@ -705,36 +793,14 @@
pascal@23761 851 cloop_name, clo->largest_block);
pascal@23761 852 error=-ENOMEM; goto error_release_free_buffer;
pascal@17214 853 }
pascal@24983 854 - /* Allocate Memory for decompressors */
pascal@24983 855 -#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
pascal@23761 856 - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
pascal@23761 857 - if(!clo->zstream.workspace)
pascal@23761 858 - {
pascal@23761 859 - printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
pascal@23761 860 - cloop_name, zlib_inflate_workspacesize());
pascal@23761 861 - error=-ENOMEM; goto error_release_free_all;
pascal@23761 862 - }
pascal@23761 863 - zlib_inflateInit(&clo->zstream);
pascal@24983 864 -#endif
pascal@24983 865 -#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
pascal@24983 866 -#if XZ_INTERNAL_CRC32
pascal@24983 867 - /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
pascal@24983 868 - xz_crc32_init(void);
pascal@24983 869 -#endif
pascal@24983 870 - clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
pascal@24983 871 -#endif
pascal@24983 872 - if(CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]) > clo->underlying_total_size)
pascal@24983 873 + set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
pascal@24983 874 + clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
pascal@24983 875 + if(IS_ERR(clo->clo_thread))
pascal@24983 876 {
pascal@24983 877 - printk(KERN_ERR "%s: final offset wrong (%llu > %llu)\n",
pascal@17214 878 - cloop_name,
pascal@24983 879 - CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]),
pascal@24983 880 - clo->underlying_total_size);
pascal@24983 881 -#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
pascal@17214 882 - cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
pascal@24983 883 -#endif
pascal@24983 884 + error = PTR_ERR(clo->clo_thread);
pascal@24983 885 + clo->clo_thread=NULL;
pascal@24983 886 goto error_release_free_all;
pascal@23761 887 }
pascal@24983 888 - set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
pascal@23761 889 if(preload > 0)
pascal@23761 890 {
pascal@24983 891 clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
pascal@24983 892 @@ -780,6 +846,7 @@
pascal@24983 893 clo->preload_array_size = clo->preload_size = 0;
pascal@24983 894 }
pascal@24983 895 }
pascal@24983 896 + wake_up_process(clo->clo_thread);
pascal@24983 897 /* Uncheck */
pascal@24983 898 return error;
pascal@24983 899 error_release_free_all:
pascal@24983 900 @@ -794,9 +861,13 @@
pascal@17214 901 }
pascal@24983 902 if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
pascal@17214 903 error_release_free:
pascal@24983 904 - cloop_free(clo->block_ptrs, sizeof(cloop_block_ptr) * total_offsets);
pascal@23761 905 + cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
pascal@23762 906 clo->block_ptrs=NULL;
pascal@23762 907 error_release:
pascal@23762 908 +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
pascal@23762 909 + zlib_inflateEnd(&clo->zstream);
pascal@23762 910 + if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
pascal@23762 911 +#endif
pascal@23762 912 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
pascal@23762 913 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
pascal@23762 914 clo->backing_file=NULL;
pascal@24983 915 @@ -829,6 +900,7 @@
pascal@24983 916 if(clo->refcnt > 1) /* we needed one fd for the ioctl */
pascal@24983 917 return -EBUSY;
pascal@24983 918 if(filp==NULL) return -EINVAL;
pascal@24983 919 + if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
pascal@24983 920 if(filp!=initial_file)
pascal@24983 921 fput(filp);
pascal@24983 922 else
pascal@24983 923 @@ -839,7 +911,7 @@
pascal@24983 924 clo->backing_file = NULL;
pascal@24983 925 clo->backing_inode = NULL;
pascal@24983 926 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
pascal@24983 927 - if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks+1); clo->block_ptrs = NULL; }
pascal@24983 928 + if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
pascal@24983 929 if(clo->preload_cache)
pascal@24983 930 {
pascal@24983 931 int i;
pascal@24983 932 @@ -1054,15 +1126,15 @@
pascal@24983 933 case LOOP_CLR_FD: /* Change arg */
pascal@24983 934 case LOOP_GET_STATUS64: /* Change arg */
pascal@24983 935 case LOOP_SET_STATUS64: /* Change arg */
pascal@24983 936 - return cloop_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
pascal@24983 937 + arg = (unsigned long) compat_ptr(arg);
pascal@24983 938 case LOOP_SET_STATUS: /* unchanged */
pascal@24983 939 case LOOP_GET_STATUS: /* unchanged */
pascal@24983 940 case LOOP_SET_FD: /* unchanged */
pascal@24983 941 case LOOP_CHANGE_FD: /* unchanged */
pascal@24983 942 - return cloop_ioctl(bdev, mode, cmd, arg);
pascal@24983 943 - default:
pascal@24983 944 - return -ENOIOCTLCMD;
pascal@24983 945 + return cloop_ioctl(bdev, mode, cmd, arg);
pascal@24983 946 + break;
pascal@24983 947 }
pascal@24983 948 + return -ENOIOCTLCMD;
pascal@24983 949 }
pascal@24983 950 #endif
pascal@24983 951
pascal@24983 952 @@ -1093,7 +1165,7 @@
pascal@24983 953 cloop_dev[cloop_num]->refcnt-=1;
pascal@24983 954 }
pascal@24983 955
pascal@24983 956 -static const struct block_device_operations clo_fops =
pascal@24983 957 +static struct block_device_operations clo_fops =
pascal@24983 958 {
pascal@24983 959 owner: THIS_MODULE,
pascal@24983 960 open: cloop_open,
pascal@24983 961 @@ -1105,12 +1177,6 @@
pascal@24983 962 /* locked_ioctl ceased to exist in 2.6.36 */
pascal@24983 963 };
pascal@24983 964
pascal@24983 965 -static const struct blk_mq_ops cloop_mq_ops = {
pascal@24983 966 - .queue_rq = cloop_queue_rq,
pascal@24983 967 -/* .init_request = cloop_init_request, */
pascal@24983 968 -/* .complete = cloop_complete_rq, */
pascal@24983 969 -};
pascal@24983 970 -
pascal@24983 971 static int cloop_register_blkdev(int major_nr)
pascal@24983 972 {
pascal@24983 973 return register_blkdev(major_nr, cloop_name);
pascal@24983 974 @@ -1124,37 +1190,33 @@
pascal@24983 975
pascal@24983 976 static int cloop_alloc(int cloop_num)
pascal@24983 977 {
pascal@24983 978 - struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
pascal@24983 979 + struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
pascal@24983 980 if(clo == NULL) goto error_out;
pascal@24983 981 cloop_dev[cloop_num] = clo;
pascal@24983 982 memset(clo, 0, sizeof(struct cloop_device));
pascal@24983 983 clo->clo_number = cloop_num;
pascal@24983 984 - clo->tag_set.ops = &cloop_mq_ops;
pascal@24983 985 - clo->tag_set.nr_hw_queues = 1;
pascal@24983 986 - clo->tag_set.queue_depth = 128;
pascal@24983 987 - clo->tag_set.numa_node = NUMA_NO_NODE;
pascal@24983 988 - clo->tag_set.cmd_size = 0; /* No extra data needed */
pascal@24983 989 - /* BLK_MQ_F_BLOCKING is extremely important if we want to call blocking functions like vfs_read */
pascal@24983 990 - clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
pascal@24983 991 - clo->tag_set.driver_data = clo;
pascal@24983 992 - if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
pascal@24983 993 - clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
pascal@24983 994 - if(IS_ERR(clo->clo_queue))
pascal@24983 995 + clo->clo_thread = NULL;
pascal@24983 996 + init_waitqueue_head(&clo->clo_event);
pascal@24983 997 + spin_lock_init(&clo->queue_lock);
pascal@24983 998 + mutex_init(&clo->clo_ctl_mutex);
pascal@24983 999 + INIT_LIST_HEAD(&clo->clo_list);
pascal@24983 1000 + clo->clo_queue = blk_init_queue(cloop_do_request, &clo->queue_lock);
pascal@24983 1001 + if(!clo->clo_queue)
pascal@24983 1002 {
pascal@24983 1003 printk(KERN_ERR "%s: Unable to alloc queue[%d]\n", cloop_name, cloop_num);
pascal@24983 1004 - goto error_out_free_tags;
pascal@24983 1005 + goto error_out;
pascal@24983 1006 }
pascal@24983 1007 clo->clo_queue->queuedata = clo;
pascal@24983 1008 - blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
pascal@24983 1009 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
pascal@24983 1010 + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
pascal@24983 1011 + queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
pascal@24983 1012 +#endif
pascal@24983 1013 clo->clo_disk = alloc_disk(1);
pascal@24983 1014 if(!clo->clo_disk)
pascal@24983 1015 {
pascal@24983 1016 printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
pascal@24983 1017 - goto error_out_free_queue;
pascal@24983 1018 + goto error_disk;
pascal@24983 1019 }
pascal@24983 1020 - spin_lock_init(&clo->queue_lock);
pascal@24983 1021 - mutex_init(&clo->clo_ctl_mutex);
pascal@24983 1022 - mutex_init(&clo->clo_rq_mutex);
pascal@24983 1023 clo->clo_disk->major = cloop_major;
pascal@24983 1024 clo->clo_disk->first_minor = cloop_num;
pascal@24983 1025 clo->clo_disk->fops = &clo_fops;
pascal@24983 1026 @@ -1163,12 +1225,8 @@
pascal@24983 1027 sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
pascal@24983 1028 add_disk(clo->clo_disk);
pascal@24983 1029 return 0;
pascal@24983 1030 -error_out_free_queue:
pascal@24983 1031 +error_disk:
pascal@24983 1032 blk_cleanup_queue(clo->clo_queue);
pascal@24983 1033 -error_out_free_tags:
pascal@24983 1034 - blk_mq_free_tag_set(&clo->tag_set);
pascal@24983 1035 -error_out_free_clo:
pascal@24983 1036 - cloop_free(clo, sizeof(struct cloop_device));
pascal@24983 1037 error_out:
pascal@24983 1038 return -ENOMEM;
pascal@24983 1039 }
pascal@24983 1040 @@ -1179,7 +1237,6 @@
pascal@24983 1041 if(clo == NULL) return;
pascal@24983 1042 del_gendisk(clo->clo_disk);
pascal@24983 1043 blk_cleanup_queue(clo->clo_queue);
pascal@24983 1044 - blk_mq_free_tag_set(&clo->tag_set);
pascal@24983 1045 put_disk(clo->clo_disk);
pascal@24983 1046 cloop_free(clo, sizeof(struct cloop_device));
pascal@24983 1047 cloop_dev[cloop_num] = NULL;
pascal@24983 1048 --- cloop_suspend.c
pascal@24983 1049 +++ cloop_suspend.c
pascal@24983 1050 @@ -14,6 +14,7 @@
pascal@24983 1051 #include <fcntl.h>
pascal@24983 1052 #include <unistd.h>
pascal@24983 1053 #include <stdio.h>
pascal@24983 1054 +#include <stdint.h>
pascal@24983 1055
pascal@24983 1056 /* We don't use the structure, so that define does not hurt */
pascal@24983 1057 #define dev_t int