wok-current rev 23761
Up linux-cloop (4.12)
| author | Pascal Bellard <pascal.bellard@slitaz.org> |
|---|---|
| date | Mon May 04 09:05:12 2020 +0000 (2020-05-04) |
| parents | d98ac734626d |
| children | cea6e929d21e |
| files | fusecloop/stuff/fusecloop.u linux-cloop/receipt linux-cloop/stuff/cloop.u linux64-cloop/receipt linux64-cloop/stuff/cloop.u |
line diff
--- a/fusecloop/stuff/fusecloop.u	Mon May 04 07:56:50 2020 +0100
+++ b/fusecloop/stuff/fusecloop.u	Mon May 04 09:05:12 2020 +0000
@@ -734,7 +734,7 @@
 + tail.table_size = ntohl(len);
 + pos += len + sizeof(tail);
 + n = pos & 511;
-+ if (n) write(STDOUT_FILENO, padding, 512 - n);
++ // if (n) write(STDOUT_FILENO, padding, 512 - n);
 + write(STDOUT_FILENO, compressed, len);
 + write(STDOUT_FILENO, &tail, sizeof(tail));
 + exit(sig != 0);
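The hunk above touches the image-creation side: the compressed block-size table and the fixed-size trailer are now written without the 512-byte sector padding. As a rough illustration of the layout being produced, here is a minimal userspace sketch (not the fusecloop source; `finish_image()` is a hypothetical helper) using the `struct cloop_tail` field names introduced in linux-cloop/stuff/cloop.u below, all stored in network byte order, with `index_size` = 4 for the plain V3 index:

```c
/* Minimal sketch, assuming the layout described in stuff/cloop.u:
 * the compressed index is followed by a fixed-size cloop_tail at the
 * very end of the image, as the (now unpadded) writes above do. */
#include <arpa/inet.h>   /* htonl() */
#include <stdint.h>
#include <unistd.h>      /* write() */

struct cloop_tail {              /* field names as in stuff/cloop.u */
	uint32_t table_size;     /* bytes of compressed index preceding the tail */
	uint32_t index_size;     /* 4 for the plain V3 index */
	uint32_t num_blocks;     /* number of compressed blocks */
};

/* hypothetical helper: "compressed" is the already-compressed index, len bytes */
static void finish_image(int fd, const void *compressed, uint32_t len, uint32_t blocks)
{
	struct cloop_tail tail = {
		.table_size = htonl(len),
		.index_size = htonl(4),
		.num_blocks = htonl(blocks),
	};
	/* the commented-out padding would have rounded the write position up
	 * to a 512-byte boundary before the index; the tail itself stays last */
	write(fd, compressed, len);
	write(fd, &tail, sizeof(tail));
}
```

The kernel side locates this index relative to the end of the file (at `i_size - table_size - sizeof(struct cloop_tail)`), as seen in the cloop.c changes further down.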
--- a/linux-cloop/receipt	Mon May 04 07:56:50 2020 +0100
+++ b/linux-cloop/receipt	Mon May 04 09:05:12 2020 +0000
@@ -2,13 +2,15 @@
 
 PACKAGE="linux-cloop"
 SOURCE="cloop"
-VERSION="2.639-2"
+_VERSION="2.639-2"
+#VERSION="$(sed '/+#define CLOOP_VERSION/!d;s|.* "\(.*\)"|\1|' stuff/cloop.u)"
+VERSION="4.12"
 CATEGORY="base-system"
 MAINTAINER="pascal.bellard@slitaz.org"
 LICENSE="GPL2"
 SHORT_DESC="The read-only compressed loop device kernel module."
 WEB_SITE="http://knoppix.net/wiki/Cloop"
-TARBALL="${SOURCE}_${VERSION}.tar.gz"
+TARBALL="${SOURCE}_${_VERSION}.tar.gz"
 WGET_URL="http://debian-knoppix.alioth.debian.org/packages/$SOURCE/$TARBALL"
 
 DEPENDS="linux"
@@ -19,9 +21,7 @@
 
 compile_rules()
 {
-	patch -p0 < $stuff/cloop.u # 3.2.98
-	sed -i -e 's|file->f_path.mnt, file->f_path.dentry|\&file->f_path|' \
-	    -e 's|bvec->|bvec.|g;s|*bvec|bvec|' cloop.c
+	patch -p0 < $stuff/cloop.u
 	make KERNEL_DIR="/usr/src/linux" cloop.ko && xz cloop.ko
 }
 
3.1 --- a/linux-cloop/stuff/cloop.u Mon May 04 07:56:50 2020 +0100 3.2 +++ b/linux-cloop/stuff/cloop.u Mon May 04 09:05:12 2020 +0000 3.3 @@ -1,80 +1,221 @@ 3.4 --- cloop.h 3.5 +++ cloop.h 3.6 -@@ -20,6 +20,80 @@ 3.7 +@@ -1,15 +1,50 @@ 3.8 ++#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */ 3.9 ++#define CLOOP_SIGNATURE_SIZE 9 3.10 ++#define CLOOP_SIGNATURE_OFFSET 0x0 3.11 ++ 3.12 + #ifndef _COMPRESSED_LOOP_H 3.13 + #define _COMPRESSED_LOOP_H 3.14 + 3.15 +-#define CLOOP_HEADROOM 128 3.16 ++/*************************************************************************\ 3.17 ++* Starting with Format V4.0 (cloop version 4.x), cloop can now have two * 3.18 ++* alternative structures: * 3.19 ++* * 3.20 ++* 1. Header first: "robust" format, handles missing blocks well * 3.21 ++* 2. Footer (header last): "streaming" format, easier to create * 3.22 ++* * 3.23 ++* The cloop kernel module autodetects both formats, and can (currently) * 3.24 ++* still handle the V2.0 format as well. * 3.25 ++* * 3.26 ++* 1. Header first: * 3.27 ++* +---------------------------- FIXED SIZE ---------------------------+ * 3.28 ++* |Signature (128 bytes) | * 3.29 ++* |block_size (32bit number, network order) | * 3.30 ++* |num_blocks (32bit number, network order) | * 3.31 ++* +--------------------------- VARIABLE SIZE -------------------------+ * 3.32 ++* |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| * 3.33 ++* |compressed data blocks of variable size ... | * 3.34 ++* +-------------------------------------------------------------------+ * 3.35 ++* * 3.36 ++* 2. Footer (header last): * 3.37 ++* +--------------------------- VARIABLE SIZE -------------------------+ * 3.38 ++* |compressed data blocks of variable size ... | * 3.39 ++* |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| * 3.40 ++* +---------------------------- FIXED SIZE ---------------------------+ * 3.41 ++* |Signature (128 bytes) | * 3.42 ++* |block_size (32bit number, network order) | * 3.43 ++* |num_blocks (32bit number, network order) | * 3.44 ++* +-------------------------------------------------------------------+ * 3.45 ++* * 3.46 ++* Offsets are always relative to beginning of file, in all formats. * 3.47 ++* The block index contains num_blocks+1 offsets, followed (1) or * 3.48 ++* preceded (2) by the compressed blocks. * 3.49 ++\*************************************************************************/ 3.50 + 3.51 +-/* The cloop header usually looks like this: */ 3.52 +-/* #!/bin/sh */ 3.53 +-/* #V2.00 Format */ 3.54 +-/* ...padding up to CLOOP_HEADROOM... 
*/ 3.55 +-/* block_size (32bit number, network order) */ 3.56 +-/* num_blocks (32bit number, network order) */ 3.57 ++#include <linux/types.h> /* u_int32_t */ 3.58 ++ 3.59 ++#define CLOOP_HEADROOM 128 3.60 + 3.61 ++/* Header of fixed length, can be located at beginning or end of file */ 3.62 + struct cloop_head 3.63 + { 3.64 + char preamble[CLOOP_HEADROOM]; 3.65 +@@ -17,9 +52,163 @@ 3.66 + u_int32_t num_blocks; 3.67 + }; 3.68 + 3.69 ++/************************************************************************\ 3.70 ++* CLOOP4 flags for each compressed block * 3.71 ++* Value Meaning * 3.72 ++* 0 GZIP/7ZIP compression (compatible with V2.0 Format) * 3.73 ++* 1 no compression (incompressible data) * 3.74 ++* 2 xz compression (currently best space saver) * 3.75 ++* 3 lz4 compression * 3.76 ++* 4 lzo compression (fastest) * 3.77 ++\************************************************************************/ 3.78 ++ 3.79 ++typedef uint64_t cloop_block_ptr; 3.80 ++ 3.81 ++/* Get value of first 4 bits */ 3.82 ++#define CLOOP_BLOCK_FLAGS(x) ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60)) 3.83 ++/* Get value of last 60 bits */ 3.84 ++#define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU) 3.85 ++ 3.86 ++#define CLOOP_COMPRESSOR_ZLIB 0x0 3.87 ++#define CLOOP_COMPRESSOR_NONE 0x1 3.88 ++#define CLOOP_COMPRESSOR_XZ 0x2 3.89 ++#define CLOOP_COMPRESSOR_LZ4 0x3 3.90 ++#define CLOOP_COMPRESSOR_LZO1X 0x4 3.91 ++ 3.92 ++#define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X) 3.93 ++ 3.94 ++#define CLOOP_COMPRESSOR_LINK 0xF 3.95 ++ 3.96 ++ 3.97 /* data_index (num_blocks 64bit pointers, network order)... */ 3.98 /* compressed data (gzip block compressed format)... */ 3.99 3.100 +struct cloop_tail 3.101 +{ 3.102 -+ u_int32_t table_size; 3.103 -+ u_int32_t index_size; 3.104 ++ u_int32_t table_size; 3.105 ++ u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */ 3.106 ++#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF)) 3.107 ++#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4) 3.108 ++#define CLOOP3_TRUNCATED(x) ((unsigned int)((x) & 0x80) >> 7) 3.109 ++#define CLOOP3_LASTLEN(x) (unsigned int)((x) >> 8) 3.110 + u_int32_t num_blocks; 3.111 +}; 3.112 + 3.113 ++#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12) 3.114 ++ 3.115 +struct block_info 3.116 +{ 3.117 + loff_t offset; /* 64-bit offsets of compressed block */ 3.118 + u_int32_t size; /* 32-bit compressed block size */ 3.119 -+ u_int32_t optidx; /* 32-bit index number */ 3.120 ++ u_int32_t flags; /* 32-bit compression flags */ 3.121 +}; 3.122 + 3.123 -+static inline char *build_index(struct block_info *offsets, unsigned long n) 3.124 ++static inline char *build_index(struct block_info *offsets, unsigned long n, 3.125 ++ unsigned long block_size, unsigned global_flags) 3.126 +{ 3.127 + u_int32_t *ofs32 = (u_int32_t *) offsets; 3.128 + loff_t *ofs64 = (loff_t *) offsets; 3.129 -+ 3.130 ++ 3.131 ++ /* v3 64bits bug: v1 assumed */ 3.132 ++ unsigned long v3_64 = (n+1)/2; 3.133 ++ loff_t prev; 3.134 ++ 3.135 ++ if (ofs32[0] != 0 && ofs32[1] == 0) { 3.136 ++ for (prev=__le64_to_cpu(ofs64[v3_64]); 3.137 ++ v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev; 3.138 ++ prev=__le64_to_cpu(ofs64[v3_64])); 3.139 ++ } 3.140 ++ 3.141 + if (ofs32[0] == 0) { 3.142 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */ 3.143 + while (n--) { 3.144 + offsets[n].offset = __be64_to_cpu(offsets[n].offset); 3.145 + offsets[n].size = ntohl(offsets[n].size); 3.146 ++ offsets[n].flags = 0; 3.147 + } 3.148 + return (char *) 
"128BE accelerated knoppix 1.0"; 3.149 + } 3.150 -+ else { /* V2.0 */ 3.151 -+ loff_t last = __be64_to_cpu(ofs64[n - 1]); 3.152 -+ while (n--) { 3.153 ++ else { /* V2.0/V4.0 */ 3.154 ++ loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n])); 3.155 ++ u_int32_t flags; 3.156 ++ static char v4[11]; 3.157 ++ unsigned long i = n; 3.158 ++ 3.159 ++ for (flags = 0; n-- ;) { 3.160 ++ loff_t data = __be64_to_cpu(ofs64[n]); 3.161 ++ 3.162 + offsets[n].size = last - 3.163 -+ (offsets[n].offset = __be64_to_cpu(ofs64[n])); 3.164 ++ (offsets[n].offset = CLOOP_BLOCK_OFFSET(data)); 3.165 + last = offsets[n].offset; 3.166 ++ offsets[n].flags = CLOOP_BLOCK_FLAGS(data); 3.167 ++ flags |= 1 << offsets[n].flags; 3.168 + } 3.169 -+ return (char *) "64BE v2.0"; 3.170 ++ if (flags < 2) return (char *) "64BE v2.0"; 3.171 ++ while (i--) { 3.172 ++ if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) { 3.173 ++ offsets[i] = offsets[offsets[i].offset]; 3.174 ++ } 3.175 ++ } 3.176 ++ strcpy(v4, (char *) "64BE v4.0a"); 3.177 ++ v4[10] = 'a' + ((flags-1) & 0xF); // compressors used 3.178 ++ if (flags > 0x10) { // with links ? 3.179 ++ v4[10] += 'A' - 'a'; 3.180 ++ } 3.181 ++ return v4; 3.182 + } 3.183 + } 3.184 -+ else if (ofs32[1] == 0) { /* V1.0 */ 3.185 -+ loff_t last = __le64_to_cpu(ofs64[n - 1]); 3.186 ++ else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */ 3.187 ++ loff_t last = __le64_to_cpu(ofs64[n]); 3.188 + while (n--) { 3.189 + offsets[n].size = last - 3.190 + (offsets[n].offset = __le64_to_cpu(ofs64[n])); 3.191 + last = offsets[n].offset; 3.192 ++ offsets[n].flags = 0; 3.193 + } 3.194 + return (char *) "64LE v1.0"; 3.195 + } 3.196 -+ else if (ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */ 3.197 -+ loff_t last = ntohl(ofs32[n - 1]); 3.198 -+ while (n--) { 3.199 -+ offsets[n].size = last - 3.200 -+ (offsets[n].offset = ntohl(ofs32[n])); 3.201 -+ last = offsets[n].offset; 3.202 -+ } 3.203 -+ return (char *) "32BE v0.68"; 3.204 -+ } 3.205 -+ else { /* V3.0 */ 3.206 ++ else { /* V3.0 or V0.68 */ 3.207 + unsigned long i; 3.208 + loff_t j; 3.209 ++ static char v3[11]; 3.210 + 3.211 ++ for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++); 3.212 ++ if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */ 3.213 ++ loff_t last = ntohl(ofs32[n]); 3.214 ++ while (n--) { 3.215 ++ offsets[n].size = last - 3.216 ++ (offsets[n].offset = ntohl(ofs32[n])); 3.217 ++ last = offsets[n].offset; 3.218 ++ offsets[n].flags = 0; 3.219 ++ } 3.220 ++ return (char *) "32BE v0.68"; 3.221 ++ } 3.222 ++ 3.223 ++ v3_64 = (ofs32[1] == 0); 3.224 + for (i = n; i-- != 0; ) 3.225 -+ offsets[i].size = ntohl(ofs32[i]); 3.226 ++ offsets[i].size = ntohl(ofs32[i << v3_64]); 3.227 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) { 3.228 + offsets[i].offset = j; 3.229 ++ offsets[i].flags = global_flags; 3.230 ++ if (offsets[i].size == 0xFFFFFFFF) { 3.231 ++ offsets[i].flags = CLOOP_COMPRESSOR_NONE; 3.232 ++ offsets[i].size = block_size; 3.233 ++ } 3.234 ++ if ((offsets[i].size & 0x80000000) == 0) { 3.235 ++ j += offsets[i].size; 3.236 ++ } 3.237 ++ } 3.238 ++ for (i = 0; i < n; i++) { 3.239 + if (offsets[i].size & 0x80000000) { 3.240 -+ unsigned long k = offsets[i].size & 0x7FFFFFFF; 3.241 -+ offsets[i].offset = offsets[k].offset; 3.242 -+ offsets[i].size = offsets[k].size; 3.243 ++ offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF]; 3.244 + } 3.245 -+ else j += offsets[i].size; 3.246 + } 3.247 -+ return (char *) "32BE v3.0"; 3.248 ++ strcpy(v3, (char *) (v3_64) ? 
"64BE v3.0a" : "32BE v3.0a"); 3.249 ++ v3[10] += global_flags; 3.250 ++ return v3; 3.251 + } 3.252 +} 3.253 + 3.254 @@ -83,187 +224,884 @@ 3.255 3.256 --- cloop.c 3.257 +++ cloop.c 3.258 -@@ -5,11 +5,18 @@ 3.259 - * A cloop file looks like this: 3.260 - * [32-bit uncompressed block size: network order] 3.261 - * [32-bit number of blocks (n_blocks): network order] 3.262 +@@ -1,26 +1,23 @@ 3.263 +-/* 3.264 +- * compressed_loop.c: Read-only compressed loop blockdevice 3.265 +- * hacked up by Rusty in 1999, extended and maintained by Klaus Knopper 3.266 +- * 3.267 +- * A cloop file looks like this: 3.268 +- * [32-bit uncompressed block size: network order] 3.269 +- * [32-bit number of blocks (n_blocks): network order] 3.270 - * [64-bit file offsets of start of blocks: network order] 3.271 -+ * [for version < 3] 3.272 -+ * [32-bit, 64-bit or 128-bit file offsets of start of blocks] 3.273 - * ... 3.274 - * (n_blocks + 1). 3.275 - * n_blocks consisting of: 3.276 - * [compressed block] 3.277 -+ * ... 3.278 -+ * [for version >= 3] 3.279 -+ * [compressed list of 32-bit block sizes] 3.280 -+ * [32-bit compressed index size: network order] 3.281 -+ * [32-bit index size = 4: network order] 3.282 -+ * [32-bit number of blocks (n_blocks): network order] 3.283 - * 3.284 - * Every version greatly inspired by code seen in loop.c 3.285 - * by Theodore Ts'o, 3/29/93. 3.286 -@@ -115,7 +122,7 @@ 3.287 +- * ... 3.288 +- * (n_blocks + 1). 3.289 +- * n_blocks consisting of: 3.290 +- * [compressed block] 3.291 +- * 3.292 +- * Every version greatly inspired by code seen in loop.c 3.293 +- * by Theodore Ts'o, 3/29/93. 3.294 +- * 3.295 +- * Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper. 3.296 +- * Redistribution of this file is permitted under the GNU Public License. 3.297 +- * 3.298 +- */ 3.299 ++/************************************************************************\ 3.300 ++* cloop.c: Read-only compressed loop blockdevice * 3.301 ++* hacked up by Rusty in 1999, extended and maintained by Klaus Knopper * 3.302 ++* * 3.303 ++* For all supported cloop file formats, please check the file "cloop.h" * 3.304 ++* New in Version 4: * 3.305 ++* - Header can be first or last in cloop file, * 3.306 ++* - Different compression algorithms supported (compression type * 3.307 ++* encoded in first 4 bytes of block offset address) * 3.308 ++* * 3.309 ++* Every version greatly inspired by code seen in loop.c * 3.310 ++* by Theodore Ts'o, 3/29/93. * 3.311 ++* * 3.312 ++* Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper. * 3.313 ++* Redistribution of this file is permitted under the GNU Public License * 3.314 ++* V2. * 3.315 ++\************************************************************************/ 3.316 + 3.317 + #define CLOOP_NAME "cloop" 3.318 +-#define CLOOP_VERSION "2.639" 3.319 ++#define CLOOP_VERSION "4.12" 3.320 + #define CLOOP_MAX 8 3.321 + 3.322 + #ifndef KBUILD_MODNAME 3.323 +@@ -47,8 +44,27 @@ 3.324 + #include <asm/div64.h> /* do_div() for 64bit division */ 3.325 + #include <asm/uaccess.h> 3.326 + #include <asm/byteorder.h> 3.327 +-/* Use zlib_inflate from lib/zlib_inflate */ 3.328 ++/* Check for ZLIB, LZO1X, LZ4 decompression algorithms in kernel. 
*/ 3.329 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 3.330 + #include <linux/zutil.h> 3.331 ++#endif 3.332 ++#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE)) 3.333 ++#include <linux/lzo.h> 3.334 ++#endif 3.335 ++#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE)) 3.336 ++#include <linux/lz4.h> 3.337 ++#endif 3.338 ++#if (defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE)) 3.339 ++#include <linux/decompress/unlzma.h> 3.340 ++#endif 3.341 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 3.342 ++#include <linux/xz.h> 3.343 ++#endif 3.344 ++ 3.345 ++#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE) || defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE) || defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE) || defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE) || defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))) 3.346 ++#error "No decompression library selected in kernel config!" 3.347 ++#endif 3.348 ++ 3.349 + #include <linux/loop.h> 3.350 + #include <linux/kthread.h> 3.351 + #include <linux/compat.h> 3.352 +@@ -92,47 +108,64 @@ 3.353 + #define DEBUGP(format, x...) 3.354 + #endif 3.355 + 3.356 ++/* Default size of buffer to keep some decompressed blocks in memory to speed up access */ 3.357 ++#define BLOCK_BUFFER_MEM (16*65536) 3.358 ++ 3.359 + /* One file can be opened at module insertion time */ 3.360 + /* insmod cloop file=/path/to/file */ 3.361 + static char *file=NULL; 3.362 + static unsigned int preload=0; 3.363 + static unsigned int cloop_max=CLOOP_MAX; 3.364 ++static unsigned int buffers=BLOCK_BUFFER_MEM; 3.365 + module_param(file, charp, 0); 3.366 + module_param(preload, uint, 0); 3.367 + module_param(cloop_max, uint, 0); 3.368 + MODULE_PARM_DESC(file, "Initial cloop image file (full path) for /dev/cloop"); 3.369 + MODULE_PARM_DESC(preload, "Preload n blocks of cloop data into memory"); 3.370 + MODULE_PARM_DESC(cloop_max, "Maximum number of cloop devices (default 8)"); 3.371 ++MODULE_PARM_DESC(buffers, "Size of buffer to keep uncompressed blocks in memory in MiB (default 1)"); 3.372 + 3.373 + static struct file *initial_file=NULL; 3.374 + static int cloop_major=MAJOR_NR; 3.375 + 3.376 +-/* Number of buffered decompressed blocks */ 3.377 +-#define BUFFERED_BLOCKS 8 3.378 + struct cloop_device 3.379 + { 3.380 +- /* Copied straight from the file */ 3.381 ++ /* Header filled from the file */ 3.382 struct cloop_head head; 3.383 ++ int header_first; 3.384 ++ int file_format; 3.385 3.386 - /* An array of offsets of compressed blocks within the file */ 3.387 +- /* An array of offsets of compressed blocks within the file */ 3.388 - loff_t *offsets; 3.389 -+ struct block_info *offsets; 3.390 ++ /* An or'd sum of all flags of each compressed block (v3) */ 3.391 ++ u_int32_t allflags; 3.392 ++ 3.393 ++ /* An array of cloop_ptr flags/offset for compressed blocks within the file */ 3.394 ++ struct block_info *block_ptrs; 3.395 3.396 /* We buffer some uncompressed blocks for performance */ 3.397 - int buffered_blocknum[BUFFERED_BLOCKS]; 3.398 -@@ -256,11 +263,11 @@ 3.399 - return i; 3.400 - } 3.401 +- int buffered_blocknum[BUFFERED_BLOCKS]; 3.402 +- int current_bufnum; 3.403 +- void *buffer[BUFFERED_BLOCKS]; 3.404 +- void *compressed_buffer; 3.405 +- size_t preload_array_size; /* Size of pointer array in blocks */ 3.406 +- size_t preload_size; /* Number 
of successfully allocated blocks */ 3.407 +- char **preload_cache; /* Pointers to preloaded blocks */ 3.408 ++ size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */ 3.409 ++ int *buffered_blocknum; /* list of numbers of uncompressed blocks in buffer */ 3.410 ++ int current_bufnum; /* which block is current */ 3.411 ++ unsigned char **buffer; /* cache space for num_buffered_blocks uncompressed blocks */ 3.412 ++ void *compressed_buffer; /* space for the largest compressed block */ 3.413 ++ size_t preload_array_size; /* Size of pointer array in blocks */ 3.414 ++ size_t preload_size; /* Number of successfully allocated blocks */ 3.415 ++ char **preload_cache; /* Pointers to preloaded blocks */ 3.416 3.417 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 3.418 + z_stream zstream; 3.419 ++#endif 3.420 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 3.421 ++ struct xz_dec *xzdecoderstate; 3.422 ++ struct xz_buf xz_buffer; 3.423 ++#endif 3.424 + 3.425 + struct file *backing_file; /* associated file */ 3.426 + struct inode *backing_inode; /* for bmap */ 3.427 + 3.428 ++ unsigned char *underlying_filename; 3.429 + unsigned long largest_block; 3.430 + unsigned int underlying_blksize; 3.431 ++ loff_t underlying_total_size; 3.432 + int clo_number; 3.433 + int refcnt; 3.434 + struct block_device *bdev; 3.435 +@@ -147,7 +180,6 @@ 3.436 + struct request_queue *clo_queue; 3.437 + struct gendisk *clo_disk; 3.438 + int suspended; 3.439 +- char clo_file_name[LO_NAME_SIZE]; 3.440 + }; 3.441 + 3.442 + /* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers, 3.443 +@@ -156,52 +188,113 @@ 3.444 + static const char *cloop_name=CLOOP_NAME; 3.445 + static int cloop_count = 0; 3.446 + 3.447 +-#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))) /* Must be compiled into kernel. */ 3.448 +-#error "Invalid Kernel configuration. CONFIG_ZLIB_INFLATE support is needed for cloop." 3.449 +-#endif 3.450 +- 3.451 +-/* Use __get_free_pages instead of vmalloc, allows up to 32 pages, 3.452 +- * 2MB in one piece */ 3.453 + static void *cloop_malloc(size_t size) 3.454 + { 3.455 +- int order = get_order(size); 3.456 +- if(order <= KMALLOC_MAX_ORDER) 3.457 +- return (void *)kmalloc(size, GFP_KERNEL); 3.458 +- else if(order < MAX_ORDER) 3.459 +- return (void *)__get_free_pages(GFP_KERNEL, order); 3.460 ++ /* kmalloc will fail after the system is running for a while, */ 3.461 ++ /* when large orders can't return contiguous memory. */ 3.462 ++ /* Let's just use vmalloc for now. 
:-/ */ 3.463 ++ /* int order = get_order(size); */ 3.464 ++ /* if(order <= KMALLOC_MAX_ORDER) */ 3.465 ++ /* return (void *)kmalloc(size, GFP_KERNEL); */ 3.466 ++ /* else if(order < MAX_ORDER) */ 3.467 ++ /* return (void *)__get_free_pages(GFP_KERNEL, order); */ 3.468 + return (void *)vmalloc(size); 3.469 + } 3.470 + 3.471 + static void cloop_free(void *mem, size_t size) 3.472 + { 3.473 +- int order = get_order(size); 3.474 +- if(order <= KMALLOC_MAX_ORDER) 3.475 +- kfree(mem); 3.476 +- else if(order < MAX_ORDER) 3.477 +- free_pages((unsigned long)mem, order); 3.478 +- else vfree(mem); 3.479 ++ /* int order = get_order(size); */ 3.480 ++ /* if(order <= KMALLOC_MAX_ORDER) */ 3.481 ++ /* kfree(mem); */ 3.482 ++ /* else if(order < MAX_ORDER) */ 3.483 ++ /* free_pages((unsigned long)mem, order); */ 3.484 ++ /* else */ 3.485 ++ vfree(mem); 3.486 + } 3.487 + 3.488 +-static int uncompress(struct cloop_device *clo, 3.489 +- unsigned char *dest, unsigned long *destLen, 3.490 +- unsigned char *source, unsigned long sourceLen) 3.491 ++static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags) 3.492 + { 3.493 +- /* Most of this code can be found in fs/cramfs/uncompress.c */ 3.494 +- int err; 3.495 +- clo->zstream.next_in = source; 3.496 +- clo->zstream.avail_in = sourceLen; 3.497 +- clo->zstream.next_out = dest; 3.498 +- clo->zstream.avail_out = *destLen; 3.499 +- err = zlib_inflateReset(&clo->zstream); 3.500 +- if (err != Z_OK) 3.501 +- { 3.502 +- printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err); 3.503 +- zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream); 3.504 +- } 3.505 +- err = zlib_inflate(&clo->zstream, Z_FINISH); 3.506 +- *destLen = clo->zstream.total_out; 3.507 +- if (err != Z_STREAM_END) return err; 3.508 +- return Z_OK; 3.509 ++ int err = -1; 3.510 ++ switch(flags) 3.511 ++ { 3.512 ++ case CLOOP_COMPRESSOR_NONE: 3.513 ++ memcpy(dest, source, *destLen = sourceLen); 3.514 ++ err = Z_OK; 3.515 ++ break; 3.516 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 3.517 ++ case CLOOP_COMPRESSOR_ZLIB: 3.518 ++ clo->zstream.next_in = source; 3.519 ++ clo->zstream.avail_in = sourceLen; 3.520 ++ clo->zstream.next_out = dest; 3.521 ++ clo->zstream.avail_out = *destLen; 3.522 ++ err = zlib_inflateReset(&clo->zstream); 3.523 ++ if (err != Z_OK) 3.524 ++ { 3.525 ++ printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err); 3.526 ++ zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream); 3.527 ++ } 3.528 ++ err = zlib_inflate(&clo->zstream, Z_FINISH); 3.529 ++ *destLen = clo->zstream.total_out; 3.530 ++ if (err == Z_STREAM_END) err = 0; 3.531 ++ DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen); 3.532 ++ break; 3.533 ++#endif 3.534 ++#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE)) 3.535 ++ case CLOOP_COMPRESSOR_LZO1X: 3.536 ++ { 3.537 ++ size_t tmp = (size_t) clo->head.block_size; 3.538 ++ err = lzo1x_decompress_safe(source, sourceLen, 3.539 ++ dest, &tmp); 3.540 ++ if (err == LZO_E_OK) *destLen = (u_int32_t) tmp; 3.541 ++ } 3.542 ++ break; 3.543 ++#endif 3.544 ++#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE)) 3.545 ++ case CLOOP_COMPRESSOR_LZ4: 3.546 ++ { 3.547 ++ size_t outputSize = *destLen; 3.548 ++ /* We should adjust outputSize here, in case the last block is smaller than block_size */ 3.549 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* 
field removed */ 3.550 ++ err = lz4_decompress(source, (size_t *) &sourceLen, 3.551 ++ dest, outputSize); 3.552 ++#else 3.553 ++ err = LZ4_decompress_safe(source, 3.554 ++ dest, 3.555 ++ sourceLen, outputSize); 3.556 ++#endif 3.557 ++ if (err >= 0) 3.558 ++ { 3.559 ++ err = 0; 3.560 ++ *destLen = outputSize; 3.561 ++ } 3.562 ++ } 3.563 ++ break; 3.564 ++#endif 3.565 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 3.566 ++ case CLOOP_COMPRESSOR_XZ: 3.567 ++ clo->xz_buffer.in = source; 3.568 ++ clo->xz_buffer.in_pos = 0; 3.569 ++ clo->xz_buffer.in_size = sourceLen; 3.570 ++ clo->xz_buffer.out = dest; 3.571 ++ clo->xz_buffer.out_pos = 0; 3.572 ++ clo->xz_buffer.out_size = *destLen; 3.573 ++ xz_dec_reset(clo->xzdecoderstate); 3.574 ++ err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer); 3.575 ++ if (err == XZ_STREAM_END || err == XZ_OK) 3.576 ++ { 3.577 ++ err = 0; 3.578 ++ } 3.579 ++ else 3.580 ++ { 3.581 ++ printk(KERN_ERR "%s: xz_dec_run error %d\n", cloop_name, err); 3.582 ++ err = 1; 3.583 ++ } 3.584 ++ break; 3.585 ++#endif 3.586 ++ default: 3.587 ++ printk(KERN_ERR "%s: compression method is not supported!\n", cloop_name); 3.588 ++ } 3.589 ++ return err; 3.590 + } 3.591 + 3.592 + static ssize_t cloop_read_from_file(struct cloop_device *clo, struct file *f, char *buf, 3.593 +@@ -220,7 +313,7 @@ 3.594 + 3.595 + if(size_read <= 0) 3.596 + { 3.597 +- printk(KERN_ERR "%s: Read error %d at pos %Lu in file %s, " 3.598 ++ printk(KERN_ERR "%s: Read error %d at pos %llu in file %s, " 3.599 + "%d bytes lost.\n", cloop_name, (int)size_read, pos, 3.600 + file, (int)size); 3.601 + memset(buf + buf_len - size, 0, size); 3.602 +@@ -232,72 +325,84 @@ 3.603 + } 3.604 + 3.605 + /* This looks more complicated than it is */ 3.606 +-/* Returns number of block buffer to use for this request */ 3.607 ++/* Returns number of cache block buffer to use for this request */ 3.608 + static int cloop_load_buffer(struct cloop_device *clo, int blocknum) 3.609 + { 3.610 +- unsigned int buf_done = 0; 3.611 +- unsigned long buflen; 3.612 +- unsigned int buf_length; 3.613 ++ loff_t compressed_block_offset; 3.614 ++ long compressed_block_len; 3.615 ++ long uncompressed_block_len=0; 3.616 + int ret; 3.617 + int i; 3.618 +- if(blocknum > ntohl(clo->head.num_blocks) || blocknum < 0) 3.619 +- { 3.620 +- printk(KERN_WARNING "%s: Invalid block number %d requested.\n", 3.621 +- cloop_name, blocknum); 3.622 +- return -1; 3.623 +- } 3.624 ++ if(blocknum > clo->head.num_blocks || blocknum < 0) 3.625 ++ { 3.626 ++ printk(KERN_WARNING "%s: Invalid block number %d requested.\n", 3.627 ++ cloop_name, blocknum); 3.628 ++ return -1; 3.629 ++ } 3.630 + 3.631 + /* Quick return if the block we seek is already in one of the buffers. */ 3.632 + /* Return number of buffer */ 3.633 +- for(i=0; i<BUFFERED_BLOCKS; i++) 3.634 ++ for(i=0; i<clo->num_buffered_blocks; i++) 3.635 + if (blocknum == clo->buffered_blocknum[i]) 3.636 +- { 3.637 +- DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i); 3.638 +- return i; 3.639 +- } 3.640 +- 3.641 - buf_length = be64_to_cpu(clo->offsets[blocknum+1]) - be64_to_cpu(clo->offsets[blocknum]); 3.642 -+ buf_length = clo->offsets[blocknum].size; 3.643 +- 3.644 +-/* Load one compressed block from the file. 
*/ 3.645 +- cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 3.646 +- be64_to_cpu(clo->offsets[blocknum]), buf_length); 3.647 ++ { 3.648 ++ DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i); 3.649 ++ return i; 3.650 ++ } 3.651 3.652 - /* Load one compressed block from the file. */ 3.653 - cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 3.654 -- be64_to_cpu(clo->offsets[blocknum]), buf_length); 3.655 -+ clo->offsets[blocknum].offset, buf_length); 3.656 +- buflen = ntohl(clo->head.block_size); 3.657 ++ compressed_block_offset = clo->block_ptrs[blocknum].offset; 3.658 ++ compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ; 3.659 3.660 - buflen = ntohl(clo->head.block_size); 3.661 +- /* Go to next position in the block ring buffer */ 3.662 +- clo->current_bufnum++; 3.663 +- if(clo->current_bufnum >= BUFFERED_BLOCKS) clo->current_bufnum = 0; 3.664 ++ /* Load one compressed block from the file. */ 3.665 ++ if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */ 3.666 ++ { 3.667 ++ size_t n = cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 3.668 ++ compressed_block_offset, compressed_block_len); 3.669 ++ if (n!= compressed_block_len) 3.670 ++ { 3.671 ++ printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n", 3.672 ++ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename); 3.673 ++ /* return -1; */ 3.674 ++ } 3.675 ++ } else { 3.676 ++ printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n", 3.677 ++ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename); 3.678 ++ return -1; 3.679 ++ } 3.680 ++ 3.681 ++ /* Go to next position in the cache block buffer (which is used as a cyclic buffer) */ 3.682 ++ if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0; 3.683 3.684 -@@ -275,9 +282,9 @@ 3.685 + /* Do the uncompression */ 3.686 +- ret = uncompress(clo, clo->buffer[clo->current_bufnum], &buflen, clo->compressed_buffer, 3.687 +- buf_length); 3.688 ++ uncompressed_block_len = clo->head.block_size; 3.689 ++ ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len, 3.690 ++ clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags); 3.691 + /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */ 3.692 if (ret != 0) 3.693 +- { 3.694 +- printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u " 3.695 +- "%Lu-%Lu\n", cloop_name, ret, blocknum, 3.696 +- ntohl(clo->head.block_size), buflen, buf_length, buf_done, 3.697 +- be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1])); 3.698 +- clo->buffered_blocknum[clo->current_bufnum] = -1; 3.699 +- return -1; 3.700 +- } 3.701 ++ { 3.702 ++ printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n", 3.703 ++ cloop_name, ret, blocknum, 3.704 ++ compressed_block_len, clo->block_ptrs[blocknum].offset, 3.705 ++ clo->block_ptrs[blocknum].flags); 3.706 ++ clo->buffered_blocknum[clo->current_bufnum] = -1; 3.707 ++ return -1; 3.708 ++ } 3.709 + clo->buffered_blocknum[clo->current_bufnum] = blocknum; 3.710 + return clo->current_bufnum; 3.711 + } 3.712 + 3.713 + /* This function does all the real work. 
*/ 3.714 +-/* returns "uptodate" */ 3.715 ++/* returns "uptodate" */ 3.716 + static int cloop_handle_request(struct cloop_device *clo, struct request *req) 3.717 + { 3.718 + int buffered_blocknum = -1; 3.719 + int preloaded = 0; 3.720 + loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */ 3.721 +- struct bio_vec *bvec; 3.722 ++ struct bio_vec bvec; 3.723 + struct req_iterator iter; 3.724 + rq_for_each_segment(bvec, req, iter) 3.725 { 3.726 - printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u " 3.727 -- "%Lu-%Lu\n", cloop_name, ret, blocknum, 3.728 -+ "%Lu:%u\n", cloop_name, ret, blocknum, 3.729 - ntohl(clo->head.block_size), buflen, buf_length, buf_done, 3.730 -- be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1])); 3.731 -+ clo->offsets[blocknum].offset, clo->offsets[blocknum].size); 3.732 - clo->buffered_blocknum[clo->current_bufnum] = -1; 3.733 - return -1; 3.734 +- unsigned long len = bvec->bv_len; 3.735 +- char *to_ptr = kmap(bvec->bv_page) + bvec->bv_offset; 3.736 ++ unsigned long len = bvec.bv_len; 3.737 ++ char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset; 3.738 + while(len > 0) 3.739 + { 3.740 + u_int32_t length_in_buffer; 3.741 +@@ -308,7 +413,7 @@ 3.742 + /* puts the result in the first argument, i.e. block_offset */ 3.743 + /* becomes the blocknumber to load, and offset_in_buffer the */ 3.744 + /* position in the buffer */ 3.745 +- offset_in_buffer = do_div(block_offset, ntohl(clo->head.block_size)); 3.746 ++ offset_in_buffer = do_div(block_offset, clo->head.block_size); 3.747 + /* Lookup preload cache */ 3.748 + if(block_offset < clo->preload_size && clo->preload_cache != NULL && 3.749 + clo->preload_cache[block_offset] != NULL) 3.750 +@@ -325,7 +430,7 @@ 3.751 + from_ptr = clo->buffer[buffered_blocknum]; 3.752 + } 3.753 + /* Now, at least part of what we want will be in the buffer. */ 3.754 +- length_in_buffer = ntohl(clo->head.block_size) - offset_in_buffer; 3.755 ++ length_in_buffer = clo->head.block_size - offset_in_buffer; 3.756 + if(length_in_buffer > len) 3.757 + { 3.758 + /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", 3.759 +@@ -337,18 +442,19 @@ 3.760 + len -= length_in_buffer; 3.761 + offset += length_in_buffer; 3.762 + } /* while inner loop */ 3.763 +- kunmap(bvec->bv_page); 3.764 ++ kunmap(bvec.bv_page); 3.765 ++ cond_resched(); 3.766 + } /* end rq_for_each_segment*/ 3.767 + return ((buffered_blocknum!=-1) || preloaded); 3.768 + } 3.769 + 3.770 + /* Adopted from loop.c, a kernel thread to handle physical reads and 3.771 +- * decompression. */ 3.772 ++ decompression. 
*/ 3.773 + static int cloop_thread(void *data) 3.774 + { 3.775 + struct cloop_device *clo = data; 3.776 + current->flags |= PF_NOFREEZE; 3.777 +- set_user_nice(current, -15); 3.778 ++ set_user_nice(current, 10); 3.779 + while (!kthread_should_stop()||!list_empty(&clo->clo_list)) 3.780 + { 3.781 + int err; 3.782 +@@ -390,10 +496,18 @@ 3.783 + int rw; 3.784 + /* quick sanity checks */ 3.785 + /* blk_fs_request() was removed in 2.6.36 */ 3.786 +- if (unlikely(req == NULL || (req->cmd_type != REQ_TYPE_FS))) 3.787 ++ if (unlikely(req == NULL 3.788 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */ 3.789 ++ || (req->cmd_type != REQ_TYPE_FS) 3.790 ++#endif 3.791 ++ )) 3.792 + goto error_continue; 3.793 + rw = rq_data_dir(req); 3.794 +- if (unlikely(rw != READ && rw != READA)) 3.795 ++ if (unlikely(rw != READ 3.796 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) 3.797 ++ && rw != READA 3.798 ++#endif 3.799 ++ )) 3.800 + { 3.801 + DEBUGP("cloop_do_request: bad command\n"); 3.802 + goto error_continue; 3.803 +@@ -409,40 +523,51 @@ 3.804 + continue; /* next request */ 3.805 + error_continue: 3.806 + DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req); 3.807 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) 3.808 + req->errors++; 3.809 ++#else 3.810 ++ req->error_count++; 3.811 ++#endif 3.812 + __blk_end_request_all(req, -EIO); 3.813 } 3.814 -@@ -489,30 +496,73 @@ 3.815 - cloop_name, ntohl(clo->head.block_size)); 3.816 - error=-EBADF; goto error_release; 3.817 - } 3.818 + } 3.819 + 3.820 +-/* Read header and offsets from already opened file */ 3.821 +-static int cloop_set_file(int cloop_num, struct file *file, char *filename) 3.822 ++/* Read header, flags and offsets from already opened file */ 3.823 ++static int cloop_set_file(int cloop_num, struct file *file) 3.824 + { 3.825 + struct cloop_device *clo = cloop_dev[cloop_num]; 3.826 + struct inode *inode; 3.827 + char *bbuf=NULL; 3.828 +- unsigned int i, offsets_read, total_offsets; 3.829 +- int isblkdev; 3.830 +- int error = 0; 3.831 ++ unsigned int bbuf_size = 0; 3.832 ++ const unsigned int header_size = sizeof(struct cloop_head); 3.833 ++ unsigned int i, total_offsets=0; 3.834 ++ loff_t fs_read_position = 0, header_pos[2]; 3.835 ++ int flags, isblkdev, bytes_read, error = 0; 3.836 ++ if (clo->suspended) return error; 3.837 ++ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) 3.838 + inode = file->f_dentry->d_inode; 3.839 ++ clo->underlying_filename = kstrdup(file->f_dentry->d_name.name ? file->f_dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL); 3.840 ++ #else 3.841 ++ inode = file->f_path.dentry->d_inode; 3.842 ++ clo->underlying_filename = kstrdup(file->f_path.dentry->d_name.name ? file->f_path.dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL); 3.843 ++ #endif 3.844 + isblkdev=S_ISBLK(inode->i_mode)?1:0; 3.845 + if(!isblkdev&&!S_ISREG(inode->i_mode)) 3.846 + { 3.847 + printk(KERN_ERR "%s: %s not a regular file or block device\n", 3.848 +- cloop_name, filename); 3.849 ++ cloop_name, clo->underlying_filename); 3.850 + error=-EBADF; goto error_release; 3.851 + } 3.852 + clo->backing_file = file; 3.853 + clo->backing_inode= inode ; 3.854 +- if(!isblkdev&&inode->i_size<sizeof(struct cloop_head)) 3.855 ++ clo->underlying_total_size = (isblkdev) ? 
inode->i_bdev->bd_inode->i_size : inode->i_size; 3.856 ++ if(clo->underlying_total_size < header_size) 3.857 + { 3.858 +- printk(KERN_ERR "%s: %lu bytes (must be >= %u bytes)\n", 3.859 +- cloop_name, (unsigned long)inode->i_size, 3.860 +- (unsigned)sizeof(struct cloop_head)); 3.861 ++ printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n", 3.862 ++ cloop_name, clo->underlying_total_size, 3.863 ++ (unsigned int)header_size); 3.864 + error=-EBADF; goto error_release; 3.865 + } 3.866 +- /* In suspended mode, we have done all checks necessary - FF */ 3.867 +- if (clo->suspended) 3.868 +- return error; 3.869 + if(isblkdev) 3.870 + { 3.871 + struct request_queue *q = bdev_get_queue(inode->i_bdev); 3.872 +@@ -451,104 +576,225 @@ 3.873 + /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */ 3.874 + blk_queue_max_segment_size(clo->clo_queue, queue_max_segment_size(q)); 3.875 + blk_queue_segment_boundary(clo->clo_queue, queue_segment_boundary(q)); 3.876 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) 3.877 + blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn); 3.878 ++#endif 3.879 + clo->underlying_blksize = block_size(inode->i_bdev); 3.880 + } 3.881 + else 3.882 + clo->underlying_blksize = PAGE_SIZE; 3.883 +- DEBUGP("Underlying blocksize is %u\n", clo->underlying_blksize); 3.884 +- bbuf = cloop_malloc(clo->underlying_blksize); 3.885 ++ 3.886 ++ DEBUGP(KERN_INFO "Underlying blocksize of %s is %u\n", clo->underlying_filename, clo->underlying_blksize); 3.887 ++ DEBUGP(KERN_INFO "Underlying total size of %s is %llu\n", clo->underlying_filename, clo->underlying_total_size); 3.888 ++ 3.889 ++ /* clo->underlying_blksize should be larger than header_size, even if it's only PAGE_SIZE */ 3.890 ++ bbuf_size = clo->underlying_blksize; 3.891 ++ bbuf = cloop_malloc(bbuf_size); 3.892 + if(!bbuf) 3.893 + { 3.894 +- printk(KERN_ERR "%s: out of kernel mem for block buffer (%lu bytes)\n", 3.895 +- cloop_name, (unsigned long)clo->underlying_blksize); 3.896 ++ printk(KERN_ERR "%s: out of kernel mem for buffer (%u bytes)\n", 3.897 ++ cloop_name, (unsigned int) bbuf_size); 3.898 ++ error=-ENOMEM; goto error_release; 3.899 ++ } 3.900 ++ 3.901 ++ header_pos[0] = 0; /* header first */ 3.902 ++ header_pos[1] = clo->underlying_total_size - sizeof(struct cloop_head); /* header last */ 3.903 ++ for(i=0; i<2; i++) 3.904 ++ { 3.905 ++ /* Check for header */ 3.906 ++ size_t bytes_readable = MIN(clo->underlying_blksize, clo->underlying_total_size - header_pos[i]); 3.907 ++ size_t bytes_read = cloop_read_from_file(clo, file, bbuf, header_pos[i], bytes_readable); 3.908 ++ if(bytes_read != bytes_readable) 3.909 ++ { 3.910 ++ printk(KERN_ERR "%s: Bad file %s, read() of %s %u bytes returned %d.\n", 3.911 ++ cloop_name, clo->underlying_filename, (i==0)?"first":"last", 3.912 ++ (unsigned int)header_size, (int)bytes_read); 3.913 ++ error=-EBADF; 3.914 ++ goto error_release; 3.915 ++ } 3.916 ++ memcpy(&clo->head, bbuf, header_size); 3.917 ++ if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0) 3.918 ++ { 3.919 ++ clo->file_format++; 3.920 ++ clo->head.block_size=ntohl(clo->head.block_size); 3.921 ++ clo->head.num_blocks=ntohl(clo->head.num_blocks); 3.922 ++ clo->header_first = (i==0) ? 
1 : 0; 3.923 ++ printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last"); 3.924 ++ break; 3.925 ++ } 3.926 ++ } 3.927 ++ if (clo->file_format == 0) 3.928 ++ { 3.929 ++ printk(KERN_ERR "%s: Cannot detect %s format.\n", 3.930 ++ cloop_name, cloop_name); 3.931 ++ error=-EBADF; goto error_release; 3.932 ++ } 3.933 ++ if (clo->head.block_size % 512 != 0) 3.934 ++ { 3.935 ++ printk(KERN_ERR "%s: blocksize %u not multiple of 512\n", 3.936 ++ cloop_name, clo->head.block_size); 3.937 ++ error=-EBADF; goto error_release; 3.938 ++ } 3.939 ++ total_offsets=clo->head.num_blocks; 3.940 ++ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)* 3.941 ++ total_offsets > inode->i_size)) 3.942 ++ { 3.943 ++ printk(KERN_ERR "%s: file %s too small for %u blocks\n", 3.944 ++ cloop_name, clo->underlying_filename, clo->head.num_blocks); 3.945 ++ error=-EBADF; goto error_release; 3.946 ++ } 3.947 ++ /* Allocate Memory for decompressors */ 3.948 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 3.949 ++ clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize()); 3.950 ++ if(!clo->zstream.workspace) 3.951 ++ { 3.952 ++ printk(KERN_ERR "%s: out of mem for zlib working area %u\n", 3.953 ++ cloop_name, zlib_inflate_workspacesize()); 3.954 + error=-ENOMEM; goto error_release; 3.955 + } 3.956 +- total_offsets = 1; /* Dummy total_offsets: will be filled in first time around */ 3.957 +- for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++) 3.958 ++ zlib_inflateInit(&clo->zstream); 3.959 ++#endif 3.960 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 3.961 ++#if XZ_INTERNAL_CRC32 3.962 ++ /* This must be called before any other xz_* function to initialize the CRC32 lookup table. 
*/ 3.963 ++ xz_crc32_init(void); 3.964 ++#endif 3.965 ++ clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0); 3.966 ++#endif 3.967 ++ if (total_offsets + 1 == 0) /* Version 3 */ 3.968 + { 3.969 +- unsigned int offset = 0, num_readable; 3.970 +- size_t bytes_read = cloop_read_from_file(clo, file, bbuf, 3.971 +- i*clo->underlying_blksize, 3.972 +- clo->underlying_blksize); 3.973 +- if(bytes_read != clo->underlying_blksize) 3.974 ++ struct cloop_tail tail; 3.975 ++ if (isblkdev) 3.976 + { 3.977 +- printk(KERN_ERR "%s: Bad file, read() of first %lu bytes returned %d.\n", 3.978 +- cloop_name, (unsigned long)clo->underlying_blksize, (int)bytes_read); 3.979 +- error=-EBADF; 3.980 +- goto error_release; 3.981 ++ /* No end of file: can't find index */ 3.982 ++ printk(KERN_ERR "%s: no V3 support for block device\n", 3.983 ++ cloop_name); 3.984 ++ error=-EBADF; goto error_release; 3.985 + } 3.986 +- /* Header will be in block zero */ 3.987 +- if(i==0) 3.988 ++ bytes_read = cloop_read_from_file(clo, file, (void *) &tail, 3.989 ++ inode->i_size - sizeof(struct cloop_tail), 3.990 ++ sizeof(struct cloop_tail)); 3.991 ++ if (bytes_read == sizeof(struct cloop_tail)) 3.992 + { 3.993 +- memcpy(&clo->head, bbuf, sizeof(struct cloop_head)); 3.994 +- offset = sizeof(struct cloop_head); 3.995 +- if (ntohl(clo->head.block_size) % 512 != 0) 3.996 ++ unsigned long len, zlen; 3.997 ++ int ret; 3.998 ++ void *zbuf; 3.999 ++ clo->head.num_blocks = ntohl(tail.num_blocks); 3.1000 ++ total_offsets = clo->head.num_blocks; 3.1001 ++ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets); 3.1002 ++ zlen = ntohl(tail.table_size); 3.1003 ++ zbuf = cloop_malloc(zlen); 3.1004 ++ if (!clo->block_ptrs || !zbuf) 3.1005 + { 3.1006 +- printk(KERN_ERR "%s: blocksize %u not multiple of 512\n", 3.1007 +- cloop_name, ntohl(clo->head.block_size)); 3.1008 +- error=-EBADF; goto error_release; 3.1009 +- } 3.1010 - if (clo->head.preamble[0x0B]!='V'||clo->head.preamble[0x0C]<'1') 3.1011 - { 3.1012 - printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, " 3.1013 - "please use an older version of %s for this file.\n", 3.1014 - cloop_name, cloop_name); 3.1015 - error=-EBADF; goto error_release; 3.1016 -- } 3.1017 ++ printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name); 3.1018 ++ error=-ENOMEM; goto error_release; 3.1019 + } 3.1020 - if (clo->head.preamble[0x0C]<'2') 3.1021 -- { 3.1022 ++ bytes_read = cloop_read_from_file(clo, file, zbuf, 3.1023 ++ inode->i_size - zlen - sizeof(struct cloop_tail), 3.1024 ++ zlen); 3.1025 ++ if (bytes_read != zlen) 3.1026 + { 3.1027 - printk(KERN_ERR "%s: Cannot read old architecture-dependent " 3.1028 - "(format <= 1.0) images, please use an older " 3.1029 - "version of %s for this file.\n", 3.1030 - cloop_name, cloop_name); 3.1031 -- error=-EBADF; goto error_release; 3.1032 -- } 3.1033 ++ printk(KERN_ERR "%s: can't read index\n", cloop_name); 3.1034 + error=-EBADF; goto error_release; 3.1035 + } 3.1036 - total_offsets=ntohl(clo->head.num_blocks)+1; 3.1037 - if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)* 3.1038 -+ total_offsets=ntohl(clo->head.num_blocks); 3.1039 -+ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)* 3.1040 - total_offsets > inode->i_size)) 3.1041 +- total_offsets > inode->i_size)) 3.1042 ++ len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets; 3.1043 ++ flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size)); 3.1044 ++// May 3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, 
&len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0) 3.1045 ++printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 3.1046 ++ clo, clo->block_ptrs, len, &len, zbuf, zlen, flags); 3.1047 ++ ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags); 3.1048 ++// May 3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at (null) 3.1049 ++printk(KERN_INFO "%s: uncompressed !\n", cloop_name); 3.1050 ++ cloop_free(zbuf, zlen); 3.1051 ++ if (ret != 0) 3.1052 { 3.1053 - printk(KERN_ERR "%s: file too small for %u blocks\n", 3.1054 - cloop_name, ntohl(clo->head.num_blocks)); 3.1055 +- printk(KERN_ERR "%s: file too small for %u blocks\n", 3.1056 +- cloop_name, ntohl(clo->head.num_blocks)); 3.1057 ++ printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n", 3.1058 ++ cloop_name, ret, flags); 3.1059 error=-EBADF; goto error_release; 3.1060 } 3.1061 - clo->offsets = cloop_malloc(sizeof(loff_t) * total_offsets); 3.1062 -+ if (total_offsets + 1 == 0) /* Version >= 3.0 */ 3.1063 -+ { 3.1064 -+ struct cloop_tail tail; 3.1065 -+ if(isblkdev) 3.1066 -+ { 3.1067 -+ /* No end of file: can't find index */ 3.1068 -+ printk(KERN_ERR "%s: no V3 support for block device\n", 3.1069 -+ cloop_name); 3.1070 -+ error=-EBADF; goto error_release; 3.1071 -+ } 3.1072 -+ bytes_read = cloop_read_from_file(clo, file, (void *) &tail, 3.1073 -+ inode->i_size - sizeof(struct cloop_tail), 3.1074 -+ sizeof(struct cloop_tail)); 3.1075 -+ if(bytes_read == sizeof(struct cloop_tail)) 3.1076 -+ { 3.1077 -+ unsigned long len, zlen; 3.1078 -+ void *zbuf; 3.1079 -+ clo->head.num_blocks = tail.num_blocks; 3.1080 -+ total_offsets = ntohl(clo->head.num_blocks); 3.1081 -+ clo->offsets = cloop_malloc(sizeof(struct block_info) * total_offsets); 3.1082 -+ if (!clo->offsets) 3.1083 -+ { 3.1084 -+ printk(KERN_ERR "%s: can't alloc index\n", 3.1085 -+ cloop_name); 3.1086 -+ error=-EBADF; goto error_release; 3.1087 -+ } 3.1088 -+ zbuf = &clo->offsets[total_offsets/2]; 3.1089 -+ zlen = ntohl(tail.table_size); 3.1090 -+ len = ntohl(tail.index_size) * total_offsets; 3.1091 -+ bytes_read = cloop_read_from_file(clo, file, zbuf, 3.1092 -+ inode->i_size - zlen - sizeof(struct cloop_tail), 3.1093 -+ zlen); 3.1094 -+ if (bytes_read != zlen) 3.1095 -+ { 3.1096 -+ printk(KERN_ERR "%s: can't read index\n", 3.1097 -+ cloop_name); 3.1098 -+ error=-EBADF; goto error_release; 3.1099 -+ } 3.1100 -+ clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize()); 3.1101 -+ if(!clo->zstream.workspace) 3.1102 -+ { 3.1103 -+ printk(KERN_ERR "%s: can't alloc index workspace\n", 3.1104 -+ cloop_name); 3.1105 -+ error=-EBADF; goto error_release; 3.1106 -+ } 3.1107 -+ zlib_inflateInit(&clo->zstream); 3.1108 -+ uncompress(clo, (void *) clo->offsets, &len, zbuf, zlen); 3.1109 -+ cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); 3.1110 -+ clo->zstream.workspace = NULL; 3.1111 -+ break; 3.1112 -+ } 3.1113 -+ else 3.1114 -+ { 3.1115 -+ printk(KERN_ERR "%s: can't find index\n", 3.1116 -+ cloop_name); 3.1117 -+ error=-EBADF; goto error_release; 3.1118 -+ } 3.1119 -+ } 3.1120 -+ clo->offsets = cloop_malloc(sizeof(struct block_info) * total_offsets); 3.1121 - if (!clo->offsets) 3.1122 - { 3.1123 - printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 3.1124 -@@ -521,19 +571,22 @@ 3.1125 +- if (!clo->offsets) 3.1126 +- { 3.1127 +- printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 3.1128 +- error=-ENOMEM; 
goto error_release; 3.1129 +- } 3.1130 } 3.1131 - num_readable = MIN(total_offsets - offsets_read, 3.1132 - (clo->underlying_blksize - offset) 3.1133 +- num_readable = MIN(total_offsets - offsets_read, 3.1134 +- (clo->underlying_blksize - offset) 3.1135 - / sizeof(loff_t)); 3.1136 - memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(loff_t)); 3.1137 -+ / sizeof(struct block_info)); 3.1138 -+ memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(struct block_info)); 3.1139 - offsets_read += num_readable; 3.1140 - } 3.1141 - { /* Search for largest block rather than estimate. KK. */ 3.1142 - int i; 3.1143 +- offsets_read += num_readable; 3.1144 +- } 3.1145 +- { /* Search for largest block rather than estimate. KK. */ 3.1146 +- int i; 3.1147 - for(i=0;i<total_offsets-1;i++) 3.1148 -+ char *version = build_index(clo->offsets, ntohl(clo->head.num_blocks)); 3.1149 -+ for(i=0,clo->largest_block=0;i<total_offsets;i++) 3.1150 ++ else 3.1151 ++ { 3.1152 ++ printk(KERN_ERR "%s: can't find index\n", cloop_name); 3.1153 ++ error=-ENOMEM; goto error_release; 3.1154 ++ } 3.1155 ++ } 3.1156 ++ else 3.1157 ++ { 3.1158 ++ unsigned int n, total_bytes; 3.1159 ++ flags = 0; 3.1160 ++ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets); 3.1161 ++ if (!clo->block_ptrs) 3.1162 ++ { 3.1163 ++ printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 3.1164 ++ error=-ENOMEM; goto error_release; 3.1165 ++ } 3.1166 ++ /* Read them offsets! */ 3.1167 ++ if(clo->header_first) 3.1168 ++ { 3.1169 ++ total_bytes = total_offsets * sizeof(struct block_info); 3.1170 ++ fs_read_position = sizeof(struct cloop_head); 3.1171 ++ } 3.1172 ++ else 3.1173 { 3.1174 - loff_t d=be64_to_cpu(clo->offsets[i+1]) - be64_to_cpu(clo->offsets[i]); 3.1175 - clo->largest_block=MAX(clo->largest_block,d); 3.1176 -+ clo->largest_block=MAX(clo->largest_block,clo->offsets[i].size); 3.1177 ++ total_bytes = total_offsets * sizeof(loff_t); 3.1178 ++ fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes; 3.1179 ++ } 3.1180 ++ for(n=0;n<total_bytes;) 3.1181 ++ { 3.1182 ++ size_t bytes_readable; 3.1183 ++ bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position); 3.1184 ++ if(bytes_readable <= 0) break; /* Done */ 3.1185 ++ bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable); 3.1186 ++ if(bytes_read != bytes_readable) 3.1187 ++ { 3.1188 ++ printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n", 3.1189 ++ cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read); 3.1190 ++ error=-EBADF; 3.1191 ++ goto error_release; 3.1192 ++ } 3.1193 ++ memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read); 3.1194 ++ /* remember where to read the next blk from file */ 3.1195 ++ fs_read_position += bytes_read; 3.1196 ++ n += bytes_read; 3.1197 } 3.1198 - printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n", 3.1199 - cloop_name, filename, ntohl(clo->head.num_blocks), 3.1200 -+ i = ntohl(clo->head.block_size); 3.1201 -+ i += i/1000 + 12 + 4; /* max gzip block size */ 3.1202 -+ if (clo->largest_block > i) clo->largest_block = i; /* broken index ? 
*/ 3.1203 -+ printk(KERN_INFO "%s: %s: %s, %u blocks, %u bytes/block, largest block is %lu bytes.\n", 3.1204 -+ cloop_name, filename, version, ntohl(clo->head.num_blocks), 3.1205 - ntohl(clo->head.block_size), clo->largest_block); 3.1206 +- ntohl(clo->head.block_size), clo->largest_block); 3.1207 } 3.1208 - /* Combo kmalloc used too large chunks (>130000). */ 3.1209 -@@ -565,16 +618,6 @@ 3.1210 - error=-ENOMEM; goto error_release_free_all; 3.1211 +-/* Combo kmalloc used too large chunks (>130000). */ 3.1212 + { 3.1213 + int i; 3.1214 +- for(i=0;i<BUFFERED_BLOCKS;i++) 3.1215 +- { 3.1216 +- clo->buffer[i] = cloop_malloc(ntohl(clo->head.block_size)); 3.1217 +- if(!clo->buffer[i]) 3.1218 +- { 3.1219 +- printk(KERN_ERR "%s: out of memory for buffer %lu\n", 3.1220 +- cloop_name, (unsigned long) ntohl(clo->head.block_size)); 3.1221 +- error=-ENOMEM; goto error_release_free; 3.1222 +- } 3.1223 +- } 3.1224 ++ char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags); 3.1225 ++ clo->largest_block = 0; 3.1226 ++ for (i = 0; i < clo->head.num_blocks; i++) 3.1227 ++ if (clo->block_ptrs[i].size > clo->largest_block) 3.1228 ++ clo->largest_block = clo->block_ptrs[i].size; 3.1229 ++ printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n", 3.1230 ++ cloop_name, clo->underlying_filename, version, clo->head.num_blocks, 3.1231 ++ clo->head.block_size, clo->largest_block); 3.1232 ++ } 3.1233 ++ { 3.1234 ++ int i; 3.1235 ++ clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ? 3.1236 ++ (buffers / clo->head.block_size) : 1; 3.1237 ++ clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t)); 3.1238 ++ clo->buffer = cloop_malloc(clo->num_buffered_blocks * sizeof (char*)); 3.1239 ++ if (!clo->buffered_blocknum || !clo->buffer) 3.1240 ++ { 3.1241 ++ printk(KERN_ERR "%s: out of memory for index of cache buffer (%lu bytes)\n", 3.1242 ++ cloop_name, (unsigned long)clo->num_buffered_blocks * sizeof (u_int32_t) + sizeof(char*) ); 3.1243 ++ error=-ENOMEM; goto error_release; 3.1244 ++ } 3.1245 ++ memset(clo->buffer, 0, clo->num_buffered_blocks * sizeof (char*)); 3.1246 ++ for(i=0;i<clo->num_buffered_blocks;i++) 3.1247 ++ { 3.1248 ++ clo->buffered_blocknum[i] = -1; 3.1249 ++ clo->buffer[i] = cloop_malloc(clo->head.block_size); 3.1250 ++ if(!clo->buffer[i]) 3.1251 ++ { 3.1252 ++ printk(KERN_ERR "%s: out of memory for cache buffer %lu\n", 3.1253 ++ cloop_name, (unsigned long) clo->head.block_size); 3.1254 ++ error=-ENOMEM; goto error_release_free; 3.1255 ++ } 3.1256 ++ } 3.1257 ++ clo->current_bufnum = 0; 3.1258 + } 3.1259 + clo->compressed_buffer = cloop_malloc(clo->largest_block); 3.1260 + if(!clo->compressed_buffer) 3.1261 +@@ -557,31 +803,7 @@ 3.1262 + cloop_name, clo->largest_block); 3.1263 + error=-ENOMEM; goto error_release_free_buffer; 3.1264 } 3.1265 - zlib_inflateInit(&clo->zstream); 3.1266 +- clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize()); 3.1267 +- if(!clo->zstream.workspace) 3.1268 +- { 3.1269 +- printk(KERN_ERR "%s: out of mem for zlib working area %u\n", 3.1270 +- cloop_name, zlib_inflate_workspacesize()); 3.1271 +- error=-ENOMEM; goto error_release_free_all; 3.1272 +- } 3.1273 +- zlib_inflateInit(&clo->zstream); 3.1274 - if(!isblkdev && 3.1275 - be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]) != inode->i_size) 3.1276 - { 3.1277 @@ -274,15 +1112,264 @@ 3.1278 - cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL; 3.1279 - 
goto error_release_free_all; 3.1280 - } 3.1281 +- { 3.1282 +- int i; 3.1283 +- for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1; 3.1284 +- clo->current_bufnum=0; 3.1285 +- } 3.1286 +- set_capacity(clo->clo_disk, (sector_t)(ntohl(clo->head.num_blocks)* 3.1287 +- (ntohl(clo->head.block_size)>>9))); 3.1288 ++ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9))); 3.1289 + clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num); 3.1290 + if(IS_ERR(clo->clo_thread)) 3.1291 + { 3.1292 +@@ -591,17 +813,17 @@ 3.1293 + } 3.1294 + if(preload > 0) 3.1295 + { 3.1296 +- clo->preload_array_size = ((preload<=ntohl(clo->head.num_blocks))?preload:ntohl(clo->head.num_blocks)); 3.1297 ++ clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks); 3.1298 + clo->preload_size = 0; 3.1299 + if((clo->preload_cache = cloop_malloc(clo->preload_array_size * sizeof(char *))) != NULL) 3.1300 + { 3.1301 + int i; 3.1302 + for(i=0; i<clo->preload_array_size; i++) 3.1303 + { 3.1304 +- if((clo->preload_cache[i] = cloop_malloc(ntohl(clo->head.block_size))) == NULL) 3.1305 ++ if((clo->preload_cache[i] = cloop_malloc(clo->head.block_size)) == NULL) 3.1306 + { /* Out of memory */ 3.1307 + printk(KERN_WARNING "%s: cloop_malloc(%d) failed for preload_cache[%d] (ignored).\n", 3.1308 +- cloop_name, ntohl(clo->head.block_size), i); 3.1309 ++ cloop_name, clo->head.block_size, i); 3.1310 + break; 3.1311 + } 3.1312 + } 3.1313 +@@ -612,13 +834,13 @@ 3.1314 + if(buffered_blocknum >= 0) 3.1315 + { 3.1316 + memcpy(clo->preload_cache[i], clo->buffer[buffered_blocknum], 3.1317 +- ntohl(clo->head.block_size)); 3.1318 ++ clo->head.block_size); 3.1319 + } 3.1320 + else 3.1321 + { 3.1322 + printk(KERN_WARNING "%s: can't read block %d into preload cache, set to zero.\n", 3.1323 + cloop_name, i); 3.1324 +- memset(clo->preload_cache[i], 0, ntohl(clo->head.block_size)); 3.1325 ++ memset(clo->preload_cache[i], 0, clo->head.block_size); 3.1326 + } 3.1327 + } 3.1328 + printk(KERN_INFO "%s: preloaded %d blocks into cache.\n", cloop_name, 3.1329 +@@ -641,22 +863,19 @@ 3.1330 + cloop_free(clo->compressed_buffer, clo->largest_block); 3.1331 + clo->compressed_buffer=NULL; 3.1332 + error_release_free_buffer: 3.1333 ++ if(clo->buffer) 3.1334 { 3.1335 int i; 3.1336 - for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1; 3.1337 -@@ -653,7 +696,7 @@ 3.1338 - } 3.1339 +- for(i=0; i<BUFFERED_BLOCKS; i++) 3.1340 +- { 3.1341 +- if(clo->buffer[i]) 3.1342 +- { 3.1343 +- cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); 3.1344 +- clo->buffer[i]=NULL; 3.1345 +- } 3.1346 +- } 3.1347 ++ for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) { cloop_free(clo->buffer[i], clo->head.block_size); clo->buffer[i]=NULL; }} 3.1348 ++ cloop_free(clo->buffer, clo->num_buffered_blocks*sizeof(char*)); clo->buffer=NULL; 3.1349 } 3.1350 ++ if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; } 3.1351 error_release_free: 3.1352 - cloop_free(clo->offsets, sizeof(loff_t) * total_offsets); 3.1353 -+ cloop_free(clo->offsets, sizeof(struct block_info) * total_offsets); 3.1354 - clo->offsets=NULL; 3.1355 +- clo->offsets=NULL; 3.1356 ++ cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets); 3.1357 ++ clo->block_ptrs=NULL; 3.1358 error_release: 3.1359 if(bbuf) cloop_free(bbuf, clo->underlying_blksize); 3.1360 ++ if(clo->underlying_filename) { kfree(clo->underlying_filename); 
clo->underlying_filename=NULL; } 3.1361 + clo->backing_file=NULL; 3.1362 + return error; 3.1363 + } 3.1364 +@@ -673,7 +892,7 @@ 3.1365 + if(clo->backing_file) return -EBUSY; 3.1366 + file = fget(arg); /* get filp struct from ioctl arg fd */ 3.1367 + if(!file) return -EBADF; 3.1368 +- error=cloop_set_file(cloop_num,file,"losetup_file"); 3.1369 ++ error=cloop_set_file(cloop_num,file); 3.1370 + set_device_ro(bdev, 1); 3.1371 + if(error) fput(file); 3.1372 + return error; 3.1373 +@@ -684,29 +903,48 @@ 3.1374 + { 3.1375 + struct cloop_device *clo = cloop_dev[cloop_num]; 3.1376 + struct file *filp = clo->backing_file; 3.1377 +- int i; 3.1378 + if(clo->refcnt > 1) /* we needed one fd for the ioctl */ 3.1379 + return -EBUSY; 3.1380 + if(filp==NULL) return -EINVAL; 3.1381 + if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; } 3.1382 +- if(filp!=initial_file) fput(filp); 3.1383 +- else { filp_close(initial_file,0); initial_file=NULL; } 3.1384 ++ if(filp!=initial_file) 3.1385 ++ fput(filp); 3.1386 ++ else 3.1387 ++ { 3.1388 ++ filp_close(initial_file,0); 3.1389 ++ initial_file=NULL; 3.1390 ++ } 3.1391 + clo->backing_file = NULL; 3.1392 + clo->backing_inode = NULL; 3.1393 +- if(clo->offsets) { cloop_free(clo->offsets, clo->underlying_blksize); clo->offsets = NULL; } 3.1394 ++ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; } 3.1395 ++ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; } 3.1396 + if(clo->preload_cache) 3.1397 +- { 3.1398 +- for(i=0; i < clo->preload_size; i++) 3.1399 +- cloop_free(clo->preload_cache[i], ntohl(clo->head.block_size)); 3.1400 +- cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *)); 3.1401 +- clo->preload_cache = NULL; 3.1402 +- clo->preload_size = clo->preload_array_size = 0; 3.1403 +- } 3.1404 +- for(i=0; i<BUFFERED_BLOCKS; i++) 3.1405 +- if(clo->buffer[i]) { cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); clo->buffer[i]=NULL; } 3.1406 ++ { 3.1407 ++ int i; 3.1408 ++ for(i=0; i < clo->preload_size; i++) 3.1409 ++ cloop_free(clo->preload_cache[i], clo->head.block_size); 3.1410 ++ cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *)); 3.1411 ++ clo->preload_cache = NULL; 3.1412 ++ clo->preload_size = clo->preload_array_size = 0; 3.1413 ++ } 3.1414 ++ if (clo->buffered_blocknum) 3.1415 ++ { 3.1416 ++ cloop_free(clo->buffered_blocknum, sizeof(int) * clo->num_buffered_blocks); clo->buffered_blocknum = NULL; 3.1417 ++ } 3.1418 ++ if (clo->buffer) 3.1419 ++ { 3.1420 ++ int i; 3.1421 ++ for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) cloop_free(clo->buffer[i], clo->head.block_size); } 3.1422 ++ cloop_free(clo->buffer, sizeof(char*) * clo->num_buffered_blocks); clo->buffer = NULL; 3.1423 ++ } 3.1424 + if(clo->compressed_buffer) { cloop_free(clo->compressed_buffer, clo->largest_block); clo->compressed_buffer = NULL; } 3.1425 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 3.1426 + zlib_inflateEnd(&clo->zstream); 3.1427 + if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; } 3.1428 ++#endif 3.1429 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 3.1430 ++ xz_dec_end(clo->xzdecoderstate); 3.1431 ++#endif 3.1432 + if(bdev) invalidate_bdev(bdev); 3.1433 + if(clo->clo_disk) set_capacity(clo->clo_disk, 0); 3.1434 + return 0; 3.1435 +@@ -731,8 +969,8 @@ 3.1436 + const struct 
loop_info64 *info) 3.1437 + { 3.1438 + if (!clo->backing_file) return -ENXIO; 3.1439 +- memcpy(clo->clo_file_name, info->lo_file_name, LO_NAME_SIZE); 3.1440 +- clo->clo_file_name[LO_NAME_SIZE-1] = 0; 3.1441 ++ if(clo->underlying_filename) kfree(clo->underlying_filename); 3.1442 ++ clo->underlying_filename = kstrdup(info->lo_file_name, GFP_KERNEL); 3.1443 + return 0; 3.1444 + } 3.1445 + 3.1446 +@@ -743,7 +981,11 @@ 3.1447 + struct kstat stat; 3.1448 + int err; 3.1449 + if (!file) return -ENXIO; 3.1450 +- err = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat); 3.1451 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) 3.1452 ++ err = vfs_getattr(&file->f_path, &stat); 3.1453 ++#else 3.1454 ++ err = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); 3.1455 ++#endif 3.1456 + if (err) return err; 3.1457 + memset(info, 0, sizeof(*info)); 3.1458 + info->lo_number = clo->clo_number; 3.1459 +@@ -753,7 +995,8 @@ 3.1460 + info->lo_offset = 0; 3.1461 + info->lo_sizelimit = 0; 3.1462 + info->lo_flags = 0; 3.1463 +- memcpy(info->lo_file_name, clo->clo_file_name, LO_NAME_SIZE); 3.1464 ++ strncpy(info->lo_file_name, clo->underlying_filename, LO_NAME_SIZE); 3.1465 ++ info->lo_file_name[LO_NAME_SIZE-1]=0; 3.1466 + return 0; 3.1467 + } 3.1468 + 3.1469 +@@ -833,8 +1076,6 @@ 3.1470 + if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; 3.1471 + return err; 3.1472 + } 3.1473 +-/* EOF get/set_status */ 3.1474 +- 3.1475 + 3.1476 + static int cloop_ioctl(struct block_device *bdev, fmode_t mode, 3.1477 + unsigned int cmd, unsigned long arg) 3.1478 +@@ -914,21 +1155,20 @@ 3.1479 + /* losetup uses write-open and flags=0x8002 to set a new file */ 3.1480 + if(mode & FMODE_WRITE) 3.1481 + { 3.1482 +- printk(KERN_WARNING "%s: Can't open device read-write in mode 0x%x\n", cloop_name, mode); 3.1483 ++ printk(KERN_INFO "%s: Open in read-write mode 0x%x requested, ignored.\n", cloop_name, mode); 3.1484 + return -EROFS; 3.1485 + } 3.1486 + cloop_dev[cloop_num]->refcnt+=1; 3.1487 + return 0; 3.1488 + } 3.1489 + 3.1490 +-static int cloop_close(struct gendisk *disk, fmode_t mode) 3.1491 ++static void cloop_close(struct gendisk *disk, fmode_t mode) 3.1492 + { 3.1493 +- int cloop_num, err=0; 3.1494 +- if(!disk) return 0; 3.1495 ++ int cloop_num; 3.1496 ++ if(!disk) return; 3.1497 + cloop_num=((struct cloop_device *)disk->private_data)->clo_number; 3.1498 +- if(cloop_num < 0 || cloop_num > (cloop_count-1)) return 0; 3.1499 ++ if(cloop_num < 0 || cloop_num > (cloop_count-1)) return; 3.1500 + cloop_dev[cloop_num]->refcnt-=1; 3.1501 +- return err; 3.1502 + } 3.1503 + 3.1504 + static struct block_device_operations clo_fops = 3.1505 +@@ -973,6 +1213,10 @@ 3.1506 + goto error_out; 3.1507 + } 3.1508 + clo->clo_queue->queuedata = clo; 3.1509 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) 3.1510 ++ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue); 3.1511 ++ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue); 3.1512 ++#endif 3.1513 + clo->clo_disk = alloc_disk(1); 3.1514 + if(!clo->clo_disk) 3.1515 + { 3.1516 +@@ -1004,6 +1248,11 @@ 3.1517 + cloop_dev[cloop_num] = NULL; 3.1518 + } 3.1519 + 3.1520 ++/* LZ4 Stuff */ 3.1521 ++#if (defined USE_LZ4_INTERNAL) 3.1522 ++#include "lz4_kmod.c" 3.1523 ++#endif 3.1524 ++ 3.1525 + static int __init cloop_init(void) 3.1526 + { 3.1527 + int error=0; 3.1528 +@@ -1044,7 +1293,7 @@ 3.1529 + initial_file=NULL; /* if IS_ERR, it's NOT open. 
*/
3.1530 + }
3.1531 + else
3.1532 - error=cloop_set_file(0,initial_file,file);
3.1533 + error=cloop_set_file(0,initial_file);
3.1534 + if(error)
3.1535 + {
3.1536 + printk(KERN_ERR
3.1537 @@ -1052,9 +1301,6 @@
3.1538 + cloop_name, file, error);
3.1539 + goto init_out_dealloc;
3.1540 + }
3.1541 - if(namelen >= LO_NAME_SIZE) namelen = LO_NAME_SIZE-1;
3.1542 - memcpy(cloop_dev[0]->clo_file_name, file, namelen);
3.1543 - cloop_dev[0]->clo_file_name[namelen] = 0;
3.1544 + }
3.1545 return 0;
3.1546 init_out_dealloc:
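The hunks above end with cloop_init() handing the file= module parameter straight to the two-argument cloop_set_file() and dropping the old clo_file_name bookkeeping; images can also be attached later through the loop-style ioctl the patch keeps. A rough usage sketch, not part of the commit (the image path, preload count and /dev/cloop0 device name are only illustrative examples):

    # load the patched module with an initial image and preload a few blocks
    insmod cloop.ko file=/path/to/image.cloop preload=16
    # or, with the module already loaded, attach an image via the loop ioctls
    losetup /dev/cloop0 /path/to/image.cloop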
4.1 --- a/linux64-cloop/receipt Mon May 04 07:56:50 2020 +0100
4.2 +++ b/linux64-cloop/receipt Mon May 04 09:05:12 2020 +0000
4.3 @@ -2,13 +2,15 @@
4.4
4.5 PACKAGE="linux64-cloop"
4.6 SOURCE="cloop"
4.7 -VERSION="2.639-2"
4.8 +_VERSION="2.639-2"
4.9 +#VERSION="$(sed '/+#define CLOOP_VERSION/!d;s|.* "\(.*\)"|\1|' stuff/cloop.u)"
4.10 +VERSION="4.12"
4.11 CATEGORY="base-system"
4.12 MAINTAINER="pascal.bellard@slitaz.org"
4.13 LICENSE="GPL2"
4.14 SHORT_DESC="The read-only compressed loop device kernel module."
4.15 WEB_SITE="http://knoppix.net/wiki/Cloop"
4.16 -TARBALL="${SOURCE}_${VERSION}.tar.gz"
4.17 +TARBALL="${SOURCE}_${_VERSION}.tar.gz"
4.18 WGET_URL="http://debian-knoppix.alioth.debian.org/packages/$SOURCE/$TARBALL"
4.19 PROVIDE="linux-cloop:linux64"
4.20
4.21 @@ -22,9 +24,7 @@
4.22
4.23 compile_rules()
4.24 {
4.25 - patch -p0 < $stuff/cloop.u # 3.2.98
4.26 - sed -i -e 's|file->f_path.mnt, file->f_path.dentry|\&file->f_path|' \
4.27 - -e 's|bvec->|bvec.|g;s|*bvec|bvec|' cloop.c
4.28 + patch -p0 < $stuff/cloop.u
4.29 make KERNEL_DIR="/usr/src/linux" cloop.ko && xz cloop.ko
4.30 }
4.31
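As in the 32-bit receipt, VERSION is pinned to 4.12 while _VERSION keeps the 2.639-2 tarball name, and the commented-out line shows how the version could instead be derived from the CLOOP_VERSION define inside the patch. A quick sketch of what that extraction yields, assuming it is run from the package directory containing stuff/cloop.u:

    sed '/+#define CLOOP_VERSION/!d;s|.* "\(.*\)"|\1|' stuff/cloop.u
    # prints 4.12, taken from the '+#define CLOOP_VERSION "4.12"' line added by the patch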
5.1 --- a/linux64-cloop/stuff/cloop.u Mon May 04 07:56:50 2020 +0100 5.2 +++ b/linux64-cloop/stuff/cloop.u Mon May 04 09:05:12 2020 +0000 5.3 @@ -1,80 +1,221 @@ 5.4 --- cloop.h 5.5 +++ cloop.h 5.6 -@@ -20,6 +20,80 @@ 5.7 +@@ -1,15 +1,50 @@ 5.8 ++#define CLOOP_SIGNATURE "#!/bin/sh" /* @ offset 0 */ 5.9 ++#define CLOOP_SIGNATURE_SIZE 9 5.10 ++#define CLOOP_SIGNATURE_OFFSET 0x0 5.11 ++ 5.12 + #ifndef _COMPRESSED_LOOP_H 5.13 + #define _COMPRESSED_LOOP_H 5.14 + 5.15 +-#define CLOOP_HEADROOM 128 5.16 ++/*************************************************************************\ 5.17 ++* Starting with Format V4.0 (cloop version 4.x), cloop can now have two * 5.18 ++* alternative structures: * 5.19 ++* * 5.20 ++* 1. Header first: "robust" format, handles missing blocks well * 5.21 ++* 2. Footer (header last): "streaming" format, easier to create * 5.22 ++* * 5.23 ++* The cloop kernel module autodetects both formats, and can (currently) * 5.24 ++* still handle the V2.0 format as well. * 5.25 ++* * 5.26 ++* 1. Header first: * 5.27 ++* +---------------------------- FIXED SIZE ---------------------------+ * 5.28 ++* |Signature (128 bytes) | * 5.29 ++* |block_size (32bit number, network order) | * 5.30 ++* |num_blocks (32bit number, network order) | * 5.31 ++* +--------------------------- VARIABLE SIZE -------------------------+ * 5.32 ++* |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| * 5.33 ++* |compressed data blocks of variable size ... | * 5.34 ++* +-------------------------------------------------------------------+ * 5.35 ++* * 5.36 ++* 2. Footer (header last): * 5.37 ++* +--------------------------- VARIABLE SIZE -------------------------+ * 5.38 ++* |compressed data blocks of variable size ... | * 5.39 ++* |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| * 5.40 ++* +---------------------------- FIXED SIZE ---------------------------+ * 5.41 ++* |Signature (128 bytes) | * 5.42 ++* |block_size (32bit number, network order) | * 5.43 ++* |num_blocks (32bit number, network order) | * 5.44 ++* +-------------------------------------------------------------------+ * 5.45 ++* * 5.46 ++* Offsets are always relative to beginning of file, in all formats. * 5.47 ++* The block index contains num_blocks+1 offsets, followed (1) or * 5.48 ++* preceded (2) by the compressed blocks. * 5.49 ++\*************************************************************************/ 5.50 + 5.51 +-/* The cloop header usually looks like this: */ 5.52 +-/* #!/bin/sh */ 5.53 +-/* #V2.00 Format */ 5.54 +-/* ...padding up to CLOOP_HEADROOM... 
*/ 5.55 +-/* block_size (32bit number, network order) */ 5.56 +-/* num_blocks (32bit number, network order) */ 5.57 ++#include <linux/types.h> /* u_int32_t */ 5.58 ++ 5.59 ++#define CLOOP_HEADROOM 128 5.60 + 5.61 ++/* Header of fixed length, can be located at beginning or end of file */ 5.62 + struct cloop_head 5.63 + { 5.64 + char preamble[CLOOP_HEADROOM]; 5.65 +@@ -17,9 +52,163 @@ 5.66 + u_int32_t num_blocks; 5.67 + }; 5.68 + 5.69 ++/************************************************************************\ 5.70 ++* CLOOP4 flags for each compressed block * 5.71 ++* Value Meaning * 5.72 ++* 0 GZIP/7ZIP compression (compatible with V2.0 Format) * 5.73 ++* 1 no compression (incompressible data) * 5.74 ++* 2 xz compression (currently best space saver) * 5.75 ++* 3 lz4 compression * 5.76 ++* 4 lzo compression (fastest) * 5.77 ++\************************************************************************/ 5.78 ++ 5.79 ++typedef uint64_t cloop_block_ptr; 5.80 ++ 5.81 ++/* Get value of first 4 bits */ 5.82 ++#define CLOOP_BLOCK_FLAGS(x) ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60)) 5.83 ++/* Get value of last 60 bits */ 5.84 ++#define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU) 5.85 ++ 5.86 ++#define CLOOP_COMPRESSOR_ZLIB 0x0 5.87 ++#define CLOOP_COMPRESSOR_NONE 0x1 5.88 ++#define CLOOP_COMPRESSOR_XZ 0x2 5.89 ++#define CLOOP_COMPRESSOR_LZ4 0x3 5.90 ++#define CLOOP_COMPRESSOR_LZO1X 0x4 5.91 ++ 5.92 ++#define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X) 5.93 ++ 5.94 ++#define CLOOP_COMPRESSOR_LINK 0xF 5.95 ++ 5.96 ++ 5.97 /* data_index (num_blocks 64bit pointers, network order)... */ 5.98 /* compressed data (gzip block compressed format)... */ 5.99 5.100 +struct cloop_tail 5.101 +{ 5.102 -+ u_int32_t table_size; 5.103 -+ u_int32_t index_size; 5.104 ++ u_int32_t table_size; 5.105 ++ u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */ 5.106 ++#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF)) 5.107 ++#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4) 5.108 ++#define CLOOP3_TRUNCATED(x) ((unsigned int)((x) & 0x80) >> 7) 5.109 ++#define CLOOP3_LASTLEN(x) (unsigned int)((x) >> 8) 5.110 + u_int32_t num_blocks; 5.111 +}; 5.112 + 5.113 ++#define GZIP_MAX_BUFFER(n) ((n) + (n)/1000 + 12) 5.114 ++ 5.115 +struct block_info 5.116 +{ 5.117 + loff_t offset; /* 64-bit offsets of compressed block */ 5.118 + u_int32_t size; /* 32-bit compressed block size */ 5.119 -+ u_int32_t optidx; /* 32-bit index number */ 5.120 ++ u_int32_t flags; /* 32-bit compression flags */ 5.121 +}; 5.122 + 5.123 -+static inline char *build_index(struct block_info *offsets, unsigned long n) 5.124 ++static inline char *build_index(struct block_info *offsets, unsigned long n, 5.125 ++ unsigned long block_size, unsigned global_flags) 5.126 +{ 5.127 + u_int32_t *ofs32 = (u_int32_t *) offsets; 5.128 + loff_t *ofs64 = (loff_t *) offsets; 5.129 -+ 5.130 ++ 5.131 ++ /* v3 64bits bug: v1 assumed */ 5.132 ++ unsigned long v3_64 = (n+1)/2; 5.133 ++ loff_t prev; 5.134 ++ 5.135 ++ if (ofs32[0] != 0 && ofs32[1] == 0) { 5.136 ++ for (prev=__le64_to_cpu(ofs64[v3_64]); 5.137 ++ v3_64 > 0 && __le64_to_cpu(ofs64[--v3_64]) < prev; 5.138 ++ prev=__le64_to_cpu(ofs64[v3_64])); 5.139 ++ } 5.140 ++ 5.141 + if (ofs32[0] == 0) { 5.142 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */ 5.143 + while (n--) { 5.144 + offsets[n].offset = __be64_to_cpu(offsets[n].offset); 5.145 + offsets[n].size = ntohl(offsets[n].size); 5.146 ++ offsets[n].flags = 0; 5.147 + } 5.148 + return (char *) 
"128BE accelerated knoppix 1.0"; 5.149 + } 5.150 -+ else { /* V2.0 */ 5.151 -+ loff_t last = __be64_to_cpu(ofs64[n - 1]); 5.152 -+ while (n--) { 5.153 ++ else { /* V2.0/V4.0 */ 5.154 ++ loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n])); 5.155 ++ u_int32_t flags; 5.156 ++ static char v4[11]; 5.157 ++ unsigned long i = n; 5.158 ++ 5.159 ++ for (flags = 0; n-- ;) { 5.160 ++ loff_t data = __be64_to_cpu(ofs64[n]); 5.161 ++ 5.162 + offsets[n].size = last - 5.163 -+ (offsets[n].offset = __be64_to_cpu(ofs64[n])); 5.164 ++ (offsets[n].offset = CLOOP_BLOCK_OFFSET(data)); 5.165 + last = offsets[n].offset; 5.166 ++ offsets[n].flags = CLOOP_BLOCK_FLAGS(data); 5.167 ++ flags |= 1 << offsets[n].flags; 5.168 + } 5.169 -+ return (char *) "64BE v2.0"; 5.170 ++ if (flags < 2) return (char *) "64BE v2.0"; 5.171 ++ while (i--) { 5.172 ++ if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) { 5.173 ++ offsets[i] = offsets[offsets[i].offset]; 5.174 ++ } 5.175 ++ } 5.176 ++ strcpy(v4, (char *) "64BE v4.0a"); 5.177 ++ v4[10] = 'a' + ((flags-1) & 0xF); // compressors used 5.178 ++ if (flags > 0x10) { // with links ? 5.179 ++ v4[10] += 'A' - 'a'; 5.180 ++ } 5.181 ++ return v4; 5.182 + } 5.183 + } 5.184 -+ else if (ofs32[1] == 0) { /* V1.0 */ 5.185 -+ loff_t last = __le64_to_cpu(ofs64[n - 1]); 5.186 ++ else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */ 5.187 ++ loff_t last = __le64_to_cpu(ofs64[n]); 5.188 + while (n--) { 5.189 + offsets[n].size = last - 5.190 + (offsets[n].offset = __le64_to_cpu(ofs64[n])); 5.191 + last = offsets[n].offset; 5.192 ++ offsets[n].flags = 0; 5.193 + } 5.194 + return (char *) "64LE v1.0"; 5.195 + } 5.196 -+ else if (ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */ 5.197 -+ loff_t last = ntohl(ofs32[n - 1]); 5.198 -+ while (n--) { 5.199 -+ offsets[n].size = last - 5.200 -+ (offsets[n].offset = ntohl(ofs32[n])); 5.201 -+ last = offsets[n].offset; 5.202 -+ } 5.203 -+ return (char *) "32BE v0.68"; 5.204 -+ } 5.205 -+ else { /* V3.0 */ 5.206 ++ else { /* V3.0 or V0.68 */ 5.207 + unsigned long i; 5.208 + loff_t j; 5.209 ++ static char v3[11]; 5.210 + 5.211 ++ for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++); 5.212 ++ if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */ 5.213 ++ loff_t last = ntohl(ofs32[n]); 5.214 ++ while (n--) { 5.215 ++ offsets[n].size = last - 5.216 ++ (offsets[n].offset = ntohl(ofs32[n])); 5.217 ++ last = offsets[n].offset; 5.218 ++ offsets[n].flags = 0; 5.219 ++ } 5.220 ++ return (char *) "32BE v0.68"; 5.221 ++ } 5.222 ++ 5.223 ++ v3_64 = (ofs32[1] == 0); 5.224 + for (i = n; i-- != 0; ) 5.225 -+ offsets[i].size = ntohl(ofs32[i]); 5.226 ++ offsets[i].size = ntohl(ofs32[i << v3_64]); 5.227 + for (i = 0, j = sizeof(struct cloop_head); i < n; i++) { 5.228 + offsets[i].offset = j; 5.229 ++ offsets[i].flags = global_flags; 5.230 ++ if (offsets[i].size == 0xFFFFFFFF) { 5.231 ++ offsets[i].flags = CLOOP_COMPRESSOR_NONE; 5.232 ++ offsets[i].size = block_size; 5.233 ++ } 5.234 ++ if ((offsets[i].size & 0x80000000) == 0) { 5.235 ++ j += offsets[i].size; 5.236 ++ } 5.237 ++ } 5.238 ++ for (i = 0; i < n; i++) { 5.239 + if (offsets[i].size & 0x80000000) { 5.240 -+ unsigned long k = offsets[i].size & 0x7FFFFFFF; 5.241 -+ offsets[i].offset = offsets[k].offset; 5.242 -+ offsets[i].size = offsets[k].size; 5.243 ++ offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF]; 5.244 + } 5.245 -+ else j += offsets[i].size; 5.246 + } 5.247 -+ return (char *) "32BE v3.0"; 5.248 ++ strcpy(v3, (char *) (v3_64) ? 
"64BE v3.0a" : "32BE v3.0a"); 5.249 ++ v3[10] += global_flags; 5.250 ++ return v3; 5.251 + } 5.252 +} 5.253 + 5.254 @@ -83,187 +224,884 @@ 5.255 5.256 --- cloop.c 5.257 +++ cloop.c 5.258 -@@ -5,11 +5,18 @@ 5.259 - * A cloop file looks like this: 5.260 - * [32-bit uncompressed block size: network order] 5.261 - * [32-bit number of blocks (n_blocks): network order] 5.262 +@@ -1,26 +1,23 @@ 5.263 +-/* 5.264 +- * compressed_loop.c: Read-only compressed loop blockdevice 5.265 +- * hacked up by Rusty in 1999, extended and maintained by Klaus Knopper 5.266 +- * 5.267 +- * A cloop file looks like this: 5.268 +- * [32-bit uncompressed block size: network order] 5.269 +- * [32-bit number of blocks (n_blocks): network order] 5.270 - * [64-bit file offsets of start of blocks: network order] 5.271 -+ * [for version < 3] 5.272 -+ * [32-bit, 64-bit or 128-bit file offsets of start of blocks] 5.273 - * ... 5.274 - * (n_blocks + 1). 5.275 - * n_blocks consisting of: 5.276 - * [compressed block] 5.277 -+ * ... 5.278 -+ * [for version >= 3] 5.279 -+ * [compressed list of 32-bit block sizes] 5.280 -+ * [32-bit compressed index size: network order] 5.281 -+ * [32-bit index size = 4: network order] 5.282 -+ * [32-bit number of blocks (n_blocks): network order] 5.283 - * 5.284 - * Every version greatly inspired by code seen in loop.c 5.285 - * by Theodore Ts'o, 3/29/93. 5.286 -@@ -115,7 +122,7 @@ 5.287 +- * ... 5.288 +- * (n_blocks + 1). 5.289 +- * n_blocks consisting of: 5.290 +- * [compressed block] 5.291 +- * 5.292 +- * Every version greatly inspired by code seen in loop.c 5.293 +- * by Theodore Ts'o, 3/29/93. 5.294 +- * 5.295 +- * Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper. 5.296 +- * Redistribution of this file is permitted under the GNU Public License. 5.297 +- * 5.298 +- */ 5.299 ++/************************************************************************\ 5.300 ++* cloop.c: Read-only compressed loop blockdevice * 5.301 ++* hacked up by Rusty in 1999, extended and maintained by Klaus Knopper * 5.302 ++* * 5.303 ++* For all supported cloop file formats, please check the file "cloop.h" * 5.304 ++* New in Version 4: * 5.305 ++* - Header can be first or last in cloop file, * 5.306 ++* - Different compression algorithms supported (compression type * 5.307 ++* encoded in first 4 bytes of block offset address) * 5.308 ++* * 5.309 ++* Every version greatly inspired by code seen in loop.c * 5.310 ++* by Theodore Ts'o, 3/29/93. * 5.311 ++* * 5.312 ++* Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper. * 5.313 ++* Redistribution of this file is permitted under the GNU Public License * 5.314 ++* V2. * 5.315 ++\************************************************************************/ 5.316 + 5.317 + #define CLOOP_NAME "cloop" 5.318 +-#define CLOOP_VERSION "2.639" 5.319 ++#define CLOOP_VERSION "4.12" 5.320 + #define CLOOP_MAX 8 5.321 + 5.322 + #ifndef KBUILD_MODNAME 5.323 +@@ -47,8 +44,27 @@ 5.324 + #include <asm/div64.h> /* do_div() for 64bit division */ 5.325 + #include <asm/uaccess.h> 5.326 + #include <asm/byteorder.h> 5.327 +-/* Use zlib_inflate from lib/zlib_inflate */ 5.328 ++/* Check for ZLIB, LZO1X, LZ4 decompression algorithms in kernel. 
*/ 5.329 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 5.330 + #include <linux/zutil.h> 5.331 ++#endif 5.332 ++#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE)) 5.333 ++#include <linux/lzo.h> 5.334 ++#endif 5.335 ++#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE)) 5.336 ++#include <linux/lz4.h> 5.337 ++#endif 5.338 ++#if (defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE)) 5.339 ++#include <linux/decompress/unlzma.h> 5.340 ++#endif 5.341 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 5.342 ++#include <linux/xz.h> 5.343 ++#endif 5.344 ++ 5.345 ++#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE) || defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE) || defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE) || defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE) || defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))) 5.346 ++#error "No decompression library selected in kernel config!" 5.347 ++#endif 5.348 ++ 5.349 + #include <linux/loop.h> 5.350 + #include <linux/kthread.h> 5.351 + #include <linux/compat.h> 5.352 +@@ -92,47 +108,64 @@ 5.353 + #define DEBUGP(format, x...) 5.354 + #endif 5.355 + 5.356 ++/* Default size of buffer to keep some decompressed blocks in memory to speed up access */ 5.357 ++#define BLOCK_BUFFER_MEM (16*65536) 5.358 ++ 5.359 + /* One file can be opened at module insertion time */ 5.360 + /* insmod cloop file=/path/to/file */ 5.361 + static char *file=NULL; 5.362 + static unsigned int preload=0; 5.363 + static unsigned int cloop_max=CLOOP_MAX; 5.364 ++static unsigned int buffers=BLOCK_BUFFER_MEM; 5.365 + module_param(file, charp, 0); 5.366 + module_param(preload, uint, 0); 5.367 + module_param(cloop_max, uint, 0); 5.368 + MODULE_PARM_DESC(file, "Initial cloop image file (full path) for /dev/cloop"); 5.369 + MODULE_PARM_DESC(preload, "Preload n blocks of cloop data into memory"); 5.370 + MODULE_PARM_DESC(cloop_max, "Maximum number of cloop devices (default 8)"); 5.371 ++MODULE_PARM_DESC(buffers, "Size of buffer to keep uncompressed blocks in memory in MiB (default 1)"); 5.372 + 5.373 + static struct file *initial_file=NULL; 5.374 + static int cloop_major=MAJOR_NR; 5.375 + 5.376 +-/* Number of buffered decompressed blocks */ 5.377 +-#define BUFFERED_BLOCKS 8 5.378 + struct cloop_device 5.379 + { 5.380 +- /* Copied straight from the file */ 5.381 ++ /* Header filled from the file */ 5.382 struct cloop_head head; 5.383 ++ int header_first; 5.384 ++ int file_format; 5.385 5.386 - /* An array of offsets of compressed blocks within the file */ 5.387 +- /* An array of offsets of compressed blocks within the file */ 5.388 - loff_t *offsets; 5.389 -+ struct block_info *offsets; 5.390 ++ /* An or'd sum of all flags of each compressed block (v3) */ 5.391 ++ u_int32_t allflags; 5.392 ++ 5.393 ++ /* An array of cloop_ptr flags/offset for compressed blocks within the file */ 5.394 ++ struct block_info *block_ptrs; 5.395 5.396 /* We buffer some uncompressed blocks for performance */ 5.397 - int buffered_blocknum[BUFFERED_BLOCKS]; 5.398 -@@ -256,11 +263,11 @@ 5.399 - return i; 5.400 - } 5.401 +- int buffered_blocknum[BUFFERED_BLOCKS]; 5.402 +- int current_bufnum; 5.403 +- void *buffer[BUFFERED_BLOCKS]; 5.404 +- void *compressed_buffer; 5.405 +- size_t preload_array_size; /* Size of pointer array in blocks */ 5.406 +- size_t preload_size; /* Number 
of successfully allocated blocks */ 5.407 +- char **preload_cache; /* Pointers to preloaded blocks */ 5.408 ++ size_t num_buffered_blocks; /* how many uncompressed blocks buffered for performance */ 5.409 ++ int *buffered_blocknum; /* list of numbers of uncompressed blocks in buffer */ 5.410 ++ int current_bufnum; /* which block is current */ 5.411 ++ unsigned char **buffer; /* cache space for num_buffered_blocks uncompressed blocks */ 5.412 ++ void *compressed_buffer; /* space for the largest compressed block */ 5.413 ++ size_t preload_array_size; /* Size of pointer array in blocks */ 5.414 ++ size_t preload_size; /* Number of successfully allocated blocks */ 5.415 ++ char **preload_cache; /* Pointers to preloaded blocks */ 5.416 5.417 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 5.418 + z_stream zstream; 5.419 ++#endif 5.420 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 5.421 ++ struct xz_dec *xzdecoderstate; 5.422 ++ struct xz_buf xz_buffer; 5.423 ++#endif 5.424 + 5.425 + struct file *backing_file; /* associated file */ 5.426 + struct inode *backing_inode; /* for bmap */ 5.427 + 5.428 ++ unsigned char *underlying_filename; 5.429 + unsigned long largest_block; 5.430 + unsigned int underlying_blksize; 5.431 ++ loff_t underlying_total_size; 5.432 + int clo_number; 5.433 + int refcnt; 5.434 + struct block_device *bdev; 5.435 +@@ -147,7 +180,6 @@ 5.436 + struct request_queue *clo_queue; 5.437 + struct gendisk *clo_disk; 5.438 + int suspended; 5.439 +- char clo_file_name[LO_NAME_SIZE]; 5.440 + }; 5.441 + 5.442 + /* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers, 5.443 +@@ -156,52 +188,113 @@ 5.444 + static const char *cloop_name=CLOOP_NAME; 5.445 + static int cloop_count = 0; 5.446 + 5.447 +-#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))) /* Must be compiled into kernel. */ 5.448 +-#error "Invalid Kernel configuration. CONFIG_ZLIB_INFLATE support is needed for cloop." 5.449 +-#endif 5.450 +- 5.451 +-/* Use __get_free_pages instead of vmalloc, allows up to 32 pages, 5.452 +- * 2MB in one piece */ 5.453 + static void *cloop_malloc(size_t size) 5.454 + { 5.455 +- int order = get_order(size); 5.456 +- if(order <= KMALLOC_MAX_ORDER) 5.457 +- return (void *)kmalloc(size, GFP_KERNEL); 5.458 +- else if(order < MAX_ORDER) 5.459 +- return (void *)__get_free_pages(GFP_KERNEL, order); 5.460 ++ /* kmalloc will fail after the system is running for a while, */ 5.461 ++ /* when large orders can't return contiguous memory. */ 5.462 ++ /* Let's just use vmalloc for now. 
:-/ */ 5.463 ++ /* int order = get_order(size); */ 5.464 ++ /* if(order <= KMALLOC_MAX_ORDER) */ 5.465 ++ /* return (void *)kmalloc(size, GFP_KERNEL); */ 5.466 ++ /* else if(order < MAX_ORDER) */ 5.467 ++ /* return (void *)__get_free_pages(GFP_KERNEL, order); */ 5.468 + return (void *)vmalloc(size); 5.469 + } 5.470 + 5.471 + static void cloop_free(void *mem, size_t size) 5.472 + { 5.473 +- int order = get_order(size); 5.474 +- if(order <= KMALLOC_MAX_ORDER) 5.475 +- kfree(mem); 5.476 +- else if(order < MAX_ORDER) 5.477 +- free_pages((unsigned long)mem, order); 5.478 +- else vfree(mem); 5.479 ++ /* int order = get_order(size); */ 5.480 ++ /* if(order <= KMALLOC_MAX_ORDER) */ 5.481 ++ /* kfree(mem); */ 5.482 ++ /* else if(order < MAX_ORDER) */ 5.483 ++ /* free_pages((unsigned long)mem, order); */ 5.484 ++ /* else */ 5.485 ++ vfree(mem); 5.486 + } 5.487 + 5.488 +-static int uncompress(struct cloop_device *clo, 5.489 +- unsigned char *dest, unsigned long *destLen, 5.490 +- unsigned char *source, unsigned long sourceLen) 5.491 ++static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags) 5.492 + { 5.493 +- /* Most of this code can be found in fs/cramfs/uncompress.c */ 5.494 +- int err; 5.495 +- clo->zstream.next_in = source; 5.496 +- clo->zstream.avail_in = sourceLen; 5.497 +- clo->zstream.next_out = dest; 5.498 +- clo->zstream.avail_out = *destLen; 5.499 +- err = zlib_inflateReset(&clo->zstream); 5.500 +- if (err != Z_OK) 5.501 +- { 5.502 +- printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err); 5.503 +- zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream); 5.504 +- } 5.505 +- err = zlib_inflate(&clo->zstream, Z_FINISH); 5.506 +- *destLen = clo->zstream.total_out; 5.507 +- if (err != Z_STREAM_END) return err; 5.508 +- return Z_OK; 5.509 ++ int err = -1; 5.510 ++ switch(flags) 5.511 ++ { 5.512 ++ case CLOOP_COMPRESSOR_NONE: 5.513 ++ memcpy(dest, source, *destLen = sourceLen); 5.514 ++ err = Z_OK; 5.515 ++ break; 5.516 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 5.517 ++ case CLOOP_COMPRESSOR_ZLIB: 5.518 ++ clo->zstream.next_in = source; 5.519 ++ clo->zstream.avail_in = sourceLen; 5.520 ++ clo->zstream.next_out = dest; 5.521 ++ clo->zstream.avail_out = *destLen; 5.522 ++ err = zlib_inflateReset(&clo->zstream); 5.523 ++ if (err != Z_OK) 5.524 ++ { 5.525 ++ printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err); 5.526 ++ zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream); 5.527 ++ } 5.528 ++ err = zlib_inflate(&clo->zstream, Z_FINISH); 5.529 ++ *destLen = clo->zstream.total_out; 5.530 ++ if (err == Z_STREAM_END) err = 0; 5.531 ++ DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen); 5.532 ++ break; 5.533 ++#endif 5.534 ++#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE)) 5.535 ++ case CLOOP_COMPRESSOR_LZO1X: 5.536 ++ { 5.537 ++ size_t tmp = (size_t) clo->head.block_size; 5.538 ++ err = lzo1x_decompress_safe(source, sourceLen, 5.539 ++ dest, &tmp); 5.540 ++ if (err == LZO_E_OK) *destLen = (u_int32_t) tmp; 5.541 ++ } 5.542 ++ break; 5.543 ++#endif 5.544 ++#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE)) 5.545 ++ case CLOOP_COMPRESSOR_LZ4: 5.546 ++ { 5.547 ++ size_t outputSize = *destLen; 5.548 ++ /* We should adjust outputSize here, in case the last block is smaller than block_size */ 5.549 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* 
field removed */ 5.550 ++ err = lz4_decompress(source, (size_t *) &sourceLen, 5.551 ++ dest, outputSize); 5.552 ++#else 5.553 ++ err = LZ4_decompress_safe(source, 5.554 ++ dest, 5.555 ++ sourceLen, outputSize); 5.556 ++#endif 5.557 ++ if (err >= 0) 5.558 ++ { 5.559 ++ err = 0; 5.560 ++ *destLen = outputSize; 5.561 ++ } 5.562 ++ } 5.563 ++ break; 5.564 ++#endif 5.565 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 5.566 ++ case CLOOP_COMPRESSOR_XZ: 5.567 ++ clo->xz_buffer.in = source; 5.568 ++ clo->xz_buffer.in_pos = 0; 5.569 ++ clo->xz_buffer.in_size = sourceLen; 5.570 ++ clo->xz_buffer.out = dest; 5.571 ++ clo->xz_buffer.out_pos = 0; 5.572 ++ clo->xz_buffer.out_size = *destLen; 5.573 ++ xz_dec_reset(clo->xzdecoderstate); 5.574 ++ err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer); 5.575 ++ if (err == XZ_STREAM_END || err == XZ_OK) 5.576 ++ { 5.577 ++ err = 0; 5.578 ++ } 5.579 ++ else 5.580 ++ { 5.581 ++ printk(KERN_ERR "%s: xz_dec_run error %d\n", cloop_name, err); 5.582 ++ err = 1; 5.583 ++ } 5.584 ++ break; 5.585 ++#endif 5.586 ++ default: 5.587 ++ printk(KERN_ERR "%s: compression method is not supported!\n", cloop_name); 5.588 ++ } 5.589 ++ return err; 5.590 + } 5.591 + 5.592 + static ssize_t cloop_read_from_file(struct cloop_device *clo, struct file *f, char *buf, 5.593 +@@ -220,7 +313,7 @@ 5.594 + 5.595 + if(size_read <= 0) 5.596 + { 5.597 +- printk(KERN_ERR "%s: Read error %d at pos %Lu in file %s, " 5.598 ++ printk(KERN_ERR "%s: Read error %d at pos %llu in file %s, " 5.599 + "%d bytes lost.\n", cloop_name, (int)size_read, pos, 5.600 + file, (int)size); 5.601 + memset(buf + buf_len - size, 0, size); 5.602 +@@ -232,72 +325,84 @@ 5.603 + } 5.604 + 5.605 + /* This looks more complicated than it is */ 5.606 +-/* Returns number of block buffer to use for this request */ 5.607 ++/* Returns number of cache block buffer to use for this request */ 5.608 + static int cloop_load_buffer(struct cloop_device *clo, int blocknum) 5.609 + { 5.610 +- unsigned int buf_done = 0; 5.611 +- unsigned long buflen; 5.612 +- unsigned int buf_length; 5.613 ++ loff_t compressed_block_offset; 5.614 ++ long compressed_block_len; 5.615 ++ long uncompressed_block_len=0; 5.616 + int ret; 5.617 + int i; 5.618 +- if(blocknum > ntohl(clo->head.num_blocks) || blocknum < 0) 5.619 +- { 5.620 +- printk(KERN_WARNING "%s: Invalid block number %d requested.\n", 5.621 +- cloop_name, blocknum); 5.622 +- return -1; 5.623 +- } 5.624 ++ if(blocknum > clo->head.num_blocks || blocknum < 0) 5.625 ++ { 5.626 ++ printk(KERN_WARNING "%s: Invalid block number %d requested.\n", 5.627 ++ cloop_name, blocknum); 5.628 ++ return -1; 5.629 ++ } 5.630 + 5.631 + /* Quick return if the block we seek is already in one of the buffers. */ 5.632 + /* Return number of buffer */ 5.633 +- for(i=0; i<BUFFERED_BLOCKS; i++) 5.634 ++ for(i=0; i<clo->num_buffered_blocks; i++) 5.635 + if (blocknum == clo->buffered_blocknum[i]) 5.636 +- { 5.637 +- DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i); 5.638 +- return i; 5.639 +- } 5.640 +- 5.641 - buf_length = be64_to_cpu(clo->offsets[blocknum+1]) - be64_to_cpu(clo->offsets[blocknum]); 5.642 -+ buf_length = clo->offsets[blocknum].size; 5.643 +- 5.644 +-/* Load one compressed block from the file. 
*/ 5.645 +- cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 5.646 +- be64_to_cpu(clo->offsets[blocknum]), buf_length); 5.647 ++ { 5.648 ++ DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i); 5.649 ++ return i; 5.650 ++ } 5.651 5.652 - /* Load one compressed block from the file. */ 5.653 - cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 5.654 -- be64_to_cpu(clo->offsets[blocknum]), buf_length); 5.655 -+ clo->offsets[blocknum].offset, buf_length); 5.656 +- buflen = ntohl(clo->head.block_size); 5.657 ++ compressed_block_offset = clo->block_ptrs[blocknum].offset; 5.658 ++ compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ; 5.659 5.660 - buflen = ntohl(clo->head.block_size); 5.661 +- /* Go to next position in the block ring buffer */ 5.662 +- clo->current_bufnum++; 5.663 +- if(clo->current_bufnum >= BUFFERED_BLOCKS) clo->current_bufnum = 0; 5.664 ++ /* Load one compressed block from the file. */ 5.665 ++ if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */ 5.666 ++ { 5.667 ++ size_t n = cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer, 5.668 ++ compressed_block_offset, compressed_block_len); 5.669 ++ if (n!= compressed_block_len) 5.670 ++ { 5.671 ++ printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n", 5.672 ++ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename); 5.673 ++ /* return -1; */ 5.674 ++ } 5.675 ++ } else { 5.676 ++ printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n", 5.677 ++ cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename); 5.678 ++ return -1; 5.679 ++ } 5.680 ++ 5.681 ++ /* Go to next position in the cache block buffer (which is used as a cyclic buffer) */ 5.682 ++ if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0; 5.683 5.684 -@@ -275,9 +282,9 @@ 5.685 + /* Do the uncompression */ 5.686 +- ret = uncompress(clo, clo->buffer[clo->current_bufnum], &buflen, clo->compressed_buffer, 5.687 +- buf_length); 5.688 ++ uncompressed_block_len = clo->head.block_size; 5.689 ++ ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len, 5.690 ++ clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags); 5.691 + /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */ 5.692 if (ret != 0) 5.693 +- { 5.694 +- printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u " 5.695 +- "%Lu-%Lu\n", cloop_name, ret, blocknum, 5.696 +- ntohl(clo->head.block_size), buflen, buf_length, buf_done, 5.697 +- be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1])); 5.698 +- clo->buffered_blocknum[clo->current_bufnum] = -1; 5.699 +- return -1; 5.700 +- } 5.701 ++ { 5.702 ++ printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n", 5.703 ++ cloop_name, ret, blocknum, 5.704 ++ compressed_block_len, clo->block_ptrs[blocknum].offset, 5.705 ++ clo->block_ptrs[blocknum].flags); 5.706 ++ clo->buffered_blocknum[clo->current_bufnum] = -1; 5.707 ++ return -1; 5.708 ++ } 5.709 + clo->buffered_blocknum[clo->current_bufnum] = blocknum; 5.710 + return clo->current_bufnum; 5.711 + } 5.712 + 5.713 + /* This function does all the real work. 
*/ 5.714 +-/* returns "uptodate" */ 5.715 ++/* returns "uptodate" */ 5.716 + static int cloop_handle_request(struct cloop_device *clo, struct request *req) 5.717 + { 5.718 + int buffered_blocknum = -1; 5.719 + int preloaded = 0; 5.720 + loff_t offset = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */ 5.721 +- struct bio_vec *bvec; 5.722 ++ struct bio_vec bvec; 5.723 + struct req_iterator iter; 5.724 + rq_for_each_segment(bvec, req, iter) 5.725 { 5.726 - printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u " 5.727 -- "%Lu-%Lu\n", cloop_name, ret, blocknum, 5.728 -+ "%Lu:%u\n", cloop_name, ret, blocknum, 5.729 - ntohl(clo->head.block_size), buflen, buf_length, buf_done, 5.730 -- be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1])); 5.731 -+ clo->offsets[blocknum].offset, clo->offsets[blocknum].size); 5.732 - clo->buffered_blocknum[clo->current_bufnum] = -1; 5.733 - return -1; 5.734 +- unsigned long len = bvec->bv_len; 5.735 +- char *to_ptr = kmap(bvec->bv_page) + bvec->bv_offset; 5.736 ++ unsigned long len = bvec.bv_len; 5.737 ++ char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset; 5.738 + while(len > 0) 5.739 + { 5.740 + u_int32_t length_in_buffer; 5.741 +@@ -308,7 +413,7 @@ 5.742 + /* puts the result in the first argument, i.e. block_offset */ 5.743 + /* becomes the blocknumber to load, and offset_in_buffer the */ 5.744 + /* position in the buffer */ 5.745 +- offset_in_buffer = do_div(block_offset, ntohl(clo->head.block_size)); 5.746 ++ offset_in_buffer = do_div(block_offset, clo->head.block_size); 5.747 + /* Lookup preload cache */ 5.748 + if(block_offset < clo->preload_size && clo->preload_cache != NULL && 5.749 + clo->preload_cache[block_offset] != NULL) 5.750 +@@ -325,7 +430,7 @@ 5.751 + from_ptr = clo->buffer[buffered_blocknum]; 5.752 + } 5.753 + /* Now, at least part of what we want will be in the buffer. */ 5.754 +- length_in_buffer = ntohl(clo->head.block_size) - offset_in_buffer; 5.755 ++ length_in_buffer = clo->head.block_size - offset_in_buffer; 5.756 + if(length_in_buffer > len) 5.757 + { 5.758 + /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", 5.759 +@@ -337,18 +442,19 @@ 5.760 + len -= length_in_buffer; 5.761 + offset += length_in_buffer; 5.762 + } /* while inner loop */ 5.763 +- kunmap(bvec->bv_page); 5.764 ++ kunmap(bvec.bv_page); 5.765 ++ cond_resched(); 5.766 + } /* end rq_for_each_segment*/ 5.767 + return ((buffered_blocknum!=-1) || preloaded); 5.768 + } 5.769 + 5.770 + /* Adopted from loop.c, a kernel thread to handle physical reads and 5.771 +- * decompression. */ 5.772 ++ decompression. 
*/ 5.773 + static int cloop_thread(void *data) 5.774 + { 5.775 + struct cloop_device *clo = data; 5.776 + current->flags |= PF_NOFREEZE; 5.777 +- set_user_nice(current, -15); 5.778 ++ set_user_nice(current, 10); 5.779 + while (!kthread_should_stop()||!list_empty(&clo->clo_list)) 5.780 + { 5.781 + int err; 5.782 +@@ -390,10 +496,18 @@ 5.783 + int rw; 5.784 + /* quick sanity checks */ 5.785 + /* blk_fs_request() was removed in 2.6.36 */ 5.786 +- if (unlikely(req == NULL || (req->cmd_type != REQ_TYPE_FS))) 5.787 ++ if (unlikely(req == NULL 5.788 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */ 5.789 ++ || (req->cmd_type != REQ_TYPE_FS) 5.790 ++#endif 5.791 ++ )) 5.792 + goto error_continue; 5.793 + rw = rq_data_dir(req); 5.794 +- if (unlikely(rw != READ && rw != READA)) 5.795 ++ if (unlikely(rw != READ 5.796 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) 5.797 ++ && rw != READA 5.798 ++#endif 5.799 ++ )) 5.800 + { 5.801 + DEBUGP("cloop_do_request: bad command\n"); 5.802 + goto error_continue; 5.803 +@@ -409,40 +523,51 @@ 5.804 + continue; /* next request */ 5.805 + error_continue: 5.806 + DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req); 5.807 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) 5.808 + req->errors++; 5.809 ++#else 5.810 ++ req->error_count++; 5.811 ++#endif 5.812 + __blk_end_request_all(req, -EIO); 5.813 } 5.814 -@@ -489,30 +496,73 @@ 5.815 - cloop_name, ntohl(clo->head.block_size)); 5.816 - error=-EBADF; goto error_release; 5.817 - } 5.818 + } 5.819 + 5.820 +-/* Read header and offsets from already opened file */ 5.821 +-static int cloop_set_file(int cloop_num, struct file *file, char *filename) 5.822 ++/* Read header, flags and offsets from already opened file */ 5.823 ++static int cloop_set_file(int cloop_num, struct file *file) 5.824 + { 5.825 + struct cloop_device *clo = cloop_dev[cloop_num]; 5.826 + struct inode *inode; 5.827 + char *bbuf=NULL; 5.828 +- unsigned int i, offsets_read, total_offsets; 5.829 +- int isblkdev; 5.830 +- int error = 0; 5.831 ++ unsigned int bbuf_size = 0; 5.832 ++ const unsigned int header_size = sizeof(struct cloop_head); 5.833 ++ unsigned int i, total_offsets=0; 5.834 ++ loff_t fs_read_position = 0, header_pos[2]; 5.835 ++ int flags, isblkdev, bytes_read, error = 0; 5.836 ++ if (clo->suspended) return error; 5.837 ++ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) 5.838 + inode = file->f_dentry->d_inode; 5.839 ++ clo->underlying_filename = kstrdup(file->f_dentry->d_name.name ? file->f_dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL); 5.840 ++ #else 5.841 ++ inode = file->f_path.dentry->d_inode; 5.842 ++ clo->underlying_filename = kstrdup(file->f_path.dentry->d_name.name ? file->f_path.dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL); 5.843 ++ #endif 5.844 + isblkdev=S_ISBLK(inode->i_mode)?1:0; 5.845 + if(!isblkdev&&!S_ISREG(inode->i_mode)) 5.846 + { 5.847 + printk(KERN_ERR "%s: %s not a regular file or block device\n", 5.848 +- cloop_name, filename); 5.849 ++ cloop_name, clo->underlying_filename); 5.850 + error=-EBADF; goto error_release; 5.851 + } 5.852 + clo->backing_file = file; 5.853 + clo->backing_inode= inode ; 5.854 +- if(!isblkdev&&inode->i_size<sizeof(struct cloop_head)) 5.855 ++ clo->underlying_total_size = (isblkdev) ? 
inode->i_bdev->bd_inode->i_size : inode->i_size; 5.856 ++ if(clo->underlying_total_size < header_size) 5.857 + { 5.858 +- printk(KERN_ERR "%s: %lu bytes (must be >= %u bytes)\n", 5.859 +- cloop_name, (unsigned long)inode->i_size, 5.860 +- (unsigned)sizeof(struct cloop_head)); 5.861 ++ printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n", 5.862 ++ cloop_name, clo->underlying_total_size, 5.863 ++ (unsigned int)header_size); 5.864 + error=-EBADF; goto error_release; 5.865 + } 5.866 +- /* In suspended mode, we have done all checks necessary - FF */ 5.867 +- if (clo->suspended) 5.868 +- return error; 5.869 + if(isblkdev) 5.870 + { 5.871 + struct request_queue *q = bdev_get_queue(inode->i_bdev); 5.872 +@@ -451,104 +576,225 @@ 5.873 + /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */ 5.874 + blk_queue_max_segment_size(clo->clo_queue, queue_max_segment_size(q)); 5.875 + blk_queue_segment_boundary(clo->clo_queue, queue_segment_boundary(q)); 5.876 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) 5.877 + blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn); 5.878 ++#endif 5.879 + clo->underlying_blksize = block_size(inode->i_bdev); 5.880 + } 5.881 + else 5.882 + clo->underlying_blksize = PAGE_SIZE; 5.883 +- DEBUGP("Underlying blocksize is %u\n", clo->underlying_blksize); 5.884 +- bbuf = cloop_malloc(clo->underlying_blksize); 5.885 ++ 5.886 ++ DEBUGP(KERN_INFO "Underlying blocksize of %s is %u\n", clo->underlying_filename, clo->underlying_blksize); 5.887 ++ DEBUGP(KERN_INFO "Underlying total size of %s is %llu\n", clo->underlying_filename, clo->underlying_total_size); 5.888 ++ 5.889 ++ /* clo->underlying_blksize should be larger than header_size, even if it's only PAGE_SIZE */ 5.890 ++ bbuf_size = clo->underlying_blksize; 5.891 ++ bbuf = cloop_malloc(bbuf_size); 5.892 + if(!bbuf) 5.893 + { 5.894 +- printk(KERN_ERR "%s: out of kernel mem for block buffer (%lu bytes)\n", 5.895 +- cloop_name, (unsigned long)clo->underlying_blksize); 5.896 ++ printk(KERN_ERR "%s: out of kernel mem for buffer (%u bytes)\n", 5.897 ++ cloop_name, (unsigned int) bbuf_size); 5.898 ++ error=-ENOMEM; goto error_release; 5.899 ++ } 5.900 ++ 5.901 ++ header_pos[0] = 0; /* header first */ 5.902 ++ header_pos[1] = clo->underlying_total_size - sizeof(struct cloop_head); /* header last */ 5.903 ++ for(i=0; i<2; i++) 5.904 ++ { 5.905 ++ /* Check for header */ 5.906 ++ size_t bytes_readable = MIN(clo->underlying_blksize, clo->underlying_total_size - header_pos[i]); 5.907 ++ size_t bytes_read = cloop_read_from_file(clo, file, bbuf, header_pos[i], bytes_readable); 5.908 ++ if(bytes_read != bytes_readable) 5.909 ++ { 5.910 ++ printk(KERN_ERR "%s: Bad file %s, read() of %s %u bytes returned %d.\n", 5.911 ++ cloop_name, clo->underlying_filename, (i==0)?"first":"last", 5.912 ++ (unsigned int)header_size, (int)bytes_read); 5.913 ++ error=-EBADF; 5.914 ++ goto error_release; 5.915 ++ } 5.916 ++ memcpy(&clo->head, bbuf, header_size); 5.917 ++ if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0) 5.918 ++ { 5.919 ++ clo->file_format++; 5.920 ++ clo->head.block_size=ntohl(clo->head.block_size); 5.921 ++ clo->head.num_blocks=ntohl(clo->head.num_blocks); 5.922 ++ clo->header_first = (i==0) ? 
1 : 0; 5.923 ++ printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last"); 5.924 ++ break; 5.925 ++ } 5.926 ++ } 5.927 ++ if (clo->file_format == 0) 5.928 ++ { 5.929 ++ printk(KERN_ERR "%s: Cannot detect %s format.\n", 5.930 ++ cloop_name, cloop_name); 5.931 ++ error=-EBADF; goto error_release; 5.932 ++ } 5.933 ++ if (clo->head.block_size % 512 != 0) 5.934 ++ { 5.935 ++ printk(KERN_ERR "%s: blocksize %u not multiple of 512\n", 5.936 ++ cloop_name, clo->head.block_size); 5.937 ++ error=-EBADF; goto error_release; 5.938 ++ } 5.939 ++ total_offsets=clo->head.num_blocks; 5.940 ++ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)* 5.941 ++ total_offsets > inode->i_size)) 5.942 ++ { 5.943 ++ printk(KERN_ERR "%s: file %s too small for %u blocks\n", 5.944 ++ cloop_name, clo->underlying_filename, clo->head.num_blocks); 5.945 ++ error=-EBADF; goto error_release; 5.946 ++ } 5.947 ++ /* Allocate Memory for decompressors */ 5.948 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)) 5.949 ++ clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize()); 5.950 ++ if(!clo->zstream.workspace) 5.951 ++ { 5.952 ++ printk(KERN_ERR "%s: out of mem for zlib working area %u\n", 5.953 ++ cloop_name, zlib_inflate_workspacesize()); 5.954 + error=-ENOMEM; goto error_release; 5.955 + } 5.956 +- total_offsets = 1; /* Dummy total_offsets: will be filled in first time around */ 5.957 +- for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++) 5.958 ++ zlib_inflateInit(&clo->zstream); 5.959 ++#endif 5.960 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)) 5.961 ++#if XZ_INTERNAL_CRC32 5.962 ++ /* This must be called before any other xz_* function to initialize the CRC32 lookup table. 
*/ 5.963 ++ xz_crc32_init(void); 5.964 ++#endif 5.965 ++ clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0); 5.966 ++#endif 5.967 ++ if (total_offsets + 1 == 0) /* Version 3 */ 5.968 + { 5.969 +- unsigned int offset = 0, num_readable; 5.970 +- size_t bytes_read = cloop_read_from_file(clo, file, bbuf, 5.971 +- i*clo->underlying_blksize, 5.972 +- clo->underlying_blksize); 5.973 +- if(bytes_read != clo->underlying_blksize) 5.974 ++ struct cloop_tail tail; 5.975 ++ if (isblkdev) 5.976 + { 5.977 +- printk(KERN_ERR "%s: Bad file, read() of first %lu bytes returned %d.\n", 5.978 +- cloop_name, (unsigned long)clo->underlying_blksize, (int)bytes_read); 5.979 +- error=-EBADF; 5.980 +- goto error_release; 5.981 ++ /* No end of file: can't find index */ 5.982 ++ printk(KERN_ERR "%s: no V3 support for block device\n", 5.983 ++ cloop_name); 5.984 ++ error=-EBADF; goto error_release; 5.985 + } 5.986 +- /* Header will be in block zero */ 5.987 +- if(i==0) 5.988 ++ bytes_read = cloop_read_from_file(clo, file, (void *) &tail, 5.989 ++ inode->i_size - sizeof(struct cloop_tail), 5.990 ++ sizeof(struct cloop_tail)); 5.991 ++ if (bytes_read == sizeof(struct cloop_tail)) 5.992 + { 5.993 +- memcpy(&clo->head, bbuf, sizeof(struct cloop_head)); 5.994 +- offset = sizeof(struct cloop_head); 5.995 +- if (ntohl(clo->head.block_size) % 512 != 0) 5.996 ++ unsigned long len, zlen; 5.997 ++ int ret; 5.998 ++ void *zbuf; 5.999 ++ clo->head.num_blocks = ntohl(tail.num_blocks); 5.1000 ++ total_offsets = clo->head.num_blocks; 5.1001 ++ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets); 5.1002 ++ zlen = ntohl(tail.table_size); 5.1003 ++ zbuf = cloop_malloc(zlen); 5.1004 ++ if (!clo->block_ptrs || !zbuf) 5.1005 + { 5.1006 +- printk(KERN_ERR "%s: blocksize %u not multiple of 512\n", 5.1007 +- cloop_name, ntohl(clo->head.block_size)); 5.1008 +- error=-EBADF; goto error_release; 5.1009 +- } 5.1010 - if (clo->head.preamble[0x0B]!='V'||clo->head.preamble[0x0C]<'1') 5.1011 - { 5.1012 - printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, " 5.1013 - "please use an older version of %s for this file.\n", 5.1014 - cloop_name, cloop_name); 5.1015 - error=-EBADF; goto error_release; 5.1016 -- } 5.1017 ++ printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name); 5.1018 ++ error=-ENOMEM; goto error_release; 5.1019 + } 5.1020 - if (clo->head.preamble[0x0C]<'2') 5.1021 -- { 5.1022 ++ bytes_read = cloop_read_from_file(clo, file, zbuf, 5.1023 ++ inode->i_size - zlen - sizeof(struct cloop_tail), 5.1024 ++ zlen); 5.1025 ++ if (bytes_read != zlen) 5.1026 + { 5.1027 - printk(KERN_ERR "%s: Cannot read old architecture-dependent " 5.1028 - "(format <= 1.0) images, please use an older " 5.1029 - "version of %s for this file.\n", 5.1030 - cloop_name, cloop_name); 5.1031 -- error=-EBADF; goto error_release; 5.1032 -- } 5.1033 ++ printk(KERN_ERR "%s: can't read index\n", cloop_name); 5.1034 + error=-EBADF; goto error_release; 5.1035 + } 5.1036 - total_offsets=ntohl(clo->head.num_blocks)+1; 5.1037 - if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)* 5.1038 -+ total_offsets=ntohl(clo->head.num_blocks); 5.1039 -+ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)* 5.1040 - total_offsets > inode->i_size)) 5.1041 +- total_offsets > inode->i_size)) 5.1042 ++ len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets; 5.1043 ++ flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size)); 5.1044 ++// May 3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, 
&len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0) 5.1045 ++printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 5.1046 ++ clo, clo->block_ptrs, len, &len, zbuf, zlen, flags); 5.1047 ++ ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags); 5.1048 ++// May 3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at (null) 5.1049 ++printk(KERN_INFO "%s: uncompressed !\n", cloop_name); 5.1050 ++ cloop_free(zbuf, zlen); 5.1051 ++ if (ret != 0) 5.1052 { 5.1053 - printk(KERN_ERR "%s: file too small for %u blocks\n", 5.1054 - cloop_name, ntohl(clo->head.num_blocks)); 5.1055 +- printk(KERN_ERR "%s: file too small for %u blocks\n", 5.1056 +- cloop_name, ntohl(clo->head.num_blocks)); 5.1057 ++ printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n", 5.1058 ++ cloop_name, ret, flags); 5.1059 error=-EBADF; goto error_release; 5.1060 } 5.1061 - clo->offsets = cloop_malloc(sizeof(loff_t) * total_offsets); 5.1062 -+ if (total_offsets + 1 == 0) /* Version >= 3.0 */ 5.1063 -+ { 5.1064 -+ struct cloop_tail tail; 5.1065 -+ if(isblkdev) 5.1066 -+ { 5.1067 -+ /* No end of file: can't find index */ 5.1068 -+ printk(KERN_ERR "%s: no V3 support for block device\n", 5.1069 -+ cloop_name); 5.1070 -+ error=-EBADF; goto error_release; 5.1071 -+ } 5.1072 -+ bytes_read = cloop_read_from_file(clo, file, (void *) &tail, 5.1073 -+ inode->i_size - sizeof(struct cloop_tail), 5.1074 -+ sizeof(struct cloop_tail)); 5.1075 -+ if(bytes_read == sizeof(struct cloop_tail)) 5.1076 -+ { 5.1077 -+ unsigned long len, zlen; 5.1078 -+ void *zbuf; 5.1079 -+ clo->head.num_blocks = tail.num_blocks; 5.1080 -+ total_offsets = ntohl(clo->head.num_blocks); 5.1081 -+ clo->offsets = cloop_malloc(sizeof(struct block_info) * total_offsets); 5.1082 -+ if (!clo->offsets) 5.1083 -+ { 5.1084 -+ printk(KERN_ERR "%s: can't alloc index\n", 5.1085 -+ cloop_name); 5.1086 -+ error=-EBADF; goto error_release; 5.1087 -+ } 5.1088 -+ zbuf = &clo->offsets[total_offsets/2]; 5.1089 -+ zlen = ntohl(tail.table_size); 5.1090 -+ len = ntohl(tail.index_size) * total_offsets; 5.1091 -+ bytes_read = cloop_read_from_file(clo, file, zbuf, 5.1092 -+ inode->i_size - zlen - sizeof(struct cloop_tail), 5.1093 -+ zlen); 5.1094 -+ if (bytes_read != zlen) 5.1095 -+ { 5.1096 -+ printk(KERN_ERR "%s: can't read index\n", 5.1097 -+ cloop_name); 5.1098 -+ error=-EBADF; goto error_release; 5.1099 -+ } 5.1100 -+ clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize()); 5.1101 -+ if(!clo->zstream.workspace) 5.1102 -+ { 5.1103 -+ printk(KERN_ERR "%s: can't alloc index workspace\n", 5.1104 -+ cloop_name); 5.1105 -+ error=-EBADF; goto error_release; 5.1106 -+ } 5.1107 -+ zlib_inflateInit(&clo->zstream); 5.1108 -+ uncompress(clo, (void *) clo->offsets, &len, zbuf, zlen); 5.1109 -+ cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); 5.1110 -+ clo->zstream.workspace = NULL; 5.1111 -+ break; 5.1112 -+ } 5.1113 -+ else 5.1114 -+ { 5.1115 -+ printk(KERN_ERR "%s: can't find index\n", 5.1116 -+ cloop_name); 5.1117 -+ error=-EBADF; goto error_release; 5.1118 -+ } 5.1119 -+ } 5.1120 -+ clo->offsets = cloop_malloc(sizeof(struct block_info) * total_offsets); 5.1121 - if (!clo->offsets) 5.1122 - { 5.1123 - printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 5.1124 -@@ -521,19 +571,22 @@ 5.1125 +- if (!clo->offsets) 5.1126 +- { 5.1127 +- printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 5.1128 +- error=-ENOMEM; 
goto error_release; 5.1129 +- } 5.1130 } 5.1131 - num_readable = MIN(total_offsets - offsets_read, 5.1132 - (clo->underlying_blksize - offset) 5.1133 +- num_readable = MIN(total_offsets - offsets_read, 5.1134 +- (clo->underlying_blksize - offset) 5.1135 - / sizeof(loff_t)); 5.1136 - memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(loff_t)); 5.1137 -+ / sizeof(struct block_info)); 5.1138 -+ memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(struct block_info)); 5.1139 - offsets_read += num_readable; 5.1140 - } 5.1141 - { /* Search for largest block rather than estimate. KK. */ 5.1142 - int i; 5.1143 +- offsets_read += num_readable; 5.1144 +- } 5.1145 +- { /* Search for largest block rather than estimate. KK. */ 5.1146 +- int i; 5.1147 - for(i=0;i<total_offsets-1;i++) 5.1148 -+ char *version = build_index(clo->offsets, ntohl(clo->head.num_blocks)); 5.1149 -+ for(i=0,clo->largest_block=0;i<total_offsets;i++) 5.1150 ++ else 5.1151 ++ { 5.1152 ++ printk(KERN_ERR "%s: can't find index\n", cloop_name); 5.1153 ++ error=-ENOMEM; goto error_release; 5.1154 ++ } 5.1155 ++ } 5.1156 ++ else 5.1157 ++ { 5.1158 ++ unsigned int n, total_bytes; 5.1159 ++ flags = 0; 5.1160 ++ clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets); 5.1161 ++ if (!clo->block_ptrs) 5.1162 ++ { 5.1163 ++ printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name); 5.1164 ++ error=-ENOMEM; goto error_release; 5.1165 ++ } 5.1166 ++ /* Read them offsets! */ 5.1167 ++ if(clo->header_first) 5.1168 ++ { 5.1169 ++ total_bytes = total_offsets * sizeof(struct block_info); 5.1170 ++ fs_read_position = sizeof(struct cloop_head); 5.1171 ++ } 5.1172 ++ else 5.1173 { 5.1174 - loff_t d=be64_to_cpu(clo->offsets[i+1]) - be64_to_cpu(clo->offsets[i]); 5.1175 - clo->largest_block=MAX(clo->largest_block,d); 5.1176 -+ clo->largest_block=MAX(clo->largest_block,clo->offsets[i].size); 5.1177 ++ total_bytes = total_offsets * sizeof(loff_t); 5.1178 ++ fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes; 5.1179 ++ } 5.1180 ++ for(n=0;n<total_bytes;) 5.1181 ++ { 5.1182 ++ size_t bytes_readable; 5.1183 ++ bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position); 5.1184 ++ if(bytes_readable <= 0) break; /* Done */ 5.1185 ++ bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable); 5.1186 ++ if(bytes_read != bytes_readable) 5.1187 ++ { 5.1188 ++ printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n", 5.1189 ++ cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read); 5.1190 ++ error=-EBADF; 5.1191 ++ goto error_release; 5.1192 ++ } 5.1193 ++ memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read); 5.1194 ++ /* remember where to read the next blk from file */ 5.1195 ++ fs_read_position += bytes_read; 5.1196 ++ n += bytes_read; 5.1197 } 5.1198 - printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n", 5.1199 - cloop_name, filename, ntohl(clo->head.num_blocks), 5.1200 -+ i = ntohl(clo->head.block_size); 5.1201 -+ i += i/1000 + 12 + 4; /* max gzip block size */ 5.1202 -+ if (clo->largest_block > i) clo->largest_block = i; /* broken index ? 
5.1203 -+ printk(KERN_INFO "%s: %s: %s, %u blocks, %u bytes/block, largest block is %lu bytes.\n",
5.1204 -+ cloop_name, filename, version, ntohl(clo->head.num_blocks),
5.1205 - ntohl(clo->head.block_size), clo->largest_block);
5.1206 +- ntohl(clo->head.block_size), clo->largest_block);
5.1207 }
5.1208 - /* Combo kmalloc used too large chunks (>130000). */
5.1209 -@@ -565,16 +618,6 @@
5.1210 - error=-ENOMEM; goto error_release_free_all;
5.1211 -/* Combo kmalloc used too large chunks (>130000). */
5.1212 + {
5.1213 + int i;
5.1214 - for(i=0;i<BUFFERED_BLOCKS;i++)
5.1215 - {
5.1216 - clo->buffer[i] = cloop_malloc(ntohl(clo->head.block_size));
5.1217 - if(!clo->buffer[i])
5.1218 - {
5.1219 - printk(KERN_ERR "%s: out of memory for buffer %lu\n",
5.1220 - cloop_name, (unsigned long) ntohl(clo->head.block_size));
5.1221 - error=-ENOMEM; goto error_release_free;
5.1222 - }
5.1223 - }
5.1224 ++ char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
5.1225 ++ clo->largest_block = 0;
5.1226 ++ for (i = 0; i < clo->head.num_blocks; i++)
5.1227 ++ if (clo->block_ptrs[i].size > clo->largest_block)
5.1228 ++ clo->largest_block = clo->block_ptrs[i].size;
5.1229 ++ printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
5.1230 ++ cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
5.1231 ++ clo->head.block_size, clo->largest_block);
5.1232 ++ }
5.1233 ++ {
5.1234 ++ int i;
5.1235 ++ clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
5.1236 ++ (buffers / clo->head.block_size) : 1;
5.1237 ++ clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
5.1238 ++ clo->buffer = cloop_malloc(clo->num_buffered_blocks * sizeof (char*));
5.1239 ++ if (!clo->buffered_blocknum || !clo->buffer)
5.1240 ++ {
5.1241 ++ printk(KERN_ERR "%s: out of memory for index of cache buffer (%lu bytes)\n",
5.1242 ++ cloop_name, (unsigned long)clo->num_buffered_blocks * sizeof (u_int32_t) + sizeof(char*) );
5.1243 ++ error=-ENOMEM; goto error_release;
5.1244 ++ }
5.1245 ++ memset(clo->buffer, 0, clo->num_buffered_blocks * sizeof (char*));
5.1246 ++ for(i=0;i<clo->num_buffered_blocks;i++)
5.1247 ++ {
5.1248 ++ clo->buffered_blocknum[i] = -1;
5.1249 ++ clo->buffer[i] = cloop_malloc(clo->head.block_size);
5.1250 ++ if(!clo->buffer[i])
5.1251 ++ {
5.1252 ++ printk(KERN_ERR "%s: out of memory for cache buffer %lu\n",
5.1253 ++ cloop_name, (unsigned long) clo->head.block_size);
5.1254 ++ error=-ENOMEM; goto error_release_free;
5.1255 ++ }
5.1256 ++ }
5.1257 ++ clo->current_bufnum = 0;
5.1258 + }
5.1259 + clo->compressed_buffer = cloop_malloc(clo->largest_block);
5.1260 + if(!clo->compressed_buffer)
5.1261 +@@ -557,31 +803,7 @@
5.1262 + cloop_name, clo->largest_block);
5.1263 + error=-ENOMEM; goto error_release_free_buffer;
5.1264 }
5.1265 - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
5.1266 +- clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
5.1267 +- if(!clo->zstream.workspace)
5.1268 +- {
5.1269 +- printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
5.1270 +- cloop_name, zlib_inflate_workspacesize());
5.1271 +- error=-ENOMEM; goto error_release_free_all;
5.1272 +- }
5.1273 +- zlib_inflateInit(&clo->zstream);
5.1274 - if(!isblkdev &&
5.1275 - be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]) != inode->i_size)
5.1276 - {
5.1277 @@ -274,15 +1112,264 @@
5.1278 - cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
5.1279 - goto error_release_free_all;
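Note: the rewritten cache setup above sizes the block cache as buffers / block_size with a floor of one block. A tiny sketch with made-up numbers (the real value of the buffers module parameter is not shown in this hunk):

  /* Sketch of the num_buffered_blocks rule above; both values are examples only. */
  #include <stdio.h>

  int main(void)
  {
      unsigned long buffers = 256 * 1024;   /* assumed parameter value */
      unsigned long block_size = 65536;     /* one 64 KiB cloop block */
      unsigned long n = (buffers > 0 && block_size >= 512) ? buffers / block_size : 1;
      printf("%lu cached blocks\n", n);     /* prints 4 */
      return 0;
  }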
5.1280 - }
5.1281 +- {
5.1282 - int i;
5.1283 - for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1;
5.1284 - clo->current_bufnum=0;
5.1285 - }
5.1286 - set_capacity(clo->clo_disk, (sector_t)(ntohl(clo->head.num_blocks)*
5.1287 - (ntohl(clo->head.block_size)>>9)));
5.1288 ++ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
5.1289 + clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
5.1290 + if(IS_ERR(clo->clo_thread))
5.1291 + {
5.1292 +@@ -591,17 +813,17 @@
5.1293 + }
5.1294 + if(preload > 0)
5.1295 + {
5.1296 +- clo->preload_array_size = ((preload<=ntohl(clo->head.num_blocks))?preload:ntohl(clo->head.num_blocks));
5.1297 ++ clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
5.1298 + clo->preload_size = 0;
5.1299 + if((clo->preload_cache = cloop_malloc(clo->preload_array_size * sizeof(char *))) != NULL)
5.1300 + {
5.1301 + int i;
5.1302 + for(i=0; i<clo->preload_array_size; i++)
5.1303 + {
5.1304 +- if((clo->preload_cache[i] = cloop_malloc(ntohl(clo->head.block_size))) == NULL)
5.1305 ++ if((clo->preload_cache[i] = cloop_malloc(clo->head.block_size)) == NULL)
5.1306 + { /* Out of memory */
5.1307 + printk(KERN_WARNING "%s: cloop_malloc(%d) failed for preload_cache[%d] (ignored).\n",
5.1308 +- cloop_name, ntohl(clo->head.block_size), i);
5.1309 ++ cloop_name, clo->head.block_size, i);
5.1310 + break;
5.1311 + }
5.1312 + }
5.1313 +@@ -612,13 +834,13 @@
5.1314 + if(buffered_blocknum >= 0)
5.1315 + {
5.1316 + memcpy(clo->preload_cache[i], clo->buffer[buffered_blocknum],
5.1317 +- ntohl(clo->head.block_size));
5.1318 ++ clo->head.block_size);
5.1319 + }
5.1320 + else
5.1321 + {
5.1322 + printk(KERN_WARNING "%s: can't read block %d into preload cache, set to zero.\n",
5.1323 + cloop_name, i);
5.1324 +- memset(clo->preload_cache[i], 0, ntohl(clo->head.block_size));
5.1325 ++ memset(clo->preload_cache[i], 0, clo->head.block_size);
5.1326 + }
5.1327 + }
5.1328 + printk(KERN_INFO "%s: preloaded %d blocks into cache.\n", cloop_name,
5.1329 +@@ -641,22 +863,19 @@
5.1330 + cloop_free(clo->compressed_buffer, clo->largest_block);
5.1331 + clo->compressed_buffer=NULL;
5.1332 + error_release_free_buffer:
5.1333 ++ if(clo->buffer)
5.1334 {
5.1335 int i;
5.1336 - for(i=0; i<BUFFERED_BLOCKS; i++)
5.1337 - {
5.1338 - if(clo->buffer[i])
5.1339 - {
5.1340 - cloop_free(clo->buffer[i], ntohl(clo->head.block_size));
5.1341 - clo->buffer[i]=NULL;
5.1342 - }
5.1343 - }
5.1344 ++ for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) { cloop_free(clo->buffer[i], clo->head.block_size); clo->buffer[i]=NULL; }}
5.1345 ++ cloop_free(clo->buffer, clo->num_buffered_blocks*sizeof(char*)); clo->buffer=NULL;
5.1346 }
5.1347 ++ if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
5.1348 error_release_free:
5.1349 - cloop_free(clo->offsets, sizeof(loff_t) * total_offsets);
5.1350 -+ cloop_free(clo->offsets, sizeof(struct block_info) * total_offsets);
5.1351 - clo->offsets=NULL;
5.1352 +- clo->offsets=NULL;
5.1353 ++ cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
5.1354 ++ clo->block_ptrs=NULL;
5.1355 error_release:
5.1356 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
5.1357 ++ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
5.1361 + clo->backing_file=NULL;
5.1362 + return error;
5.1363 + }
5.1364 +@@ -673,7 +892,7 @@
5.1365 + if(clo->backing_file) return -EBUSY;
5.1366 + file = fget(arg); /* get filp struct from ioctl arg fd */
5.1367 + if(!file) return -EBADF;
5.1368 +- error=cloop_set_file(cloop_num,file,"losetup_file");
5.1369 ++ error=cloop_set_file(cloop_num,file);
5.1370 + set_device_ro(bdev, 1);
5.1371 + if(error) fput(file);
5.1372 + return error;
5.1373 +@@ -684,29 +903,48 @@
5.1374 + {
5.1375 + struct cloop_device *clo = cloop_dev[cloop_num];
5.1376 + struct file *filp = clo->backing_file;
5.1377 +- int i;
5.1378 + if(clo->refcnt > 1) /* we needed one fd for the ioctl */
5.1379 + return -EBUSY;
5.1380 + if(filp==NULL) return -EINVAL;
5.1381 + if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
5.1382 +- if(filp!=initial_file) fput(filp);
5.1383 +- else { filp_close(initial_file,0); initial_file=NULL; }
5.1384 ++ if(filp!=initial_file)
5.1385 ++ fput(filp);
5.1386 ++ else
5.1387 ++ {
5.1388 ++ filp_close(initial_file,0);
5.1389 ++ initial_file=NULL;
5.1390 ++ }
5.1391 + clo->backing_file = NULL;
5.1392 + clo->backing_inode = NULL;
5.1393 +- if(clo->offsets) { cloop_free(clo->offsets, clo->underlying_blksize); clo->offsets = NULL; }
5.1394 ++ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
5.1395 ++ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
5.1396 + if(clo->preload_cache)
5.1397 +- {
5.1398 +- for(i=0; i < clo->preload_size; i++)
5.1399 +- cloop_free(clo->preload_cache[i], ntohl(clo->head.block_size));
5.1400 +- cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
5.1401 +- clo->preload_cache = NULL;
5.1402 +- clo->preload_size = clo->preload_array_size = 0;
5.1403 +- }
5.1404 +- for(i=0; i<BUFFERED_BLOCKS; i++)
5.1405 +- if(clo->buffer[i]) { cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); clo->buffer[i]=NULL; }
5.1406 ++ {
5.1407 ++ int i;
5.1408 ++ for(i=0; i < clo->preload_size; i++)
5.1409 ++ cloop_free(clo->preload_cache[i], clo->head.block_size);
5.1410 ++ cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
5.1411 ++ clo->preload_cache = NULL;
5.1412 ++ clo->preload_size = clo->preload_array_size = 0;
5.1413 ++ }
5.1414 ++ if (clo->buffered_blocknum)
5.1415 ++ {
5.1416 ++ cloop_free(clo->buffered_blocknum, sizeof(int) * clo->num_buffered_blocks); clo->buffered_blocknum = NULL;
5.1417 ++ }
5.1418 ++ if (clo->buffer)
5.1419 ++ {
5.1420 ++ int i;
5.1421 ++ for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) cloop_free(clo->buffer[i], clo->head.block_size); }
5.1422 ++ cloop_free(clo->buffer, sizeof(char*) * clo->num_buffered_blocks); clo->buffer = NULL;
5.1423 ++ }
5.1424 + if(clo->compressed_buffer) { cloop_free(clo->compressed_buffer, clo->largest_block); clo->compressed_buffer = NULL; }
5.1425 ++#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
5.1426 + zlib_inflateEnd(&clo->zstream);
5.1427 + if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
5.1428 ++#endif
5.1429 ++#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
5.1430 ++ xz_dec_end(clo->xzdecoderstate);
5.1431 ++#endif
5.1432 + if(bdev) invalidate_bdev(bdev);
5.1433 + if(clo->clo_disk) set_capacity(clo->clo_disk, 0);
5.1434 + return 0;
5.1435 +@@ -731,8 +969,8 @@
5.1436 + const struct loop_info64 *info)
5.1437 + {
5.1438 + if (!clo->backing_file) return -ENXIO;
5.1439 +- memcpy(clo->clo_file_name, info->lo_file_name, LO_NAME_SIZE);
5.1440 +- clo->clo_file_name[LO_NAME_SIZE-1] = 0;
5.1441 ++ if(clo->underlying_filename) kfree(clo->underlying_filename);
5.1442 ++ clo->underlying_filename = kstrdup(info->lo_file_name, GFP_KERNEL);
5.1443 + return 0;
5.1444 + }
5.1445 +
5.1446 +@@ -743,7 +981,11 @@
5.1447 + struct kstat stat;
5.1448 + int err;
5.1449 + if (!file) return -ENXIO;
5.1450 +- err = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
5.1451 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
5.1452 ++ err = vfs_getattr(&file->f_path, &stat);
5.1453 ++#else
5.1454 ++ err = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
5.1455 ++#endif
5.1456 + if (err) return err;
5.1457 + memset(info, 0, sizeof(*info));
5.1458 + info->lo_number = clo->clo_number;
5.1459 +@@ -753,7 +995,8 @@
5.1460 + info->lo_offset = 0;
5.1461 + info->lo_sizelimit = 0;
5.1462 + info->lo_flags = 0;
5.1463 +- memcpy(info->lo_file_name, clo->clo_file_name, LO_NAME_SIZE);
5.1464 ++ strncpy(info->lo_file_name, clo->underlying_filename, LO_NAME_SIZE);
5.1465 ++ info->lo_file_name[LO_NAME_SIZE-1]=0;
5.1466 + return 0;
5.1467 + }
5.1468 +
5.1469 +@@ -833,8 +1076,6 @@
5.1470 + if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT;
5.1471 + return err;
5.1472 + }
5.1473 +-/* EOF get/set_status */
5.1474 +-
5.1475 +
5.1476 + static int cloop_ioctl(struct block_device *bdev, fmode_t mode,
5.1477 + unsigned int cmd, unsigned long arg)
5.1478 +@@ -914,21 +1155,20 @@
5.1479 + /* losetup uses write-open and flags=0x8002 to set a new file */
5.1480 + if(mode & FMODE_WRITE)
5.1481 + {
5.1482 +- printk(KERN_WARNING "%s: Can't open device read-write in mode 0x%x\n", cloop_name, mode);
5.1483 ++ printk(KERN_INFO "%s: Open in read-write mode 0x%x requested, ignored.\n", cloop_name, mode);
5.1484 + return -EROFS;
5.1485 + }
5.1486 + cloop_dev[cloop_num]->refcnt+=1;
5.1487 + return 0;
5.1488 + }
5.1489 +
5.1490 +-static int cloop_close(struct gendisk *disk, fmode_t mode)
5.1491 ++static void cloop_close(struct gendisk *disk, fmode_t mode)
5.1492 + {
5.1493 +- int cloop_num, err=0;
5.1494 +- if(!disk) return 0;
5.1495 ++ int cloop_num;
5.1496 ++ if(!disk) return;
5.1497 + cloop_num=((struct cloop_device *)disk->private_data)->clo_number;
5.1498 +- if(cloop_num < 0 || cloop_num > (cloop_count-1)) return 0;
5.1499 ++ if(cloop_num < 0 || cloop_num > (cloop_count-1)) return;
5.1500 + cloop_dev[cloop_num]->refcnt-=1;
5.1501 +- return err;
5.1502 + }
5.1503 +
5.1504 + static struct block_device_operations clo_fops =
5.1505 +@@ -973,6 +1213,10 @@
5.1506 + goto error_out;
5.1507 + }
5.1508 + clo->clo_queue->queuedata = clo;
5.1509 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
5.1510 ++ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
5.1511 ++ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
5.1512 ++#endif
5.1513 + clo->clo_disk = alloc_disk(1);
5.1514 + if(!clo->clo_disk)
5.1515 + {
5.1516 +@@ -1004,6 +1248,11 @@
5.1517 + cloop_dev[cloop_num] = NULL;
5.1518 + }
5.1519 +
5.1520 ++/* LZ4 Stuff */
5.1521 ++#if (defined USE_LZ4_INTERNAL)
5.1522 ++#include "lz4_kmod.c"
5.1523 ++#endif
5.1524 +
5.1525 + static int __init cloop_init(void)
5.1526 + {
5.1527 + int error=0;
5.1528 +@@ -1044,7 +1293,7 @@
5.1529 + initial_file=NULL; /* if IS_ERR, it's NOT open. */
5.1530 + }
5.1531 + else
5.1532 +- error=cloop_set_file(0,initial_file,file);
5.1533 ++ error=cloop_set_file(0,initial_file);
5.1534 + if(error)
5.1535 + {
5.1536 + printk(KERN_ERR
5.1537 +@@ -1052,9 +1301,6 @@
5.1538 + cloop_name, file, error);
5.1539 + goto init_out_dealloc;
5.1540 + }
5.1541 +- if(namelen >= LO_NAME_SIZE) namelen = LO_NAME_SIZE-1;
5.1542 +- memcpy(cloop_dev[0]->clo_file_name, file, namelen);
5.1543 +- cloop_dev[0]->clo_file_name[namelen] = 0;
5.1544 + }
5.1545 + return 0;
5.1546 + init_out_dealloc:
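Note: several hunks above exist only to follow kernel API changes and are switched on LINUX_VERSION_CODE (the vfs_getattr() call for 4.11, the request-queue flags for 4.12, the void .release). A minimal sketch of that guard idiom; the wrapper name cloop_stat_file() is ours and does not appear in the patch.

  /* Sketch: version-guarded vfs_getattr(), mirroring the guard used above. */
  #include <linux/version.h>
  #include <linux/fs.h>
  #include <linux/stat.h>
  #include <linux/fcntl.h>

  static int cloop_stat_file(struct file *file, struct kstat *stat)
  {
  #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
      return vfs_getattr(&file->f_path, stat);
  #else
      return vfs_getattr(&file->f_path, stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
  #endif
  }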