wok rev 24983

Up xcursor-comix (0.9.2)
author Pascal Bellard <pascal.bellard@slitaz.org>
date Sun May 08 16:45:21 2022 +0000
parents 0f92b8cc8086
children 2b0142c9f248
files linux-cloop/receipt linux-cloop/stuff/cloop.u linux64-cloop/receipt linux64-cloop/stuff/cloop.u wbar/receipt xa/receipt xautomation/receipt xcursor-aero/receipt xcursor-comix/receipt
line diff
     1.1 --- a/linux-cloop/receipt	Sun May 08 13:06:36 2022 +0000
     1.2 +++ b/linux-cloop/receipt	Sun May 08 16:45:21 2022 +0000
     1.3 @@ -2,16 +2,15 @@
     1.4  
     1.5  PACKAGE="linux-cloop"
     1.6  SOURCE="cloop"
     1.7 -_VERSION="2.639-2"
     1.8 -#VERSION="$(sed '/+#define CLOOP_VERSION/!d;s|.* "\(.*\)"|\1|' stuff/cloop.u)"
     1.9 +_VERSION="3.14.1.3"
    1.10  VERSION="4.12"
    1.11  CATEGORY="base-system"
    1.12  MAINTAINER="pascal.bellard@slitaz.org"
    1.13  LICENSE="GPL2"
    1.14  SHORT_DESC="The read-only compressed loop device kernel module."
    1.15  WEB_SITE="http://knoppix.net/wiki/Cloop"
    1.16 -TARBALL="${SOURCE}_${_VERSION}.tar.gz"
    1.17 -WGET_URL="http://debian-knoppix.alioth.debian.org/packages/$SOURCE/$TARBALL"
    1.18 +TARBALL="${SOURCE}_${_VERSION}.tar.xz"
    1.19 +WGET_URL="http://deb.debian.org/debian/pool/main/c/$SOURCE/$TARBALL"
    1.20  
    1.21  DEPENDS="linux"
    1.22  BUILD_DEPENDS="linux-module-headers xz"
    1.23 @@ -23,6 +22,7 @@
    1.24  {
    1.25  	patch -p0 < $stuff/cloop.u
    1.26  	make ARCH=i386 KERNEL_DIR="/usr/src/linux" cloop.ko && xz cloop.ko
    1.27 +	make cloop_suspend
    1.28  }
    1.29  	
    1.30  # Rules to gen a SliTaz package suitable for Tazpkg.
     2.1 --- a/linux-cloop/stuff/cloop.u	Sun May 08 13:06:36 2022 +0000
     2.2 +++ b/linux-cloop/stuff/cloop.u	Sun May 08 16:45:21 2022 +0000
     2.3 @@ -1,6 +1,6 @@
     2.4  --- cloop.h
     2.5  +++ cloop.h
     2.6 -@@ -1,15 +1,50 @@
     2.7 +@@ -1,3 +1,7 @@
     2.8  +#define CLOOP_SIGNATURE "#!/bin/sh"                      /* @ offset 0  */
     2.9  +#define CLOOP_SIGNATURE_SIZE 9
    2.10  +#define CLOOP_SIGNATURE_OFFSET 0x0
    2.11 @@ -8,99 +8,46 @@
    2.12   #ifndef _COMPRESSED_LOOP_H
    2.13   #define _COMPRESSED_LOOP_H
    2.14   
    2.15 --#define CLOOP_HEADROOM 128
    2.16 -+/*************************************************************************\
    2.17 -+* Starting with Format V4.0 (cloop version 4.x), cloop can now have two   *
    2.18 -+* alternative structures:                                                 *
    2.19 -+*                                                                         *
    2.20 -+* 1. Header first: "robust" format, handles missing blocks well           *
    2.21 -+* 2. Footer (header last): "streaming" format, easier to create           *
    2.22 -+*                                                                         *
    2.23 -+* The cloop kernel module autodetects both formats, and can (currently)   *
    2.24 -+* still handle the V2.0 format as well.                                   *
    2.25 -+*                                                                         *
    2.26 -+* 1. Header first:                                                        *
    2.27 -+*   +---------------------------- FIXED SIZE ---------------------------+ *
    2.28 -+*   |Signature (128 bytes)                                              | *
    2.29 -+*   |block_size (32bit number, network order)                           | *
    2.30 -+*   |num_blocks (32bit number, network order)                           | *
    2.31 -+*   +--------------------------- VARIABLE SIZE -------------------------+ *
    2.32 -+*   |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| *
    2.33 -+*   |compressed data blocks of variable size ...                        | *
    2.34 -+*   +-------------------------------------------------------------------+ *
    2.35 -+*                                                                         *
    2.36 -+* 2. Footer (header last):                                                *
    2.37 -+*   +--------------------------- VARIABLE SIZE -------------------------+ *
    2.38 -+*   |compressed data blocks of variable size ...                        | *
    2.39 -+*   |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| *
    2.40 -+*   +---------------------------- FIXED SIZE ---------------------------+ *
    2.41 -+*   |Signature (128 bytes)                                              | *
    2.42 -+*   |block_size (32bit number, network order)                           | *
    2.43 -+*   |num_blocks (32bit number, network order)                           | *
    2.44 -+*   +-------------------------------------------------------------------+ *
    2.45 -+*                                                                         *
    2.46 -+* Offsets are always relative to beginning of file, in all formats.       *
    2.47 -+* The block index contains num_blocks+1 offsets, followed (1) or          *
    2.48 -+* preceded (2) by the compressed blocks.                                  *
    2.49 -+\*************************************************************************/
    2.50 +@@ -38,10 +42,6 @@
    2.51   
    2.52 --/* The cloop header usually looks like this:          */
    2.53 --/* #!/bin/sh                                          */
    2.54 --/* #V2.00 Format                                      */
    2.55 --/* ...padding up to CLOOP_HEADROOM...                 */
    2.56 --/* block_size (32bit number, network order)           */
    2.57 --/* num_blocks (32bit number, network order)           */
    2.58 -+#include <linux/types.h>   /* u_int32_t */
    2.59 -+
    2.60 -+#define CLOOP_HEADROOM 128
    2.61 + #include <linux/types.h>   /* u_int32_t */
    2.62   
    2.63 -+/* Header of fixed length, can be located at beginning or end of file   */
    2.64 - struct cloop_head
    2.65 - {
    2.66 - 	char preamble[CLOOP_HEADROOM];
    2.67 -@@ -17,9 +52,163 @@
    2.68 +-#ifndef __KERNEL__
    2.69 +-#include <stdint.h> /* regular uint64_t */
    2.70 +-#endif
    2.71 +-
    2.72 + #define CLOOP_HEADROOM 128
    2.73 + 
    2.74 + /* Header of fixed length, can be located at beginning or end of file   */
    2.75 +@@ -52,13 +52,6 @@
    2.76   	u_int32_t num_blocks;
    2.77   };
    2.78   
    2.79 -+/************************************************************************\
    2.80 -+*  CLOOP4 flags for each compressed block                                *
    2.81 -+*  Value   Meaning                                                       *
    2.82 -+*    0     GZIP/7ZIP compression (compatible with V2.0 Format)           *
    2.83 -+*    1     no compression (incompressible data)                          *
    2.84 -+*    2     xz compression (currently best space saver)                   *
    2.85 -+*    3     lz4 compression                                               *
    2.86 -+*    4     lzo compression (fastest)                                     *
    2.87 -+\************************************************************************/
    2.88 -+
    2.89 -+typedef uint64_t cloop_block_ptr;
    2.90 -+
    2.91 -+/* Get value of first 4 bits */
    2.92 -+#define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
    2.93 -+/* Get value of last 60 bits */
    2.94 -+#define CLOOP_BLOCK_OFFSET(x)  ((x) & 0x0fffffffffffffffLLU)
    2.95 -+
    2.96 -+#define CLOOP_COMPRESSOR_ZLIB  0x0
    2.97 -+#define CLOOP_COMPRESSOR_NONE  0x1
    2.98 -+#define CLOOP_COMPRESSOR_XZ    0x2
    2.99 -+#define CLOOP_COMPRESSOR_LZ4   0x3
   2.100 -+#define CLOOP_COMPRESSOR_LZO1X 0x4
   2.101 -+
   2.102 -+#define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
   2.103 -+
   2.104 +-#define CLOOP2_SIGNATURE "V2.0"                       /* @ offset 0x0b  */
   2.105 +-#define CLOOP2_SIGNATURE_SIZE 4
   2.106 +-#define CLOOP2_SIGNATURE_OFFSET 0x0b
   2.107 +-#define CLOOP4_SIGNATURE "V4.0"                       /* @ offset 0x0b  */
   2.108 +-#define CLOOP4_SIGNATURE_SIZE 4
   2.109 +-#define CLOOP4_SIGNATURE_OFFSET 0x0b
   2.110 +-
   2.111 + /************************************************************************\
   2.112 + *  CLOOP4 flags for each compressed block                                *
   2.113 + *  Value   Meaning                                                       *
   2.114 +@@ -84,6 +77,134 @@
   2.115 + 
   2.116 + #define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
   2.117 + 
   2.118  +#define CLOOP_COMPRESSOR_LINK  0xF
   2.119  +
   2.120  +
   2.121 - /* data_index (num_blocks 64bit pointers, network order)...      */
   2.122 - /* compressed data (gzip block compressed format)...             */
   2.123 - 
   2.124 ++/* data_index (num_blocks 64bit pointers, network order)...      */
   2.125 ++/* compressed data (gzip block compressed format)...             */
   2.126 ++
   2.127  +struct cloop_tail
   2.128  +{
   2.129  +	u_int32_t table_size; 
   2.130 -+	u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
   2.131 ++	u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
   2.132  +#define CLOOP3_INDEX_SIZE(x)    ((unsigned int)((x) & 0xF))
   2.133 -+#define CLOOP3_BLOCKS_FLAGS(x)  ((unsigned int)((x) & 0x70) >> 4)
   2.134 -+#define CLOOP3_TRUNCATED(x)     ((unsigned int)((x) & 0x80) >> 7)
   2.135 -+#define CLOOP3_LASTLEN(x)       (unsigned int)((x) >> 8)
   2.136  +	u_int32_t num_blocks;
   2.137  +};
   2.138  +
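
Note, for readers of the hunk above: the V4 block pointers whose macros the patch no longer needs to carry (upstream cloop.h 3.14.1.3 already defines them) pack everything into one 64-bit big-endian word, compressor flags in the top 4 bits (0 zlib, 1 none, 2 xz, 3 lz4, 4 lzo, 0xF link) and the file offset in the low 60 bits; the new struct cloop_tail likewise packs index_size as size:4 / unused:3 / ctrl-c:1 / lastlen:24. A minimal user-space sketch of the pointer decoding, using the macros exactly as shown above (the sample value is made up):

    /* sketch: decode one V4 block pointer the way CLOOP_BLOCK_FLAGS() and
     * CLOOP_BLOCK_OFFSET() from the hunk above do */
    #include <stdint.h>
    #include <stdio.h>

    #define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
    #define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU)

    int main(void)
    {
        /* made-up entry: an xz-compressed block (flags 0x2) starting at byte 0x1234 */
        uint64_t entry = ((uint64_t)0x2 << 60) | 0x1234;
        printf("flags=%u offset=%llu\n", CLOOP_BLOCK_FLAGS(entry),
               (unsigned long long)CLOOP_BLOCK_OFFSET(entry));
        return 0;
    }
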
   2.139 @@ -114,8 +61,10 @@
   2.140  +};
   2.141  +
   2.142  +static inline char *build_index(struct block_info *offsets, unsigned long n, 
   2.143 -+			unsigned long block_size, unsigned global_flags)
   2.144 ++			unsigned long block_size)
   2.145  +{
   2.146 ++	static char v[11];
   2.147 ++	u_int32_t flags = 0;
   2.148  +	u_int32_t *ofs32 = (u_int32_t *) offsets;
   2.149  +	loff_t    *ofs64 = (loff_t *) offsets;
   2.150  +
   2.151 @@ -140,8 +89,6 @@
   2.152  +		}
   2.153  +		else { /* V2.0/V4.0 */
   2.154  +			loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
   2.155 -+			u_int32_t flags;
   2.156 -+			static char v4[11];
   2.157  +			unsigned long i = n;
   2.158  +
   2.159  +			for (flags = 0; n-- ;) {
   2.160 @@ -159,12 +106,7 @@
   2.161  +					offsets[i] = offsets[offsets[i].offset];
   2.162  +				}
   2.163  +			}
   2.164 -+			strcpy(v4, (char *) "64BE v4.0a");
   2.165 -+			v4[10] = 'a' + ((flags-1) & 0xF);	// compressors used
   2.166 -+			if (flags > 0x10) {			// with links ?
   2.167 -+				v4[10] += 'A' - 'a';
   2.168 -+			}
   2.169 -+			return v4;
   2.170 ++			strcpy(v, (char *) "64BE v4.0a");
   2.171  +		}
   2.172  +	}
   2.173  +	else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
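
The hunk above drops the per-branch v4[] letter encoding from the V2.0/V4.0 path; as the later hunks show, the same encoding is hoisted to the common tail of build_index(), where flags has become a bitmask of every compressor seen (one bit per CLOOP_COMPRESSOR_* value). A stand-alone sketch of that final letter computation under the same convention (compressor_letter() is an illustrative name, not in the patch):

    /* sketch: the summary letter appended to "64BE v4.0"/"32BE v3.0", mirroring
     *   v[10] = 'a' + ((flags-1) & 0xF);
     *   if (flags > 0x10) v[10] += 'A' - 'a';   // patch comments this "with links ?" */
    #include <stdio.h>

    static char compressor_letter(unsigned int flags)  /* flags: bitmask of compressors used */
    {
        char c = 'a' + ((flags - 1) & 0xF);
        if (flags > 0x10)            /* something beyond the plain compressor bits */
            c += 'A' - 'a';
        return c;
    }

    int main(void)
    {
        /* only zlib (bit 0) -> 'a'; zlib + xz (bits 0 and 2) -> 'e' */
        printf("%c %c\n", compressor_letter(1u << 0),
                          compressor_letter((1u << 0) | (1u << 2)));
        return 0;
    }
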
   2.174 @@ -180,7 +122,6 @@
   2.175  +	else { /* V3.0 or V0.68 */
   2.176  +		unsigned long i;
   2.177  +		loff_t j;
   2.178 -+		static char v3[11];
   2.179  +		
   2.180  +		for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
   2.181  +		if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
   2.182 @@ -195,28 +136,33 @@
   2.183  +		}
   2.184  +		
   2.185  +		v3_64 = (ofs32[1] == 0);
   2.186 -+		for (i = n; i-- != 0; )
   2.187 ++		for (i = n; i-- != 0; ) {
   2.188  +			offsets[i].size = ntohl(ofs32[i << v3_64]); 
   2.189 ++			if (offsets[i].size == 0xFFFFFFFF) {
   2.190 ++				offsets[i].size = 0x10000000 | block_size;
   2.191 ++			}
   2.192 ++			offsets[i].flags = (offsets[i].size >> 28);
   2.193 ++			offsets[i].size &= 0x0FFFFFFF; 
   2.194 ++		}
   2.195  +		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
   2.196  +			offsets[i].offset = j;
   2.197 -+			offsets[i].flags = global_flags;
   2.198 -+			if (offsets[i].size == 0xFFFFFFFF) {
   2.199 -+				offsets[i].flags = CLOOP_COMPRESSOR_NONE;
   2.200 -+				offsets[i].size = block_size;
   2.201 -+			}
   2.202 -+			if ((offsets[i].size & 0x80000000) == 0) {
   2.203 ++			if (offsets[i].flags < 8) {
   2.204  +				j += offsets[i].size;
   2.205  +			}
   2.206  +		}
   2.207  +		for (i = 0; i < n; i++) {
   2.208 -+			if (offsets[i].size & 0x80000000) {
   2.209 -+				offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
   2.210 ++			flags |= 1 << offsets[i].flags;
   2.211 ++			if (offsets[i].flags >= 8) {
   2.212 ++				offsets[i] = offsets[offsets[i].size];
   2.213  +			}
   2.214  +		}
   2.215 -+		strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
   2.216 -+		v3[10] += global_flags;
   2.217 -+		return v3;
   2.218 ++		strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
   2.219  +	}
   2.220 ++	v[10] = 'a' + ((flags-1) & 0xF);	// compressors used
   2.221 ++	if (flags > 0x10) {			// with links ?
   2.222 ++		v[10] += 'A' - 'a';
   2.223 ++	}
   2.224 ++	return v;
   2.225  +}
   2.226  +
   2.227   /* Cloop suspend IOCTL */
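
In the V3.0/V0.68 branch reworked above, each 32-bit size word now carries its own compressor in its top 4 bits (a raw 0xFFFFFFFF is first rewritten to flag 1 = CLOOP_COMPRESSOR_NONE with one full block_size), and entries with flags >= 8 are links resolved through offsets[size]. A small host-side sketch of that size-word split, using the same constants as the hunk (decode_v3_size() is illustrative only):

    /* sketch: split a V3 per-block size word into flags (top 4 bits) and
     * compressed size (low 28 bits), as the patched build_index() does */
    #include <stdint.h>
    #include <stdio.h>

    static void decode_v3_size(uint32_t raw, uint32_t block_size,
                               uint32_t *flags, uint32_t *size)
    {
        if (raw == 0xFFFFFFFF)                 /* stored as "uncompressed, full block" */
            raw = 0x10000000 | block_size;     /* flag 1 = CLOOP_COMPRESSOR_NONE      */
        *flags = raw >> 28;
        *size  = raw & 0x0FFFFFFF;
    }

    int main(void)
    {
        uint32_t flags, size;
        decode_v3_size(0xFFFFFFFF, 65536, &flags, &size);
        printf("flags=%u size=%u\n", flags, size);   /* flags=1 size=65536 */
        decode_v3_size(0x2000A123, 65536, &flags, &size);
        printf("flags=%u size=%u\n", flags, size);   /* flags=2 (xz) size=41251 */
        return 0;
    }
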
   2.228 @@ -224,661 +170,538 @@
   2.229   
   2.230  --- cloop.c
   2.231  +++ cloop.c
   2.232 -@@ -1,26 +1,23 @@
   2.233 --/*
   2.234 -- *  compressed_loop.c: Read-only compressed loop blockdevice
   2.235 -- *  hacked up by Rusty in 1999, extended and maintained by Klaus Knopper
   2.236 -- *
   2.237 -- *  A cloop file looks like this:
   2.238 -- *  [32-bit uncompressed block size: network order]
   2.239 -- *  [32-bit number of blocks (n_blocks): network order]
   2.240 -- *  [64-bit file offsets of start of blocks: network order]
   2.241 -- *    ...
   2.242 -- *    (n_blocks + 1).
   2.243 -- * n_blocks consisting of:
   2.244 -- *   [compressed block]
   2.245 -- *
   2.246 -- * Every version greatly inspired by code seen in loop.c
   2.247 -- * by Theodore Ts'o, 3/29/93.
   2.248 -- *
   2.249 -- * Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper.
   2.250 -- * Redistribution of this file is permitted under the GNU Public License.
   2.251 -- *
   2.252 -- */
   2.253 -+/************************************************************************\
   2.254 -+* cloop.c: Read-only compressed loop blockdevice                         *
   2.255 -+* hacked up by Rusty in 1999, extended and maintained by Klaus Knopper   *
   2.256 -+*                                                                        *
   2.257 -+* For all supported cloop file formats, please check the file "cloop.h"  *
   2.258 -+* New in Version 4:                                                      *
   2.259 -+* - Header can be first or last in cloop file,                           *
   2.260 -+* - Different compression algorithms supported (compression type         *
   2.261 -+*   encoded in first 4 bytes of block offset address)                    *
   2.262 -+*                                                                        *
   2.263 -+* Every version greatly inspired by code seen in loop.c                  *
   2.264 -+* by Theodore Ts'o, 3/29/93.                                             *
   2.265 -+*                                                                        *
   2.266 -+* Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper.           *
   2.267 -+* Redistribution of this file is permitted under the GNU Public License  *
   2.268 -+* V2.                                                                    *
   2.269 -+\************************************************************************/
   2.270 +@@ -17,7 +17,7 @@
   2.271 + \************************************************************************/
   2.272   
   2.273   #define CLOOP_NAME "cloop"
   2.274 --#define CLOOP_VERSION "2.639"
   2.275 +-#define CLOOP_VERSION "5.3"
   2.276  +#define CLOOP_VERSION "4.12"
   2.277   #define CLOOP_MAX 8
   2.278   
   2.279   #ifndef KBUILD_MODNAME
   2.280 -@@ -47,8 +44,27 @@
   2.281 - #include <asm/div64.h> /* do_div() for 64bit division */
   2.282 - #include <asm/uaccess.h>
   2.283 - #include <asm/byteorder.h>
   2.284 --/* Use zlib_inflate from lib/zlib_inflate */
   2.285 -+/* Check for ZLIB, LZO1X, LZ4 decompression algorithms in kernel. */
   2.286 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   2.287 - #include <linux/zutil.h>
   2.288 -+#endif
   2.289 -+#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   2.290 -+#include <linux/lzo.h>
   2.291 -+#endif
   2.292 -+#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   2.293 -+#include <linux/lz4.h>
   2.294 -+#endif
   2.295 -+#if (defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE))
   2.296 -+#include <linux/decompress/unlzma.h>
   2.297 -+#endif
   2.298 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   2.299 -+#include <linux/xz.h>
   2.300 -+#endif
   2.301 -+
   2.302 -+#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE) || defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE) || defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE) || defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE) || defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)))
   2.303 -+#error "No decompression library selected in kernel config!"
   2.304 -+#endif
   2.305 -+
   2.306 +@@ -68,7 +68,6 @@
   2.307   #include <linux/loop.h>
   2.308   #include <linux/kthread.h>
   2.309   #include <linux/compat.h>
   2.310 -@@ -92,47 +108,64 @@
   2.311 - #define DEBUGP(format, x...)
   2.312 - #endif
   2.313 +-#include <linux/blk-mq.h> /* new multiqueue infrastructure */
   2.314 + #include "cloop.h"
   2.315   
   2.316 -+/* Default size of buffer to keep some decompressed blocks in memory to speed up access */
   2.317 -+#define BLOCK_BUFFER_MEM (16*65536)
   2.318 -+
   2.319 - /* One file can be opened at module insertion time */
   2.320 - /* insmod cloop file=/path/to/file */
   2.321 - static char *file=NULL;
   2.322 - static unsigned int preload=0;
   2.323 - static unsigned int cloop_max=CLOOP_MAX;
   2.324 -+static unsigned int buffers=BLOCK_BUFFER_MEM;
   2.325 - module_param(file, charp, 0);
   2.326 - module_param(preload, uint, 0);
   2.327 - module_param(cloop_max, uint, 0);
   2.328 - MODULE_PARM_DESC(file, "Initial cloop image file (full path) for /dev/cloop");
   2.329 - MODULE_PARM_DESC(preload, "Preload n blocks of cloop data into memory");
   2.330 - MODULE_PARM_DESC(cloop_max, "Maximum number of cloop devices (default 8)");
   2.331 -+MODULE_PARM_DESC(buffers, "Size of buffer to keep uncompressed blocks in memory in MiB (default 1)");
   2.332 + /* New License scheme */
   2.333 +@@ -93,10 +92,7 @@
   2.334 + /* Use experimental major for now */
   2.335 + #define MAJOR_NR 240
   2.336   
   2.337 - static struct file *initial_file=NULL;
   2.338 - static int cloop_major=MAJOR_NR;
   2.339 +-#ifndef DEVICE_NAME
   2.340 +-#define DEVICE_NAME CLOOP_NAME
   2.341 +-#endif
   2.342 +-
   2.343 ++/* #define DEVICE_NAME CLOOP_NAME */
   2.344 + /* #define DEVICE_NR(device) (MINOR(device)) */
   2.345 + /* #define DEVICE_ON(device) */
   2.346 + /* #define DEVICE_OFF(device) */
   2.347 +@@ -143,7 +139,7 @@
   2.348 +  u_int32_t allflags;
   2.349   
   2.350 --/* Number of buffered decompressed blocks */
   2.351 --#define BUFFERED_BLOCKS 8
   2.352 - struct cloop_device
   2.353 - {
   2.354 -- /* Copied straight from the file */
   2.355 -+ /* Header filled from the file */
   2.356 -  struct cloop_head head;
   2.357 -+ int header_first;
   2.358 -+ int file_format;
   2.359 - 
   2.360 -- /* An array of offsets of compressed blocks within the file */
   2.361 -- loff_t *offsets;
   2.362 -+ /* An or'd sum of all flags of each compressed block (v3) */
   2.363 -+ u_int32_t allflags;
   2.364 -+
   2.365 -+ /* An array of cloop_ptr flags/offset for compressed blocks within the file */
   2.366 +  /* An array of cloop_ptr flags/offset for compressed blocks within the file */
   2.367 +- cloop_block_ptr *block_ptrs;
   2.368  + struct block_info *block_ptrs;
   2.369   
   2.370    /* We buffer some uncompressed blocks for performance */
   2.371 -- int buffered_blocknum[BUFFERED_BLOCKS];
   2.372 -- int current_bufnum;
   2.373 -- void *buffer[BUFFERED_BLOCKS];
   2.374 -- void *compressed_buffer;
   2.375 -- size_t preload_array_size; /* Size of pointer array in blocks */
   2.376 -- size_t preload_size;       /* Number of successfully allocated blocks */
   2.377 -- char **preload_cache;      /* Pointers to preloaded blocks */
   2.378 -+ size_t num_buffered_blocks;	/* how many uncompressed blocks buffered for performance */
   2.379 -+ int *buffered_blocknum;        /* list of numbers of uncompressed blocks in buffer */
   2.380 -+ int current_bufnum;            /* which block is current */
   2.381 -+ unsigned char **buffer;        /* cache space for num_buffered_blocks uncompressed blocks */
   2.382 -+ void *compressed_buffer;       /* space for the largest compressed block */
   2.383 -+ size_t preload_array_size;     /* Size of pointer array in blocks */
   2.384 -+ size_t preload_size;           /* Number of successfully allocated blocks */
   2.385 -+ char **preload_cache;          /* Pointers to preloaded blocks */
   2.386 - 
   2.387 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   2.388 -  z_stream zstream;
   2.389 -+#endif
   2.390 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   2.391 -+ struct xz_dec *xzdecoderstate;
   2.392 -+ struct xz_buf xz_buffer;
   2.393 -+#endif
   2.394 - 
   2.395 -  struct file   *backing_file;  /* associated file */
   2.396 -  struct inode  *backing_inode; /* for bmap */
   2.397 - 
   2.398 -+ unsigned char *underlying_filename;
   2.399 -  unsigned long largest_block;
   2.400 -  unsigned int underlying_blksize;
   2.401 -+ loff_t underlying_total_size;
   2.402 -  int clo_number;
   2.403 -  int refcnt;
   2.404 -  struct block_device *bdev;
   2.405 -@@ -147,7 +180,6 @@
   2.406 +  size_t num_buffered_blocks;	/* how many uncompressed blocks buffered for performance */
   2.407 +@@ -178,14 +174,16 @@
   2.408 +  spinlock_t queue_lock;
   2.409 +  /* mutex for ioctl() */
   2.410 +  struct mutex clo_ctl_mutex;
   2.411 +- /* mutex for request */
   2.412 +- struct mutex clo_rq_mutex;
   2.413 ++ struct list_head clo_list;
   2.414 ++ struct task_struct *clo_thread;
   2.415 ++ wait_queue_head_t clo_event;
   2.416    struct request_queue *clo_queue;
   2.417    struct gendisk *clo_disk;
   2.418 +- struct blk_mq_tag_set tag_set;
   2.419    int suspended;
   2.420 -- char clo_file_name[LO_NAME_SIZE];
   2.421   };
   2.422   
   2.423 - /* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers,
   2.424 -@@ -156,52 +188,113 @@
   2.425 ++/* Changed in 2.639: cloop_dev is now a an array of cloop_dev pointers,
   2.426 ++   so we can specify how many devices we need via parameters. */
   2.427 + static struct cloop_device **cloop_dev;
   2.428   static const char *cloop_name=CLOOP_NAME;
   2.429   static int cloop_count = 0;
   2.430 - 
   2.431 --#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))) /* Must be compiled into kernel. */
   2.432 --#error  "Invalid Kernel configuration. CONFIG_ZLIB_INFLATE support is needed for cloop."
   2.433 --#endif
   2.434 --
   2.435 --/* Use __get_free_pages instead of vmalloc, allows up to 32 pages,
   2.436 -- * 2MB in one piece */
   2.437 - static void *cloop_malloc(size_t size)
   2.438 - {
   2.439 -- int order = get_order(size);
   2.440 -- if(order <= KMALLOC_MAX_ORDER)
   2.441 --   return (void *)kmalloc(size, GFP_KERNEL);
   2.442 -- else if(order < MAX_ORDER)
   2.443 --   return (void *)__get_free_pages(GFP_KERNEL, order);
   2.444 -+ /* kmalloc will fail after the system is running for a while, */
   2.445 -+ /* when large orders can't return contiguous memory. */
   2.446 -+ /* Let's just use vmalloc for now. :-/ */
   2.447 -+ /* int order = get_order(size); */
   2.448 -+ /* if(order <= KMALLOC_MAX_ORDER) */
   2.449 -+ /*  return (void *)kmalloc(size, GFP_KERNEL); */
   2.450 -+ /* else if(order < MAX_ORDER) */
   2.451 -+ /*  return (void *)__get_free_pages(GFP_KERNEL, order); */
   2.452 -  return (void *)vmalloc(size);
   2.453 +@@ -214,24 +212,21 @@
   2.454 +  vfree(mem);
   2.455   }
   2.456   
   2.457 - static void cloop_free(void *mem, size_t size)
   2.458 - {
   2.459 -- int order = get_order(size);
   2.460 -- if(order <= KMALLOC_MAX_ORDER)
   2.461 --   kfree(mem);
   2.462 -- else if(order < MAX_ORDER)
   2.463 --   free_pages((unsigned long)mem, order);
   2.464 -- else vfree(mem);
   2.465 -+ /* int order = get_order(size); */
   2.466 -+ /* if(order <= KMALLOC_MAX_ORDER) */
   2.467 -+ /*  kfree(mem); */
   2.468 -+ /* else if(order < MAX_ORDER) */
   2.469 -+ /*  free_pages((unsigned long)mem, order); */
   2.470 -+ /* else */
   2.471 -+ vfree(mem);
   2.472 - }
   2.473 - 
   2.474 --static int uncompress(struct cloop_device *clo,
   2.475 --                      unsigned char *dest, unsigned long *destLen,
   2.476 --                      unsigned char *source, unsigned long sourceLen)
   2.477 +-/* static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen) */
   2.478 +-static int uncompress(struct cloop_device *clo, u_int32_t block_num, u_int32_t compressed_length, unsigned long *uncompressed_length)
   2.479  +static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags) 
   2.480   {
   2.481 -- /* Most of this code can be found in fs/cramfs/uncompress.c */
   2.482 -- int err;
   2.483 -- clo->zstream.next_in = source;
   2.484 -- clo->zstream.avail_in = sourceLen;
   2.485 -- clo->zstream.next_out = dest;
   2.486 -- clo->zstream.avail_out = *destLen;
   2.487 -- err = zlib_inflateReset(&clo->zstream);
   2.488 -- if (err != Z_OK)
   2.489 --  {
   2.490 --   printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
   2.491 --   zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   2.492 --  }
   2.493 -- err = zlib_inflate(&clo->zstream, Z_FINISH);
   2.494 -- *destLen = clo->zstream.total_out;
   2.495 -- if (err != Z_STREAM_END) return err;
   2.496 -- return Z_OK;
   2.497 -+ int err = -1;
   2.498 -+ switch(flags)
   2.499 -+ {
   2.500 -+  case CLOOP_COMPRESSOR_NONE:
   2.501 +  int err = -1;
   2.502 +- int flags = CLOOP_BLOCK_FLAGS(clo->block_ptrs[block_num]);
   2.503 +  switch(flags)
   2.504 +  {
   2.505 +   case CLOOP_COMPRESSOR_NONE:
   2.506 +-   /* block is umcompressed, swap pointers only! */
   2.507 +-   { char *tmp = clo->compressed_buffer; clo->compressed_buffer = clo->buffer[clo->current_bufnum]; clo->buffer[clo->current_bufnum] = tmp; }
   2.508 +-   DEBUGP("cloop: block %d is uncompressed (flags=%d), just swapping %u bytes\n", block_num, flags, compressed_length);
   2.509  +   memcpy(dest, source, *destLen = sourceLen);
   2.510  +   err = Z_OK;
   2.511 -+   break;
   2.512 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   2.513 -+  case CLOOP_COMPRESSOR_ZLIB:
   2.514 +    break;
   2.515 + #if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   2.516 +   case CLOOP_COMPRESSOR_ZLIB:
   2.517 +-   clo->zstream.next_in = clo->compressed_buffer;
   2.518 +-   clo->zstream.avail_in = compressed_length;
   2.519 +-   clo->zstream.next_out = clo->buffer[clo->current_bufnum];
   2.520 +-   clo->zstream.avail_out = clo->head.block_size;
   2.521  +   clo->zstream.next_in = source;
   2.522  +   clo->zstream.avail_in = sourceLen;
   2.523  +   clo->zstream.next_out = dest;
   2.524  +   clo->zstream.avail_out = *destLen;
   2.525 -+   err = zlib_inflateReset(&clo->zstream);
   2.526 -+   if (err != Z_OK)
   2.527 -+   {
   2.528 -+    printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
   2.529 -+    zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   2.530 -+   }
   2.531 -+   err = zlib_inflate(&clo->zstream, Z_FINISH);
   2.532 +    err = zlib_inflateReset(&clo->zstream);
   2.533 +    if (err != Z_OK)
   2.534 +    {
   2.535 +@@ -239,50 +234,50 @@
   2.536 +     zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   2.537 +    }
   2.538 +    err = zlib_inflate(&clo->zstream, Z_FINISH);
   2.539 +-   *uncompressed_length = clo->zstream.total_out;
   2.540  +   *destLen = clo->zstream.total_out;
   2.541 -+   if (err == Z_STREAM_END) err = 0;
   2.542 +    if (err == Z_STREAM_END) err = 0;
   2.543 +-   DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *uncompressed_length);
   2.544  +   DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
   2.545 -+   break;
   2.546 -+#endif
   2.547 -+#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   2.548 -+  case CLOOP_COMPRESSOR_LZO1X:
   2.549 -+   {
   2.550 -+    size_t tmp = (size_t) clo->head.block_size;
   2.551 +    break;
   2.552 + #endif
   2.553 + #if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   2.554 +   case CLOOP_COMPRESSOR_LZO1X:
   2.555 +    {
   2.556 +     size_t tmp = (size_t) clo->head.block_size;
   2.557 +-    err = lzo1x_decompress_safe(clo->compressed_buffer, compressed_length,
   2.558 +-             clo->buffer[clo->current_bufnum], &tmp);
   2.559 +-    if (err == LZO_E_OK) *uncompressed_length = (u_int32_t) tmp;
   2.560  +    err = lzo1x_decompress_safe(source, sourceLen,
   2.561  +             dest, &tmp);
   2.562  +    if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
   2.563 -+   }
   2.564 -+   break;
   2.565 -+#endif
   2.566 -+#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   2.567 -+  case CLOOP_COMPRESSOR_LZ4:
   2.568 -+   {
   2.569 +    }
   2.570 +    break;
   2.571 + #endif
   2.572 + #if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   2.573 +   case CLOOP_COMPRESSOR_LZ4:
   2.574 +    {
   2.575 +-    size_t outputSize = clo->head.block_size;
   2.576  +    size_t outputSize = *destLen;
   2.577 -+    /* We should adjust outputSize here, in case the last block is smaller than block_size */
   2.578 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
   2.579 +     /* We should adjust outputSize here, in case the last block is smaller than block_size */
   2.580 + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
   2.581 +-    err = lz4_decompress(clo->compressed_buffer, (size_t *) &compressed_length,
   2.582 +-                         clo->buffer[clo->current_bufnum], outputSize);
   2.583  +    err = lz4_decompress(source, (size_t *) &sourceLen,
   2.584  +                         dest, outputSize);
   2.585 -+#else
   2.586 + #else
   2.587 +-    err = LZ4_decompress_safe(clo->compressed_buffer,
   2.588 +-                              clo->buffer[clo->current_bufnum],
   2.589 +-                              compressed_length, outputSize);
   2.590  +    err = LZ4_decompress_safe(source,
   2.591  +                              dest,
   2.592  +                              sourceLen, outputSize);
   2.593 -+#endif
   2.594 -+    if (err >= 0) 
   2.595 -+    {
   2.596 -+     err = 0;
   2.597 + #endif
   2.598 +     if (err >= 0) 
   2.599 +     {
   2.600 +      err = 0;
   2.601 +-     *uncompressed_length = outputSize;
   2.602  +     *destLen = outputSize;
   2.603 -+    }
   2.604 -+   }
   2.605 -+  break;
   2.606 -+#endif
   2.607 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   2.608 -+ case CLOOP_COMPRESSOR_XZ:
   2.609 +     }
   2.610 +    }
   2.611 +   break;
   2.612 + #endif
   2.613 + #if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   2.614 +  case CLOOP_COMPRESSOR_XZ:
   2.615 +-  clo->xz_buffer.in = clo->compressed_buffer;
   2.616  +  clo->xz_buffer.in = source;
   2.617 -+  clo->xz_buffer.in_pos = 0;
   2.618 +   clo->xz_buffer.in_pos = 0;
   2.619 +-  clo->xz_buffer.in_size = compressed_length;
   2.620 +-  clo->xz_buffer.out = clo->buffer[clo->current_bufnum];
   2.621  +  clo->xz_buffer.in_size = sourceLen;
   2.622  +  clo->xz_buffer.out = dest;
   2.623 -+  clo->xz_buffer.out_pos = 0;
   2.624 +   clo->xz_buffer.out_pos = 0;
   2.625 +-  clo->xz_buffer.out_size = clo->head.block_size;
   2.626  +  clo->xz_buffer.out_size = *destLen;
   2.627 -+  xz_dec_reset(clo->xzdecoderstate);
   2.628 -+  err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
   2.629 -+  if (err == XZ_STREAM_END || err == XZ_OK)
   2.630 -+  {
   2.631 -+   err = 0;
   2.632 -+  }
   2.633 -+  else
   2.634 -+  {
   2.635 -+   printk(KERN_ERR "%s: xz_dec_run error %d\n", cloop_name, err);
   2.636 -+   err = 1;
   2.637 -+  }
   2.638 -+  break;
   2.639 -+#endif
   2.640 -+ default:
   2.641 -+   printk(KERN_ERR "%s: compression method is not supported!\n", cloop_name);
   2.642 -+ }
   2.643 -+ return err;
   2.644 - }
   2.645 - 
   2.646 - static ssize_t cloop_read_from_file(struct cloop_device *clo, struct file *f, char *buf,
   2.647 -@@ -220,7 +313,7 @@
   2.648 +   xz_dec_reset(clo->xzdecoderstate);
   2.649 +   err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
   2.650 +   if (err == XZ_STREAM_END || err == XZ_OK)
   2.651 +@@ -309,16 +304,12 @@
   2.652 +  while (buf_done < buf_len)
   2.653 +   {
   2.654 +    size_t size = buf_len - buf_done, size_read;
   2.655 +-   mm_segment_t old_fs;
   2.656 +    /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
   2.657 +    /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
   2.658 +-
   2.659 +-   // mutex_lock(&clo->clo_rq_mutex);
   2.660 +-   old_fs = get_fs();
   2.661 +-   set_fs(KERNEL_DS);
   2.662 ++   mm_segment_t old_fs = get_fs();
   2.663 ++   set_fs(get_ds());
   2.664 +    size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
   2.665 +    set_fs(old_fs);
   2.666 +-   // mutex_unlock(&clo->clo_rq_mutex);
   2.667   
   2.668      if(size_read <= 0)
   2.669       {
   2.670 --     printk(KERN_ERR "%s: Read error %d at pos %Lu in file %s, "
   2.671 -+     printk(KERN_ERR "%s: Read error %d at pos %llu in file %s, "
   2.672 -                      "%d bytes lost.\n", cloop_name, (int)size_read, pos,
   2.673 - 		     file, (int)size);
   2.674 -      memset(buf + buf_len - size, 0, size);
   2.675 -@@ -232,72 +325,84 @@
   2.676 - }
   2.677 +@@ -358,8 +349,8 @@
   2.678 +    return i;
   2.679 +   }
   2.680   
   2.681 - /* This looks more complicated than it is */
   2.682 --/* Returns number of block buffer to use for this request */
   2.683 -+/* Returns number of cache block buffer to use for this request */
   2.684 - static int cloop_load_buffer(struct cloop_device *clo, int blocknum)
   2.685 - {
   2.686 -- unsigned int buf_done = 0;
   2.687 -- unsigned long buflen;
   2.688 -- unsigned int buf_length;
   2.689 -+ loff_t compressed_block_offset;
   2.690 -+ long compressed_block_len;
   2.691 -+ long uncompressed_block_len=0;
   2.692 -  int ret;
   2.693 -  int i;
   2.694 -- if(blocknum > ntohl(clo->head.num_blocks) || blocknum < 0)
   2.695 --  {
   2.696 --   printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
   2.697 --                       cloop_name, blocknum);
   2.698 --   return -1;
   2.699 --  }
   2.700 -+ if(blocknum > clo->head.num_blocks || blocknum < 0)
   2.701 -+ {
   2.702 -+  printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
   2.703 -+         cloop_name, blocknum);
   2.704 -+  return -1;
   2.705 -+ }
   2.706 - 
   2.707 -  /* Quick return if the block we seek is already in one of the buffers. */
   2.708 -  /* Return number of buffer */
   2.709 -- for(i=0; i<BUFFERED_BLOCKS; i++)
   2.710 -+ for(i=0; i<clo->num_buffered_blocks; i++)
   2.711 -   if (blocknum == clo->buffered_blocknum[i])
   2.712 --   {
   2.713 --    DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
   2.714 --    return i;
   2.715 --   }
   2.716 --
   2.717 -- buf_length = be64_to_cpu(clo->offsets[blocknum+1]) - be64_to_cpu(clo->offsets[blocknum]);
   2.718 --
   2.719 --/* Load one compressed block from the file. */
   2.720 -- cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
   2.721 --                    be64_to_cpu(clo->offsets[blocknum]), buf_length);
   2.722 -+  {
   2.723 -+   DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
   2.724 -+   return i;
   2.725 -+  }
   2.726 - 
   2.727 -- buflen = ntohl(clo->head.block_size);
   2.728 +- compressed_block_offset = CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]);
   2.729 +- compressed_block_len = (long) (CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum+1]) - compressed_block_offset) ;
   2.730  + compressed_block_offset = clo->block_ptrs[blocknum].offset;
   2.731  + compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;
   2.732   
   2.733 -- /* Go to next position in the block ring buffer */
   2.734 -- clo->current_bufnum++;
   2.735 -- if(clo->current_bufnum >= BUFFERED_BLOCKS) clo->current_bufnum = 0;
   2.736 -+ /* Load one compressed block from the file. */
   2.737 -+ if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
   2.738 -+ {
   2.739 -+  size_t n = cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
   2.740 -+                    compressed_block_offset, compressed_block_len);
   2.741 -+  if (n!= compressed_block_len)
   2.742 -+   {
   2.743 -+    printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
   2.744 +  /* Load one compressed block from the file. */
   2.745 +  if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
   2.746 +@@ -369,12 +360,12 @@
   2.747 +   if (n!= compressed_block_len)
   2.748 +    {
   2.749 +     printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
   2.750 +-     cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
   2.751  +     cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
   2.752 -+    /* return -1; */
   2.753 -+   }
   2.754 -+ } else {
   2.755 -+  printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
   2.756 +     /* return -1; */
   2.757 +    }
   2.758 +  } else {
   2.759 +   printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
   2.760 +-  cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
   2.761  +  cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
   2.762 -+  return -1;
   2.763 -+ }
   2.764 -+  
   2.765 -+ /* Go to next position in the cache block buffer (which is used as a cyclic buffer) */
   2.766 -+ if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
   2.767 +   return -1;
   2.768 +  }
   2.769 +   
   2.770 +@@ -382,14 +373,16 @@
   2.771 +  if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
   2.772   
   2.773    /* Do the uncompression */
   2.774 -- ret = uncompress(clo, clo->buffer[clo->current_bufnum], &buflen, clo->compressed_buffer,
   2.775 --                  buf_length);
   2.776 +- ret = uncompress(clo, blocknum, compressed_block_len, &uncompressed_block_len);
   2.777  + uncompressed_block_len = clo->head.block_size;
   2.778  + ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
   2.779  +	 clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
   2.780    /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
   2.781    if (ret != 0)
   2.782 --  {
   2.783 --   printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u "
   2.784 --          "%Lu-%Lu\n", cloop_name, ret, blocknum,
   2.785 --	  ntohl(clo->head.block_size), buflen, buf_length, buf_done,
   2.786 --	  be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1]));
   2.787 --   clo->buffered_blocknum[clo->current_bufnum] = -1;
   2.788 --   return -1;
   2.789 --  }
   2.790 -+ {
   2.791 -+  printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
   2.792 -+         cloop_name, ret, blocknum,
   2.793 +  {
   2.794 +   printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
   2.795 +          cloop_name, ret, blocknum,
   2.796 +-         compressed_block_len, CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]),
   2.797 +-         CLOOP_BLOCK_FLAGS(clo->block_ptrs[blocknum]));
   2.798  +         compressed_block_len, clo->block_ptrs[blocknum].offset,
   2.799  +         clo->block_ptrs[blocknum].flags);
   2.800 -+         clo->buffered_blocknum[clo->current_bufnum] = -1;
   2.801 -+  return -1;
   2.802 -+ }
   2.803 -  clo->buffered_blocknum[clo->current_bufnum] = blocknum;
   2.804 +          clo->buffered_blocknum[clo->current_bufnum] = -1;
   2.805 +   return -1;
   2.806 +  }
   2.807 +@@ -397,107 +390,146 @@
   2.808    return clo->current_bufnum;
   2.809   }
   2.810   
   2.811 - /* This function does all the real work. */
   2.812 --/* returns "uptodate" */
   2.813 +-static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
   2.814 ++/* This function does all the real work. */
   2.815  +/* returns "uptodate"                    */
   2.816 - static int cloop_handle_request(struct cloop_device *clo, struct request *req)
   2.817 ++static int cloop_handle_request(struct cloop_device *clo, struct request *req)
   2.818   {
   2.819    int buffered_blocknum = -1;
   2.820    int preloaded = 0;
   2.821 -  loff_t offset     = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
   2.822 -- struct bio_vec *bvec;
   2.823 -+ struct bio_vec bvec;
   2.824 +- loff_t offset = (loff_t) blk_rq_pos(req)<<9;
   2.825 ++ loff_t offset     = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
   2.826 +  struct bio_vec bvec;
   2.827    struct req_iterator iter;
   2.828 +- blk_status_t ret = BLK_STS_OK;
   2.829 +-
   2.830 +- if (unlikely(req_op(req) != REQ_OP_READ ))
   2.831 +- {
   2.832 +-  blk_dump_rq_flags(req, DEVICE_NAME " bad request");
   2.833 +-  return BLK_STS_IOERR;
   2.834 +- }
   2.835 +-
   2.836 +- if (unlikely(!clo->backing_file && !clo->suspended))
   2.837 +- {
   2.838 +-  DEBUGP("cloop_handle_request: not connected to a file\n");
   2.839 +-  return BLK_STS_IOERR;
   2.840 +- }
   2.841 +-
   2.842    rq_for_each_segment(bvec, req, iter)
   2.843 +- {
   2.844 +-  unsigned long len = bvec.bv_len;
   2.845 +-  loff_t to_offset  = bvec.bv_offset;
   2.846 +-
   2.847 +-  while(len > 0)
   2.848     {
   2.849 --   unsigned long len = bvec->bv_len;
   2.850 --   char *to_ptr      = kmap(bvec->bv_page) + bvec->bv_offset;
   2.851 +-   u_int32_t length_in_buffer;
   2.852 +-   loff_t block_offset = offset;
   2.853 +-   u_int32_t offset_in_buffer;
   2.854 +-   char *from_ptr, *to_ptr;
   2.855 +-   /* do_div (div64.h) returns the 64bit division remainder and  */
   2.856 +-   /* puts the result in the first argument, i.e. block_offset   */
   2.857 +-   /* becomes the blocknumber to load, and offset_in_buffer the  */
   2.858 +-   /* position in the buffer */
   2.859 +-   offset_in_buffer = do_div(block_offset, clo->head.block_size);
   2.860 +-   /* Lookup preload cache */
   2.861 +-   if(block_offset < clo->preload_size && clo->preload_cache != NULL && clo->preload_cache[block_offset] != NULL)
   2.862 +-   { /* Copy from cache */
   2.863 +-    preloaded = 1;
   2.864 +-    from_ptr = clo->preload_cache[block_offset];
   2.865 +-   }
   2.866 +-   else
   2.867 +-   {
   2.868 +-    preloaded = 0;
   2.869 +-    buffered_blocknum = cloop_load_buffer(clo,block_offset);
   2.870 +-    if(buffered_blocknum == -1)
   2.871  +   unsigned long len = bvec.bv_len;
   2.872  +   char *to_ptr      = kmap(bvec.bv_page) + bvec.bv_offset;
   2.873 -    while(len > 0)
   2.874 ++   while(len > 0)
   2.875       {
   2.876 -      u_int32_t length_in_buffer;
   2.877 -@@ -308,7 +413,7 @@
   2.878 -      /* puts the result in the first argument, i.e. block_offset   */
   2.879 -      /* becomes the blocknumber to load, and offset_in_buffer the  */
   2.880 -      /* position in the buffer */
   2.881 --     offset_in_buffer = do_div(block_offset, ntohl(clo->head.block_size));
   2.882 +-     ret = BLK_STS_IOERR;
   2.883 +-     break; /* invalid data, leave inner loop */
   2.884 ++     u_int32_t length_in_buffer;
   2.885 ++     loff_t block_offset = offset;
   2.886 ++     u_int32_t offset_in_buffer;
   2.887 ++     char *from_ptr;
   2.888 ++     /* do_div (div64.h) returns the 64bit division remainder and  */
   2.889 ++     /* puts the result in the first argument, i.e. block_offset   */
   2.890 ++     /* becomes the blocknumber to load, and offset_in_buffer the  */
   2.891 ++     /* position in the buffer */
   2.892  +     offset_in_buffer = do_div(block_offset, clo->head.block_size);
   2.893 -      /* Lookup preload cache */
   2.894 -      if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
   2.895 -         clo->preload_cache[block_offset] != NULL)
   2.896 -@@ -325,7 +430,7 @@
   2.897 -        from_ptr = clo->buffer[buffered_blocknum];
   2.898 -       }
   2.899 -      /* Now, at least part of what we want will be in the buffer. */
   2.900 --     length_in_buffer = ntohl(clo->head.block_size) - offset_in_buffer;
   2.901 ++     /* Lookup preload cache */
   2.902 ++     if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
   2.903 ++        clo->preload_cache[block_offset] != NULL)
   2.904 ++      { /* Copy from cache */
   2.905 ++       preloaded = 1;
   2.906 ++       from_ptr = clo->preload_cache[block_offset];
   2.907 ++      }
   2.908 ++     else
   2.909 ++      {
   2.910 ++       preloaded = 0;
   2.911 ++       buffered_blocknum = cloop_load_buffer(clo,block_offset);
   2.912 ++       if(buffered_blocknum == -1) break; /* invalid data, leave inner loop */
   2.913 ++       /* Copy from buffer */
   2.914 ++       from_ptr = clo->buffer[buffered_blocknum];
   2.915 ++      }
   2.916 ++     /* Now, at least part of what we want will be in the buffer. */
   2.917  +     length_in_buffer = clo->head.block_size - offset_in_buffer;
   2.918 -      if(length_in_buffer > len)
   2.919 -       {
   2.920 - /*   DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
   2.921 -@@ -337,18 +442,19 @@
   2.922 -      len         -= length_in_buffer;
   2.923 -      offset      += length_in_buffer;
   2.924 -     } /* while inner loop */
   2.925 --   kunmap(bvec->bv_page);
   2.926 ++     if(length_in_buffer > len)
   2.927 ++      {
   2.928 ++/*   DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
   2.929 ++                      length_in_buffer,len); */
   2.930 ++       length_in_buffer = len;
   2.931 ++      }
   2.932 ++     memcpy(to_ptr, from_ptr + offset_in_buffer, length_in_buffer);
   2.933 ++     to_ptr      += length_in_buffer;
   2.934 ++     len         -= length_in_buffer;
   2.935 ++     offset      += length_in_buffer;
   2.936 ++    } /* while inner loop */
   2.937  +   kunmap(bvec.bv_page);
   2.938  +   cond_resched();
   2.939 -   } /* end rq_for_each_segment*/
   2.940 -  return ((buffered_blocknum!=-1) || preloaded);
   2.941 - }
   2.942 - 
   2.943 - /* Adopted from loop.c, a kernel thread to handle physical reads and
   2.944 -- * decompression. */
   2.945 ++  } /* end rq_for_each_segment*/
   2.946 ++ return ((buffered_blocknum!=-1) || preloaded);
   2.947 ++}
   2.948 ++
   2.949 ++/* Adopted from loop.c, a kernel thread to handle physical reads and
   2.950  +   decompression. */
   2.951 - static int cloop_thread(void *data)
   2.952 - {
   2.953 -  struct cloop_device *clo = data;
   2.954 -  current->flags |= PF_NOFREEZE;
   2.955 -- set_user_nice(current, -15);
   2.956 ++static int cloop_thread(void *data)
   2.957 ++{
   2.958 ++ struct cloop_device *clo = data;
   2.959 ++ current->flags |= PF_NOFREEZE;
   2.960  + set_user_nice(current, 10);
   2.961 -  while (!kthread_should_stop()||!list_empty(&clo->clo_list))
   2.962 -   {
   2.963 -    int err;
   2.964 -@@ -390,10 +496,18 @@
   2.965 -    int rw;
   2.966 -  /* quick sanity checks */
   2.967 -    /* blk_fs_request() was removed in 2.6.36 */
   2.968 --   if (unlikely(req == NULL || (req->cmd_type != REQ_TYPE_FS)))
   2.969 ++ while (!kthread_should_stop()||!list_empty(&clo->clo_list))
   2.970 ++  {
   2.971 ++   int err;
   2.972 ++   err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) || 
   2.973 ++                                  kthread_should_stop());
   2.974 ++   if(unlikely(err))
   2.975 ++    {
   2.976 ++     DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
   2.977 ++     continue;
   2.978 +     }
   2.979 +-    /* Copy from buffer */
   2.980 +-    from_ptr = clo->buffer[buffered_blocknum];
   2.981 +-   }
   2.982 +-   /* Now, at least part of what we want will be in the buffer. */
   2.983 +-   length_in_buffer = clo->head.block_size - offset_in_buffer;
   2.984 +-   if(length_in_buffer > len)
   2.985 +-   {
   2.986 +-   /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", length_in_buffer,len); */
   2.987 +-    length_in_buffer = len;
   2.988 +-   }
   2.989 +-   to_ptr      = kmap_atomic(bvec.bv_page);
   2.990 +-   memcpy(to_ptr + to_offset, from_ptr + offset_in_buffer, length_in_buffer);
   2.991 +-   kunmap_atomic(to_ptr);
   2.992 +-   to_offset   += length_in_buffer;
   2.993 +-   len         -= length_in_buffer;
   2.994 +-   offset      += length_in_buffer;
   2.995 +-  } /* while inner loop */
   2.996 +- } /* rq_for_each_segment */
   2.997 +- return ret;
   2.998 +-}
   2.999 +-
  2.1000 +-static blk_status_t cloop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
  2.1001 +-{
  2.1002 +-//  struct request_queue *q  = hctx->queue;
  2.1003 +-//  struct cloop_device *clo = q->queuedata;
  2.1004 +- struct request *req = bd->rq;
  2.1005 +- struct cloop_device *clo = req->rq_disk->private_data;
  2.1006 +- blk_status_t ret         = BLK_STS_OK;
  2.1007 +-
  2.1008 +-#if 1 /* Does it work when loading libraries? */
  2.1009 +- /* Since we have a buffered block list as well as data to read */
  2.1010 +- /* from disk (slow), and are (probably) never called from an   */
  2.1011 +- /* interrupt, we use a simple mutex lock right here to ensure  */
  2.1012 +- /* consistency.                                                */
  2.1013 +-  mutex_lock(&clo->clo_rq_mutex);
  2.1014 +- #else
  2.1015 +-  spin_lock_irq(&clo->queue_lock);
  2.1016 +- #endif
  2.1017 +- blk_mq_start_request(req);
  2.1018 +- do {
  2.1019 +-  ret = cloop_handle_request(clo, req);
  2.1020 +- } while(blk_update_request(req, ret, blk_rq_cur_bytes(req)));
  2.1021 +- blk_mq_end_request(req, ret);
  2.1022 +- #if 1 /* See above */
  2.1023 +-  mutex_unlock(&clo->clo_rq_mutex);
  2.1024 +- #else
  2.1025 +-  spin_unlock_irq(&clo->queue_lock);
  2.1026 +- #endif
  2.1027 +- return ret;
  2.1028 ++   if(!list_empty(&clo->clo_list))
  2.1029 ++    {
  2.1030 ++     struct request *req;
  2.1031 ++     unsigned long flags;
  2.1032 ++     int uptodate;
  2.1033 ++     spin_lock_irq(&clo->queue_lock);
  2.1034 ++     req = list_entry(clo->clo_list.next, struct request, queuelist);
  2.1035 ++     list_del_init(&req->queuelist);
  2.1036 ++     spin_unlock_irq(&clo->queue_lock);
  2.1037 ++     uptodate = cloop_handle_request(clo, req);
  2.1038 ++     spin_lock_irqsave(&clo->queue_lock, flags);
  2.1039 ++     __blk_end_request_all(req, uptodate ? 0 : -EIO);
  2.1040 ++     spin_unlock_irqrestore(&clo->queue_lock, flags);
  2.1041 ++    }
  2.1042 ++  }
  2.1043 ++ DEBUGP(KERN_ERR "cloop_thread exited.\n");
  2.1044 ++ return 0;
  2.1045 ++}
  2.1046 ++
  2.1047 ++/* This is called by the kernel block queue management every now and then,
  2.1048 ++ * with successive read requests qeued and sorted in a (hopefully)
  2.1049 ++ * "most efficient way". spin_lock_irq() is being held by the kernel. */
  2.1050 ++static void cloop_do_request(struct request_queue *q)
  2.1051 ++{
  2.1052 ++ struct request *req;
  2.1053 ++ while((req = blk_fetch_request(q)) != NULL)
  2.1054 ++  {
  2.1055 ++   struct cloop_device *clo;
  2.1056 ++   int rw;
  2.1057 ++ /* quick sanity checks */
  2.1058 ++   /* blk_fs_request() was removed in 2.6.36 */
  2.1059  +   if (unlikely(req == NULL
  2.1060  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
  2.1061  +   || (req->cmd_type != REQ_TYPE_FS)
  2.1062  +#endif
  2.1063  +   ))
  2.1064 -     goto error_continue;
  2.1065 -    rw = rq_data_dir(req);
  2.1066 --   if (unlikely(rw != READ && rw != READA))
  2.1067 ++    goto error_continue;
  2.1068 ++   rw = rq_data_dir(req);
  2.1069  +   if (unlikely(rw != READ
  2.1070  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
  2.1071  +                && rw != READA
  2.1072  +#endif
  2.1073  +    ))
  2.1074 -     {
  2.1075 -      DEBUGP("cloop_do_request: bad command\n");
  2.1076 -      goto error_continue;
  2.1077 -@@ -409,40 +523,51 @@
  2.1078 -    continue; /* next request */
  2.1079 -   error_continue:
  2.1080 -    DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
  2.1081 ++    {
  2.1082 ++     DEBUGP("cloop_do_request: bad command\n");
  2.1083 ++     goto error_continue;
  2.1084 ++    }
  2.1085 ++   clo = req->rq_disk->private_data;
  2.1086 ++   if (unlikely(!clo->backing_file && !clo->suspended))
  2.1087 ++    {
  2.1088 ++     DEBUGP("cloop_do_request: not connected to a file\n");
  2.1089 ++     goto error_continue;
  2.1090 ++    }
  2.1091 ++   list_add_tail(&req->queuelist, &clo->clo_list); /* Add to working list for thread */
  2.1092 ++   wake_up(&clo->clo_event);    /* Wake up cloop_thread */
  2.1093 ++   continue; /* next request */
  2.1094 ++  error_continue:
  2.1095 ++   DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
  2.1096  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  2.1097 -    req->errors++;
  2.1098 ++   req->errors++;
  2.1099  +#else
  2.1100  +   req->error_count++;
  2.1101  +#endif
  2.1102 -    __blk_end_request_all(req, -EIO);
  2.1103 -   }
  2.1104 ++   __blk_end_request_all(req, -EIO);
  2.1105 ++  }
  2.1106   }
  2.1107   
  2.1108 --/* Read header and offsets from already opened file */
  2.1109 --static int cloop_set_file(int cloop_num, struct file *file, char *filename)
  2.1110 -+/* Read header, flags and offsets from already opened file */
  2.1111 -+static int cloop_set_file(int cloop_num, struct file *file)
  2.1112 - {
  2.1113 -  struct cloop_device *clo = cloop_dev[cloop_num];
  2.1114 -  struct inode *inode;
  2.1115 + /* Read header, flags and offsets from already opened file */
  2.1116 +@@ -508,7 +540,7 @@
  2.1117    char *bbuf=NULL;
  2.1118 -- unsigned int i, offsets_read, total_offsets;
  2.1119 -- int isblkdev;
  2.1120 -- int error = 0;
  2.1121 -+ unsigned int bbuf_size = 0;
  2.1122 -+ const unsigned int header_size = sizeof(struct cloop_head);
  2.1123 +  unsigned int bbuf_size = 0;
  2.1124 +  const unsigned int header_size = sizeof(struct cloop_head);
  2.1125 +- unsigned int i, offsets_read=0, total_offsets=0;
  2.1126  + unsigned int i, total_offsets=0;
  2.1127 -+ loff_t fs_read_position = 0, header_pos[2];
  2.1128 -+ int flags, isblkdev, bytes_read, error = 0;
  2.1129 -+ if (clo->suspended) return error;
  2.1130 -+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
  2.1131 -  inode = file->f_dentry->d_inode;
  2.1132 -+ clo->underlying_filename = kstrdup(file->f_dentry->d_name.name ? file->f_dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
  2.1133 -+ #else
  2.1134 -+ inode = file->f_path.dentry->d_inode;
  2.1135 -+ clo->underlying_filename = kstrdup(file->f_path.dentry->d_name.name ? file->f_path.dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
  2.1136 -+ #endif
  2.1137 -  isblkdev=S_ISBLK(inode->i_mode)?1:0;
  2.1138 -  if(!isblkdev&&!S_ISREG(inode->i_mode))
  2.1139 +  loff_t fs_read_position = 0, header_pos[2];
  2.1140 +  int isblkdev, bytes_read, error = 0;
  2.1141 +  if (clo->suspended) return error;
  2.1142 +@@ -581,29 +613,19 @@
  2.1143 +     goto error_release;
  2.1144 +    }
  2.1145 +    memcpy(&clo->head, bbuf, header_size);
  2.1146 +-   if (strncmp(bbuf+CLOOP4_SIGNATURE_OFFSET, CLOOP4_SIGNATURE, CLOOP4_SIGNATURE_SIZE)==0)
  2.1147 ++   if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
  2.1148 +    {
  2.1149 +-    clo->file_format=4;
  2.1150 ++    clo->file_format++;
  2.1151 +     clo->head.block_size=ntohl(clo->head.block_size);
  2.1152 +     clo->head.num_blocks=ntohl(clo->head.num_blocks);
  2.1153 +     clo->header_first =  (i==0) ? 1 : 0;
  2.1154 +-    printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  2.1155 +-    break;
  2.1156 +-   }
  2.1157 +-   else if (strncmp(bbuf+CLOOP2_SIGNATURE_OFFSET, CLOOP2_SIGNATURE, CLOOP2_SIGNATURE_SIZE)==0)
  2.1158 +-   {
  2.1159 +-    clo->file_format=2;
  2.1160 +-    clo->head.block_size=ntohl(clo->head.block_size);
  2.1161 +-    clo->head.num_blocks=ntohl(clo->head.num_blocks);
  2.1162 +-    clo->header_first =  (i==0) ? 1 : 0;
  2.1163 +-    printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  2.1164 ++    printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  2.1165 +     break;
  2.1166 +    }
  2.1167 +   }
  2.1168 +  if (clo->file_format == 0)
  2.1169     {
  2.1170 -    printk(KERN_ERR "%s: %s not a regular file or block device\n",
  2.1171 --		   cloop_name, filename);
  2.1172 -+		   cloop_name, clo->underlying_filename);
  2.1173 +-   printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
  2.1174 +-                   "please use an older version of %s for this file.\n",
  2.1175 ++   printk(KERN_ERR "%s: Cannot detect %s format.\n",
  2.1176 +                    cloop_name, cloop_name);
  2.1177 +        error=-EBADF; goto error_release;
  2.1178 +   }
  2.1179 +@@ -613,67 +635,133 @@
  2.1180 +           cloop_name, clo->head.block_size);
  2.1181      error=-EBADF; goto error_release;
  2.1182     }
  2.1183 -  clo->backing_file = file;
  2.1184 -  clo->backing_inode= inode ;
  2.1185 -- if(!isblkdev&&inode->i_size<sizeof(struct cloop_head))
  2.1186 -+ clo->underlying_total_size = (isblkdev) ? inode->i_bdev->bd_inode->i_size : inode->i_size;
  2.1187 -+ if(clo->underlying_total_size < header_size)
  2.1188 +- total_offsets=clo->head.num_blocks+1;
  2.1189 +- if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
  2.1190 ++ total_offsets=clo->head.num_blocks;
  2.1191 ++ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
  2.1192 +                       total_offsets > inode->i_size))
  2.1193     {
  2.1194 --   printk(KERN_ERR "%s: %lu bytes (must be >= %u bytes)\n",
  2.1195 --                   cloop_name, (unsigned long)inode->i_size,
  2.1196 --		   (unsigned)sizeof(struct cloop_head));
  2.1197 -+   printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n",
  2.1198 -+                   cloop_name, clo->underlying_total_size,
  2.1199 -+		   (unsigned int)header_size);
  2.1200 +    printk(KERN_ERR "%s: file %s too small for %u blocks\n",
  2.1201 +           cloop_name, clo->underlying_filename, clo->head.num_blocks);
  2.1202      error=-EBADF; goto error_release;
  2.1203     }
  2.1204 -- /* In suspended mode, we have done all checks necessary - FF */
  2.1205 -- if (clo->suspended)
  2.1206 --   return error;
  2.1207 -  if(isblkdev)
  2.1208 -   {
  2.1209 -    struct request_queue *q = bdev_get_queue(inode->i_bdev);
  2.1210 -@@ -451,104 +576,225 @@
  2.1211 -    /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */
  2.1212 -    blk_queue_max_segment_size(clo->clo_queue, queue_max_segment_size(q));
  2.1213 -    blk_queue_segment_boundary(clo->clo_queue, queue_segment_boundary(q));
  2.1214 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
  2.1215 -    blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn);
  2.1216 -+#endif
  2.1217 -    clo->underlying_blksize = block_size(inode->i_bdev);
  2.1218 -   }
  2.1219 -  else
  2.1220 -    clo->underlying_blksize = PAGE_SIZE;
  2.1221 -- DEBUGP("Underlying blocksize is %u\n", clo->underlying_blksize);
  2.1222 -- bbuf = cloop_malloc(clo->underlying_blksize);
  2.1223 -+
  2.1224 -+ DEBUGP(KERN_INFO "Underlying blocksize of %s is %u\n", clo->underlying_filename, clo->underlying_blksize);
  2.1225 -+ DEBUGP(KERN_INFO "Underlying total size of %s is %llu\n", clo->underlying_filename, clo->underlying_total_size);
  2.1226 -+
  2.1227 -+ /* clo->underlying_blksize should be larger than header_size, even if it's only PAGE_SIZE */
  2.1228 -+ bbuf_size = clo->underlying_blksize;
  2.1229 -+ bbuf = cloop_malloc(bbuf_size);
  2.1230 -  if(!bbuf)
  2.1231 -   {
  2.1232 --   printk(KERN_ERR "%s: out of kernel mem for block buffer (%lu bytes)\n",
  2.1233 --                   cloop_name, (unsigned long)clo->underlying_blksize);
  2.1234 -+   printk(KERN_ERR "%s: out of kernel mem for buffer (%u bytes)\n",
  2.1235 -+                   cloop_name, (unsigned int) bbuf_size);
  2.1236 -+   error=-ENOMEM; goto error_release;
  2.1237 -+  }
  2.1238 -+
  2.1239 -+ header_pos[0] = 0; /* header first */
  2.1240 -+ header_pos[1] = clo->underlying_total_size - sizeof(struct cloop_head); /* header last */
  2.1241 -+ for(i=0; i<2; i++)
  2.1242 -+  {
  2.1243 -+   /* Check for header */
  2.1244 -+   size_t bytes_readable = MIN(clo->underlying_blksize, clo->underlying_total_size - header_pos[i]);
  2.1245 -+   size_t bytes_read = cloop_read_from_file(clo, file, bbuf, header_pos[i], bytes_readable);
  2.1246 -+   if(bytes_read != bytes_readable)
  2.1247 -+   {
  2.1248 -+    printk(KERN_ERR "%s: Bad file %s, read() of %s %u bytes returned %d.\n",
  2.1249 -+                    cloop_name, clo->underlying_filename, (i==0)?"first":"last",
  2.1250 -+		    (unsigned int)header_size, (int)bytes_read);
  2.1251 -+    error=-EBADF;
  2.1252 -+    goto error_release;
  2.1253 -+   }
  2.1254 -+   memcpy(&clo->head, bbuf, header_size);
  2.1255 -+   if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
  2.1256 -+   {
  2.1257 -+    clo->file_format++;
  2.1258 -+    clo->head.block_size=ntohl(clo->head.block_size);
  2.1259 -+    clo->head.num_blocks=ntohl(clo->head.num_blocks);
  2.1260 -+    clo->header_first =  (i==0) ? 1 : 0;
  2.1261 -+    printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  2.1262 -+    break;
  2.1263 -+   }
  2.1264 -+  }
  2.1265 -+ if (clo->file_format == 0)
  2.1266 -+  {
  2.1267 -+   printk(KERN_ERR "%s: Cannot detect %s format.\n",
  2.1268 -+                   cloop_name, cloop_name);
  2.1269 -+       error=-EBADF; goto error_release;
  2.1270 -+  }
  2.1271 -+ if (clo->head.block_size % 512 != 0)
  2.1272 -+  {
  2.1273 -+   printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
  2.1274 -+          cloop_name, clo->head.block_size);
  2.1275 -+   error=-EBADF; goto error_release;
  2.1276 -+  }
  2.1277 -+ total_offsets=clo->head.num_blocks;
  2.1278 -+ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
  2.1279 -+                      total_offsets > inode->i_size))
  2.1280 -+  {
  2.1281 -+   printk(KERN_ERR "%s: file %s too small for %u blocks\n",
  2.1282 -+          cloop_name, clo->underlying_filename, clo->head.num_blocks);
  2.1283 -+   error=-EBADF; goto error_release;
  2.1284 -+  }
  2.1285 +- clo->block_ptrs = cloop_malloc(sizeof(cloop_block_ptr) * total_offsets);
  2.1286 +- if (!clo->block_ptrs)
  2.1287  + /* Allocate Memory for decompressors */
  2.1288  +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  2.1289  + clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
  2.1290  + if(!clo->zstream.workspace)
  2.1291 -+  {
  2.1292 +   {
  2.1293 +-   printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
  2.1294  +   printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
  2.1295  +          cloop_name, zlib_inflate_workspacesize());
  2.1296      error=-ENOMEM; goto error_release;
  2.1297     }
  2.1298 -- total_offsets = 1; /* Dummy total_offsets: will be filled in first time around */
  2.1299 -- for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++)
  2.1300 +- /* Read them offsets! */
  2.1301 +- if(clo->header_first)
  2.1302  + zlib_inflateInit(&clo->zstream);
  2.1303  +#endif
  2.1304  +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  2.1305 @@ -890,33 +713,20 @@
  2.1306  +#endif
  2.1307  + if (total_offsets + 1 == 0) /* Version 3 */
  2.1308     {
  2.1309 --   unsigned int offset = 0, num_readable;
  2.1310 --   size_t bytes_read = cloop_read_from_file(clo, file, bbuf,
  2.1311 --                                          i*clo->underlying_blksize,
  2.1312 --                                          clo->underlying_blksize);
  2.1313 --   if(bytes_read != clo->underlying_blksize)
  2.1314 +-   fs_read_position = sizeof(struct cloop_head);
  2.1315  +   struct cloop_tail tail;
  2.1316  +   if (isblkdev)
  2.1317 -     {
  2.1318 --     printk(KERN_ERR "%s: Bad file, read() of first %lu bytes returned %d.\n",
  2.1319 --                   cloop_name, (unsigned long)clo->underlying_blksize, (int)bytes_read);
  2.1320 --     error=-EBADF;
  2.1321 --     goto error_release;
  2.1322 ++    {
  2.1323  +    /* No end of file: can't find index */
  2.1324  +     printk(KERN_ERR "%s: no V3 support for block device\n", 
  2.1325  +            cloop_name);
  2.1326  +     error=-EBADF; goto error_release;
  2.1327 -     }
  2.1328 --   /* Header will be in block zero */
  2.1329 --   if(i==0)
  2.1330 ++    }
  2.1331  +   bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
  2.1332  +			inode->i_size - sizeof(struct cloop_tail),
  2.1333  +			sizeof(struct cloop_tail));
  2.1334  +   if (bytes_read == sizeof(struct cloop_tail))
  2.1335 -     {
  2.1336 --     memcpy(&clo->head, bbuf, sizeof(struct cloop_head));
  2.1337 --     offset = sizeof(struct cloop_head);
  2.1338 --     if (ntohl(clo->head.block_size) % 512 != 0)
  2.1339 ++    {
  2.1340  +     unsigned long len, zlen;
  2.1341  +     int ret;
  2.1342  +     void *zbuf;
  2.1343 @@ -926,79 +736,47 @@
  2.1344  +     zlen = ntohl(tail.table_size);
  2.1345  +     zbuf = cloop_malloc(zlen);
  2.1346  +     if (!clo->block_ptrs || !zbuf)
  2.1347 -       {
  2.1348 --       printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
  2.1349 --              cloop_name, ntohl(clo->head.block_size));
  2.1350 --       error=-EBADF; goto error_release;
  2.1351 --      }
  2.1352 --     if (clo->head.preamble[0x0B]!='V'||clo->head.preamble[0x0C]<'1')
  2.1353 --      {
  2.1354 --       printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
  2.1355 --		       "please use an older version of %s for this file.\n",
  2.1356 --		       cloop_name, cloop_name);
  2.1357 --       error=-EBADF; goto error_release;
  2.1358 ++      {
  2.1359  +       printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
  2.1360  +       error=-ENOMEM; goto error_release;
  2.1361 -       }
  2.1362 --     if (clo->head.preamble[0x0C]<'2')
  2.1363 ++      }
  2.1364  +     bytes_read = cloop_read_from_file(clo, file, zbuf,
  2.1365  +			inode->i_size - zlen - sizeof(struct cloop_tail),
  2.1366  +			zlen);
  2.1367  +     if (bytes_read != zlen)
  2.1368 -       {
  2.1369 --       printk(KERN_ERR "%s: Cannot read old architecture-dependent "
  2.1370 --		       "(format <= 1.0) images, please use an older "
  2.1371 --		       "version of %s for this file.\n",
  2.1372 --		       cloop_name, cloop_name);
  2.1373 ++      {
  2.1374  +       printk(KERN_ERR "%s: can't read index\n", cloop_name);
  2.1375 -        error=-EBADF; goto error_release;
  2.1376 -       }
  2.1377 --     total_offsets=ntohl(clo->head.num_blocks)+1;
  2.1378 --     if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
  2.1379 --                       total_offsets > inode->i_size))
  2.1380 ++       error=-EBADF; goto error_release;
  2.1381 ++      }
  2.1382  +     len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
  2.1383 -+     flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
  2.1384 -+// May  3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
  2.1385 -+printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 
  2.1386 -+		clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
  2.1387 -+     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
  2.1388 -+// May  3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at   (null)
  2.1389 -+printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
  2.1390 ++     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
  2.1391  +     cloop_free(zbuf, zlen);
  2.1392  +     if (ret != 0)
  2.1393 -       {
  2.1394 --       printk(KERN_ERR "%s: file too small for %u blocks\n",
  2.1395 --              cloop_name, ntohl(clo->head.num_blocks));
  2.1396 -+        printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
  2.1397 -+               cloop_name, ret, flags);
  2.1398 -        error=-EBADF; goto error_release;
  2.1399 -       }
  2.1400 --     clo->offsets = cloop_malloc(sizeof(loff_t) * total_offsets);
  2.1401 --     if (!clo->offsets)
  2.1402 --      {
  2.1403 --       printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
  2.1404 --       error=-ENOMEM; goto error_release;
  2.1405 --      }
  2.1406 -     }
  2.1407 --   num_readable = MIN(total_offsets - offsets_read,
  2.1408 --                      (clo->underlying_blksize - offset) 
  2.1409 --                      / sizeof(loff_t));
  2.1410 --   memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(loff_t));
  2.1411 --   offsets_read += num_readable;
  2.1412 --  }
  2.1413 --  { /* Search for largest block rather than estimate. KK. */
  2.1414 --   int i;
  2.1415 --   for(i=0;i<total_offsets-1;i++)
  2.1416 ++      {
  2.1417 ++        printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
  2.1418 ++               cloop_name, ret);
  2.1419 ++       error=-EBADF; goto error_release;
  2.1420 ++      }
  2.1421 ++    }
  2.1422  +   else
  2.1423  +    {
  2.1424  +     printk(KERN_ERR "%s: can't find index\n", cloop_name);
  2.1425  +     error=-ENOMEM; goto error_release;
  2.1426  +    }
  2.1427 -+  }
  2.1428 -+ else
  2.1429 -+  {
  2.1430 +   }
  2.1431 +  else
  2.1432 +   {
  2.1433 +-   fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_offsets * sizeof(loff_t);
  2.1434 +-  }
  2.1435 +- for(offsets_read=0;offsets_read<total_offsets;)
  2.1436 +-  {
  2.1437 +-   size_t bytes_readable;
  2.1438 +-   unsigned int num_readable, offset = 0;
  2.1439 +-   bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
  2.1440 +-   if(bytes_readable <= 0) break; /* Done */
  2.1441 +-   bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
  2.1442 +-   if(bytes_read != bytes_readable)
  2.1443  +   unsigned int n, total_bytes;
  2.1444 -+   flags = 0;
  2.1445  +   clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
  2.1446  +   if (!clo->block_ptrs)
  2.1447  +    {
  2.1448 @@ -1007,14 +785,26 @@
  2.1449  +    }
  2.1450  +   /* Read them offsets! */
  2.1451  +   if(clo->header_first)
  2.1452 -+    {
  2.1453 +     {
  2.1454 +-     printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
  2.1455 +-            cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
  2.1456 +-     error=-EBADF;
  2.1457 +-     goto error_release;
  2.1458  +     total_bytes = total_offsets * sizeof(struct block_info);
  2.1459  +     fs_read_position = sizeof(struct cloop_head);
  2.1460 -+    }
  2.1461 +     }
  2.1462 +-   /* remember where to read the next blk from file */
  2.1463 +-   fs_read_position += bytes_read;
  2.1464 +-   /* calculate how many offsets can be taken from current bbuf */
  2.1465 +-   num_readable = MIN(total_offsets - offsets_read,
  2.1466 +-                      bytes_read / sizeof(loff_t));
  2.1467 +-   DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
  2.1468 +-   for (i=0,offset=0; i<num_readable; i++)
  2.1469  +   else
  2.1470       {
  2.1471 --     loff_t d=be64_to_cpu(clo->offsets[i+1]) - be64_to_cpu(clo->offsets[i]);
  2.1472 --     clo->largest_block=MAX(clo->largest_block,d);
  2.1473 +-     loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
  2.1474 +-     if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
  2.1475 +-     if(offsets_read > 0)
  2.1476  +     total_bytes = total_offsets * sizeof(loff_t);
  2.1477  +     fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
  2.1478  +    }
  2.1479 @@ -1025,35 +815,28 @@
  2.1480  +     if(bytes_readable <= 0) break; /* Done */
  2.1481  +     bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
  2.1482  +     if(bytes_read != bytes_readable)
  2.1483 -+      {
  2.1484 +       {
  2.1485 +-       loff_t d = CLOOP_BLOCK_OFFSET(tmp) - CLOOP_BLOCK_OFFSET(clo->block_ptrs[offsets_read-1]);
  2.1486 +-       if(d > clo->largest_block) clo->largest_block = d;
  2.1487  +       printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
  2.1488  +              cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
  2.1489  +       error=-EBADF;
  2.1490  +       goto error_release;
  2.1491 -+      }
  2.1492 +       }
  2.1493 +-     clo->block_ptrs[offsets_read++] = tmp;
  2.1494 +-     offset += sizeof(loff_t);
  2.1495  +     memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
  2.1496  +     /* remember where to read the next blk from file */
  2.1497  +     fs_read_position += bytes_read;
  2.1498  +     n += bytes_read;
  2.1499       }
  2.1500 --   printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  2.1501 --          cloop_name, filename, ntohl(clo->head.num_blocks),
  2.1502 --          ntohl(clo->head.block_size), clo->largest_block);
  2.1503     }
  2.1504 --/* Combo kmalloc used too large chunks (>130000). */
  2.1505 +-  printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  2.1506 +-         cloop_name, clo->underlying_filename, clo->head.num_blocks,
  2.1507 +-         clo->head.block_size, clo->largest_block);
  2.1508    {
  2.1509     int i;
  2.1510 --  for(i=0;i<BUFFERED_BLOCKS;i++)
  2.1511 --   {
  2.1512 --    clo->buffer[i] = cloop_malloc(ntohl(clo->head.block_size));
  2.1513 --    if(!clo->buffer[i])
  2.1514 --     {
  2.1515 --      printk(KERN_ERR "%s: out of memory for buffer %lu\n",
  2.1516 --             cloop_name, (unsigned long) ntohl(clo->head.block_size));
  2.1517 --      error=-ENOMEM; goto error_release_free;
  2.1518 --     }
  2.1519 --   }
  2.1520 -+  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
  2.1521 ++  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
  2.1522  +  clo->largest_block = 0;
  2.1523  +  for (i = 0; i < clo->head.num_blocks; i++)
  2.1524  +    if (clo->block_ptrs[i].size > clo->largest_block)
  2.1525 @@ -1061,39 +844,15 @@
  2.1526  +  printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  2.1527  +         cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
  2.1528  +         clo->head.block_size, clo->largest_block);
  2.1529 -+ }
  2.1530 -+ {
  2.1531 -+  int i;
  2.1532 -+  clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  2.1533 -+                              (buffers / clo->head.block_size) : 1;
  2.1534 -+  clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  2.1535 -+  clo->buffer = cloop_malloc(clo->num_buffered_blocks * sizeof (char*));
  2.1536 -+  if (!clo->buffered_blocknum || !clo->buffer)
  2.1537 -+  {
  2.1538 -+   printk(KERN_ERR "%s: out of memory for index of cache buffer (%lu bytes)\n",
  2.1539 -+                    cloop_name, (unsigned long)clo->num_buffered_blocks * sizeof (u_int32_t) + sizeof(char*) );
  2.1540 -+                    error=-ENOMEM; goto error_release;
  2.1541 -+  }
  2.1542 -+  memset(clo->buffer, 0, clo->num_buffered_blocks * sizeof (char*));
  2.1543 -+  for(i=0;i<clo->num_buffered_blocks;i++)
  2.1544 -+  {
  2.1545 -+   clo->buffered_blocknum[i] = -1;
  2.1546 -+   clo->buffer[i] = cloop_malloc(clo->head.block_size);
  2.1547 -+   if(!clo->buffer[i])
  2.1548 -+    {
  2.1549 -+     printk(KERN_ERR "%s: out of memory for cache buffer %lu\n",
  2.1550 -+            cloop_name, (unsigned long) clo->head.block_size);
  2.1551 -+     error=-ENOMEM; goto error_release_free;
  2.1552 -+    }
  2.1553 -+  }
  2.1554 -+  clo->current_bufnum = 0;
  2.1555 -  }
  2.1556 -  clo->compressed_buffer = cloop_malloc(clo->largest_block);
  2.1557 -  if(!clo->compressed_buffer)
  2.1558 -@@ -557,31 +803,7 @@
  2.1559 +   clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  2.1560 +                               (buffers / clo->head.block_size) : 1;
  2.1561 +   clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  2.1562 +@@ -705,36 +793,14 @@
  2.1563             cloop_name, clo->largest_block);
  2.1564      error=-ENOMEM; goto error_release_free_buffer;
  2.1565     }
  2.1566 +- /* Allocate Memory for decompressors */
  2.1567 +-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  2.1568  - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
  2.1569  - if(!clo->zstream.workspace)
  2.1570  -  {
  2.1571 @@ -1102,443 +861,48 @@
  2.1572  -   error=-ENOMEM; goto error_release_free_all;
  2.1573  -  }
  2.1574  - zlib_inflateInit(&clo->zstream);
  2.1575 -- if(!isblkdev &&
  2.1576 --    be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]) != inode->i_size)
  2.1577 --  {
  2.1578 --   printk(KERN_ERR "%s: final offset wrong (%Lu not %Lu)\n",
  2.1579 +-#endif
  2.1580 +-#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  2.1581 +-#if XZ_INTERNAL_CRC32
  2.1582 +-  /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
  2.1583 +-  xz_crc32_init(void);
  2.1584 +-#endif
  2.1585 +-  clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
  2.1586 +-#endif
  2.1587 +- if(CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]) > clo->underlying_total_size)
  2.1588 ++ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  2.1589 ++ clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
  2.1590 ++ if(IS_ERR(clo->clo_thread))
  2.1591 +   {
  2.1592 +-   printk(KERN_ERR "%s: final offset wrong (%llu > %llu)\n",
  2.1593  -          cloop_name,
  2.1594 --          be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]),
  2.1595 --          inode->i_size);
  2.1596 +-	  CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]),
  2.1597 +-          clo->underlying_total_size);
  2.1598 +-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  2.1599  -   cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
  2.1600 --   goto error_release_free_all;
  2.1601 --  }
  2.1602 -- {
  2.1603 --  int i;
  2.1604 --  for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1;
  2.1605 --  clo->current_bufnum=0;
  2.1606 -- }
  2.1607 -- set_capacity(clo->clo_disk, (sector_t)(ntohl(clo->head.num_blocks)*
  2.1608 --              (ntohl(clo->head.block_size)>>9)));
  2.1609 -+ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  2.1610 -  clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
  2.1611 -  if(IS_ERR(clo->clo_thread))
  2.1612 -   {
  2.1613 -@@ -591,17 +813,17 @@
  2.1614 +-#endif
  2.1615 ++   error = PTR_ERR(clo->clo_thread);
  2.1616 ++   clo->clo_thread=NULL;
  2.1617 +    goto error_release_free_all;
  2.1618     }
  2.1619 +- set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  2.1620    if(preload > 0)
  2.1621     {
  2.1622 --   clo->preload_array_size = ((preload<=ntohl(clo->head.num_blocks))?preload:ntohl(clo->head.num_blocks));
  2.1623 -+   clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
  2.1624 -    clo->preload_size = 0;
  2.1625 -    if((clo->preload_cache = cloop_malloc(clo->preload_array_size * sizeof(char *))) != NULL)
  2.1626 -     {
  2.1627 -      int i;
  2.1628 -      for(i=0; i<clo->preload_array_size; i++)
  2.1629 -       {
  2.1630 --       if((clo->preload_cache[i] = cloop_malloc(ntohl(clo->head.block_size))) == NULL)
  2.1631 -+       if((clo->preload_cache[i] = cloop_malloc(clo->head.block_size)) == NULL)
  2.1632 -         { /* Out of memory */
  2.1633 -          printk(KERN_WARNING "%s: cloop_malloc(%d) failed for preload_cache[%d] (ignored).\n",
  2.1634 --                             cloop_name, ntohl(clo->head.block_size), i);
  2.1635 -+                             cloop_name, clo->head.block_size, i);
  2.1636 - 	 break;
  2.1637 - 	}
  2.1638 -       }
  2.1639 -@@ -612,13 +834,13 @@
  2.1640 -        if(buffered_blocknum >= 0)
  2.1641 -         {
  2.1642 - 	 memcpy(clo->preload_cache[i], clo->buffer[buffered_blocknum],
  2.1643 --	        ntohl(clo->head.block_size));
  2.1644 -+	        clo->head.block_size);
  2.1645 - 	}
  2.1646 -        else
  2.1647 -         {
  2.1648 -          printk(KERN_WARNING "%s: can't read block %d into preload cache, set to zero.\n",
  2.1649 - 	                     cloop_name, i);
  2.1650 --	 memset(clo->preload_cache[i], 0, ntohl(clo->head.block_size));
  2.1651 -+	 memset(clo->preload_cache[i], 0, clo->head.block_size);
  2.1652 - 	}
  2.1653 -       }
  2.1654 -      printk(KERN_INFO "%s: preloaded %d blocks into cache.\n", cloop_name,
  2.1655 -@@ -641,22 +863,19 @@
  2.1656 -  cloop_free(clo->compressed_buffer, clo->largest_block);
  2.1657 -  clo->compressed_buffer=NULL;
  2.1658 - error_release_free_buffer:
  2.1659 -+ if(clo->buffer)
  2.1660 -  {
  2.1661 -   int i;
  2.1662 --  for(i=0; i<BUFFERED_BLOCKS; i++)
  2.1663 --   { 
  2.1664 --    if(clo->buffer[i])
  2.1665 --     {
  2.1666 --      cloop_free(clo->buffer[i], ntohl(clo->head.block_size));
  2.1667 --      clo->buffer[i]=NULL;
  2.1668 --     }
  2.1669 --   }
  2.1670 -+  for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) { cloop_free(clo->buffer[i], clo->head.block_size); clo->buffer[i]=NULL; }}
  2.1671 -+  cloop_free(clo->buffer, clo->num_buffered_blocks*sizeof(char*)); clo->buffer=NULL;
  2.1672 +    clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
  2.1673 +@@ -780,6 +846,7 @@
  2.1674 +      clo->preload_array_size = clo->preload_size = 0;
  2.1675 +     }
  2.1676 +   }
  2.1677 ++ wake_up_process(clo->clo_thread);
  2.1678 +  /* Uncheck */
  2.1679 +  return error;
  2.1680 + error_release_free_all:
  2.1681 +@@ -794,9 +861,13 @@
  2.1682    }
  2.1683 -+ if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
  2.1684 +  if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
  2.1685   error_release_free:
  2.1686 -- cloop_free(clo->offsets, sizeof(loff_t) * total_offsets);
  2.1687 -- clo->offsets=NULL;
  2.1688 +- cloop_free(clo->block_ptrs, sizeof(cloop_block_ptr) * total_offsets);
  2.1689  + cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
  2.1690 -+ clo->block_ptrs=NULL;
  2.1691 - error_release:
  2.1692 -  if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
  2.1693 -+ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  2.1694 -  clo->backing_file=NULL;
  2.1695 -  return error;
  2.1696 - }
  2.1697 -@@ -673,7 +892,7 @@
  2.1698 -  if(clo->backing_file) return -EBUSY;
  2.1699 -  file = fget(arg); /* get filp struct from ioctl arg fd */
  2.1700 -  if(!file) return -EBADF;
  2.1701 -- error=cloop_set_file(cloop_num,file,"losetup_file");
  2.1702 -+ error=cloop_set_file(cloop_num,file);
  2.1703 -  set_device_ro(bdev, 1);
  2.1704 -  if(error) fput(file);
  2.1705 -  return error;
  2.1706 -@@ -684,29 +903,48 @@
  2.1707 - {
  2.1708 -  struct cloop_device *clo = cloop_dev[cloop_num];
  2.1709 -  struct file *filp = clo->backing_file;
  2.1710 -- int i;
  2.1711 -  if(clo->refcnt > 1)	/* we needed one fd for the ioctl */
  2.1712 -    return -EBUSY;
  2.1713 -  if(filp==NULL) return -EINVAL;
  2.1714 -  if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
  2.1715 -- if(filp!=initial_file) fput(filp);
  2.1716 -- else { filp_close(initial_file,0); initial_file=NULL; }
  2.1717 -+ if(filp!=initial_file)
  2.1718 -+  fput(filp);
  2.1719 -+ else
  2.1720 -+ {
  2.1721 -+  filp_close(initial_file,0);
  2.1722 -+  initial_file=NULL;
  2.1723 -+ }
  2.1724 -  clo->backing_file  = NULL;
  2.1725 -  clo->backing_inode = NULL;
  2.1726 -- if(clo->offsets) { cloop_free(clo->offsets, clo->underlying_blksize); clo->offsets = NULL; }
  2.1727 -+ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  2.1728 -+ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
  2.1729 -  if(clo->preload_cache)
  2.1730 --  {
  2.1731 --   for(i=0; i < clo->preload_size; i++)
  2.1732 --    cloop_free(clo->preload_cache[i], ntohl(clo->head.block_size));
  2.1733 --   cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
  2.1734 --   clo->preload_cache = NULL;
  2.1735 --   clo->preload_size = clo->preload_array_size = 0;
  2.1736 --  }
  2.1737 -- for(i=0; i<BUFFERED_BLOCKS; i++)
  2.1738 --      if(clo->buffer[i]) { cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); clo->buffer[i]=NULL; }
  2.1739 -+ {
  2.1740 -+  int i;
  2.1741 -+  for(i=0; i < clo->preload_size; i++)
  2.1742 -+   cloop_free(clo->preload_cache[i], clo->head.block_size);
  2.1743 -+  cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
  2.1744 -+  clo->preload_cache = NULL;
  2.1745 -+  clo->preload_size = clo->preload_array_size = 0;
  2.1746 -+ }
  2.1747 -+ if (clo->buffered_blocknum)
  2.1748 -+ {
  2.1749 -+  cloop_free(clo->buffered_blocknum, sizeof(int) * clo->num_buffered_blocks); clo->buffered_blocknum = NULL;
  2.1750 -+ }
  2.1751 -+ if (clo->buffer)
  2.1752 -+ {
  2.1753 -+  int i;
  2.1754 -+  for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) cloop_free(clo->buffer[i], clo->head.block_size); }
  2.1755 -+  cloop_free(clo->buffer, sizeof(char*) * clo->num_buffered_blocks); clo->buffer = NULL;
  2.1756 -+ }
  2.1757 -  if(clo->compressed_buffer) { cloop_free(clo->compressed_buffer, clo->largest_block); clo->compressed_buffer = NULL; }
  2.1758 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  2.1759 -  zlib_inflateEnd(&clo->zstream);
  2.1760 -  if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
  2.1761 -+#endif
  2.1762 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  2.1763 -+  xz_dec_end(clo->xzdecoderstate);
  2.1764 -+#endif
  2.1765 -  if(bdev) invalidate_bdev(bdev);
  2.1766 -  if(clo->clo_disk) set_capacity(clo->clo_disk, 0);
  2.1767 -  return 0;
  2.1768 -@@ -731,8 +969,8 @@
  2.1769 -                             const struct loop_info64 *info)
  2.1770 - {
  2.1771 -  if (!clo->backing_file) return -ENXIO;
  2.1772 -- memcpy(clo->clo_file_name, info->lo_file_name, LO_NAME_SIZE);
  2.1773 -- clo->clo_file_name[LO_NAME_SIZE-1] = 0;
  2.1774 -+ if(clo->underlying_filename) kfree(clo->underlying_filename);
  2.1775 -+ clo->underlying_filename = kstrdup(info->lo_file_name, GFP_KERNEL);
  2.1776 -  return 0;
  2.1777 - }
  2.1778 - 
  2.1779 -@@ -743,7 +981,11 @@
  2.1780 -  struct kstat stat;
  2.1781 -  int err;
  2.1782 -  if (!file) return -ENXIO;
  2.1783 -- err = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
  2.1784 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
  2.1785 -+ err = vfs_getattr(&file->f_path, &stat);
  2.1786 -+#else
  2.1787 -+ err = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
  2.1788 -+#endif
  2.1789 -  if (err) return err;
  2.1790 -  memset(info, 0, sizeof(*info));
  2.1791 -  info->lo_number  = clo->clo_number;
  2.1792 -@@ -753,7 +995,8 @@
  2.1793 -  info->lo_offset  = 0;
  2.1794 -  info->lo_sizelimit = 0;
  2.1795 -  info->lo_flags   = 0;
  2.1796 -- memcpy(info->lo_file_name, clo->clo_file_name, LO_NAME_SIZE);
  2.1797 -+ strncpy(info->lo_file_name, clo->underlying_filename, LO_NAME_SIZE);
  2.1798 -+ info->lo_file_name[LO_NAME_SIZE-1]=0;
  2.1799 -  return 0;
  2.1800 - }
  2.1801 - 
  2.1802 -@@ -833,8 +1076,6 @@
  2.1803 -  if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT;
  2.1804 -  return err;
  2.1805 - }
  2.1806 --/* EOF get/set_status */
  2.1807 --
  2.1808 - 
  2.1809 - static int cloop_ioctl(struct block_device *bdev, fmode_t mode,
  2.1810 - 	unsigned int cmd, unsigned long arg)
  2.1811 -@@ -914,21 +1155,20 @@
  2.1812 -  /* losetup uses write-open and flags=0x8002 to set a new file */
  2.1813 -  if(mode & FMODE_WRITE)
  2.1814 -   {
  2.1815 --   printk(KERN_WARNING "%s: Can't open device read-write in mode 0x%x\n", cloop_name, mode);
  2.1816 -+   printk(KERN_INFO "%s: Open in read-write mode 0x%x requested, ignored.\n", cloop_name, mode);
  2.1817 -    return -EROFS;
  2.1818 -   }
  2.1819 -  cloop_dev[cloop_num]->refcnt+=1;
  2.1820 -  return 0;
  2.1821 - }
  2.1822 - 
  2.1823 --static int cloop_close(struct gendisk *disk, fmode_t mode)
  2.1824 -+static void cloop_close(struct gendisk *disk, fmode_t mode)
  2.1825 - {
  2.1826 -- int cloop_num, err=0;
  2.1827 -- if(!disk) return 0;
  2.1828 -+ int cloop_num;
  2.1829 -+ if(!disk) return;
  2.1830 -  cloop_num=((struct cloop_device *)disk->private_data)->clo_number;
  2.1831 -- if(cloop_num < 0 || cloop_num > (cloop_count-1)) return 0;
  2.1832 -+ if(cloop_num < 0 || cloop_num > (cloop_count-1)) return;
  2.1833 -  cloop_dev[cloop_num]->refcnt-=1;
  2.1834 -- return err;
  2.1835 - }
  2.1836 - 
  2.1837 - static struct block_device_operations clo_fops =
  2.1838 -@@ -973,6 +1213,10 @@
  2.1839 -    goto error_out;
  2.1840 -   }
  2.1841 -  clo->clo_queue->queuedata = clo;
  2.1842 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  2.1843 -+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
  2.1844 -+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
  2.1845 -+#endif
  2.1846 -  clo->clo_disk = alloc_disk(1);
  2.1847 -  if(!clo->clo_disk)
  2.1848 -   {
  2.1849 -@@ -1004,6 +1248,11 @@
  2.1850 -  cloop_dev[cloop_num] = NULL;
  2.1851 - }
  2.1852 - 
  2.1853 -+/* LZ4 Stuff */
  2.1854 -+#if (defined USE_LZ4_INTERNAL)
  2.1855 -+#include "lz4_kmod.c"
  2.1856 -+#endif
  2.1857 -+
  2.1858 - static int __init cloop_init(void)
  2.1859 - {
  2.1860 -  int error=0;
  2.1861 -@@ -1044,7 +1293,7 @@
  2.1862 -      initial_file=NULL; /* if IS_ERR, it's NOT open. */
  2.1863 -     }
  2.1864 -    else
  2.1865 --     error=cloop_set_file(0,initial_file,file);
  2.1866 -+     error=cloop_set_file(0,initial_file);
  2.1867 -    if(error)
  2.1868 -     {
  2.1869 -      printk(KERN_ERR
  2.1870 -@@ -1052,9 +1301,6 @@
  2.1871 -             cloop_name, file, error);
  2.1872 -      goto init_out_dealloc;
  2.1873 -     }
  2.1874 --   if(namelen >= LO_NAME_SIZE) namelen = LO_NAME_SIZE-1;
  2.1875 --   memcpy(cloop_dev[0]->clo_file_name, file, namelen);
  2.1876 --   cloop_dev[0]->clo_file_name[namelen] = 0;
  2.1877 -   }
  2.1878 -  return 0;
  2.1879 - init_out_dealloc:
  2.1880 ---- cloop.h
  2.1881 -+++ cloop.h
  2.1882 -@@ -86,11 +86,8 @@
  2.1883 - struct cloop_tail
  2.1884 - {
  2.1885 - 	u_int32_t table_size; 
  2.1886 --	u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
  2.1887 -+	u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
  2.1888 - #define CLOOP3_INDEX_SIZE(x)    ((unsigned int)((x) & 0xF))
  2.1889 --#define CLOOP3_BLOCKS_FLAGS(x)  ((unsigned int)((x) & 0x70) >> 4)
  2.1890 --#define CLOOP3_TRUNCATED(x)     ((unsigned int)((x) & 0x80) >> 7)
  2.1891 --#define CLOOP3_LASTLEN(x)       (unsigned int)((x) >> 8)
  2.1892 - 	u_int32_t num_blocks;
  2.1893 - };
  2.1894 - 
  2.1895 -@@ -104,8 +101,10 @@
  2.1896 - };
  2.1897 - 
  2.1898 - static inline char *build_index(struct block_info *offsets, unsigned long n, 
  2.1899 --			unsigned long block_size, unsigned global_flags)
  2.1900 -+			unsigned long block_size)
  2.1901 - {
  2.1902 -+	static char v[11];
  2.1903 -+	u_int32_t flags = 0;
  2.1904 - 	u_int32_t *ofs32 = (u_int32_t *) offsets;
  2.1905 - 	loff_t    *ofs64 = (loff_t *) offsets;
  2.1906 - 
  2.1907 -@@ -130,8 +129,6 @@
  2.1908 - 		}
  2.1909 - 		else { /* V2.0/V4.0 */
  2.1910 - 			loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
  2.1911 --			u_int32_t flags;
  2.1912 --			static char v4[11];
  2.1913 - 			unsigned long i = n;
  2.1914 - 
  2.1915 - 			for (flags = 0; n-- ;) {
  2.1916 -@@ -149,12 +146,7 @@
  2.1917 - 					offsets[i] = offsets[offsets[i].offset];
  2.1918 - 				}
  2.1919 - 			}
  2.1920 --			strcpy(v4, (char *) "64BE v4.0a");
  2.1921 --			v4[10] = 'a' + ((flags-1) & 0xF);	// compressors used
  2.1922 --			if (flags > 0x10) {			// with links ?
  2.1923 --				v4[10] += 'A' - 'a';
  2.1924 --			}
  2.1925 --			return v4;
  2.1926 -+			strcpy(v, (char *) "64BE v4.0a");
  2.1927 - 		}
  2.1928 - 	}
  2.1929 - 	else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
  2.1930 -@@ -170,7 +162,6 @@
  2.1931 - 	else { /* V3.0 or V0.68 */
  2.1932 - 		unsigned long i;
  2.1933 - 		loff_t j;
  2.1934 --		static char v3[11];
  2.1935 - 		
  2.1936 - 		for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
  2.1937 - 		if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
  2.1938 -@@ -185,28 +176,33 @@
  2.1939 - 		}
  2.1940 - 		
  2.1941 - 		v3_64 = (ofs32[1] == 0);
  2.1942 --		for (i = n; i-- != 0; )
  2.1943 -+		for (i = n; i-- != 0; ) {
  2.1944 - 			offsets[i].size = ntohl(ofs32[i << v3_64]); 
  2.1945 --		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
  2.1946 --			offsets[i].offset = j;
  2.1947 --			offsets[i].flags = global_flags;
  2.1948 - 			if (offsets[i].size == 0xFFFFFFFF) {
  2.1949 --				offsets[i].flags = CLOOP_COMPRESSOR_NONE;
  2.1950 --				offsets[i].size = block_size;
  2.1951 -+				offsets[i].size = 0x10000000 | block_size;
  2.1952 - 			}
  2.1953 --			if ((offsets[i].size & 0x80000000) == 0) {
  2.1954 -+			offsets[i].flags = (offsets[i].size >> 28);
  2.1955 -+			offsets[i].size &= 0x0FFFFFFF; 
  2.1956 -+		}
  2.1957 -+		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
  2.1958 -+			offsets[i].offset = j;
  2.1959 -+			if (offsets[i].flags < 8) {
  2.1960 - 				j += offsets[i].size;
  2.1961 - 			}
  2.1962 - 		}
  2.1963 - 		for (i = 0; i < n; i++) {
  2.1964 --			if (offsets[i].size & 0x80000000) {
  2.1965 --				offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
  2.1966 -+			flags |= 1 << offsets[i].flags;
  2.1967 -+			if (offsets[i].flags >= 8) {
  2.1968 -+				offsets[i] = offsets[offsets[i].size];
  2.1969 - 			}
  2.1970 - 		}
  2.1971 --		strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
  2.1972 --		v3[10] += global_flags;
  2.1973 --		return v3;
  2.1974 -+		strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
  2.1975 -+	}
  2.1976 -+	v[10] = 'a' + ((flags-1) & 0xF);	// compressors used
  2.1977 -+	if (flags > 0x10) {			// with links ?
  2.1978 -+		v[10] += 'A' - 'a';
  2.1979 - 	}
  2.1980 -+	return v;
  2.1981 - }
  2.1982 - 
  2.1983 - /* Cloop suspend IOCTL */
  2.1984 ---- cloop.c
  2.1985 -+++ cloop.c
  2.1986 -@@ -542,7 +542,7 @@
  2.1987 -  const unsigned int header_size = sizeof(struct cloop_head);
  2.1988 -  unsigned int i, total_offsets=0;
  2.1989 -  loff_t fs_read_position = 0, header_pos[2];
  2.1990 -- int flags, isblkdev, bytes_read, error = 0;
  2.1991 -+ int isblkdev, bytes_read, error = 0;
  2.1992 -  if (clo->suspended) return error;
  2.1993 -  #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
  2.1994 -  inode = file->f_dentry->d_inode;
  2.1995 -@@ -698,18 +698,12 @@
  2.1996 -        error=-EBADF; goto error_release;
  2.1997 -       }
  2.1998 -      len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
  2.1999 --     flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
  2.2000 --// May  3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
  2.2001 --printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 
  2.2002 --		clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
  2.2003 --     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
  2.2004 --// May  3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at   (null)
  2.2005 --printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
  2.2006 -+     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
  2.2007 -      cloop_free(zbuf, zlen);
  2.2008 -      if (ret != 0)
  2.2009 -       {
  2.2010 --        printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
  2.2011 --               cloop_name, ret, flags);
  2.2012 -+        printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
  2.2013 -+               cloop_name, ret);
  2.2014 -        error=-EBADF; goto error_release;
  2.2015 -       }
  2.2016 -     }
  2.2017 -@@ -722,7 +716,6 @@
  2.2018 -  else
  2.2019 -   {
  2.2020 -    unsigned int n, total_bytes;
  2.2021 --   flags = 0;
  2.2022 -    clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
  2.2023 -    if (!clo->block_ptrs)
  2.2024 -     {
  2.2025 -@@ -761,7 +754,7 @@
  2.2026 -   }
  2.2027 -  {
  2.2028 -   int i;
  2.2029 --  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
  2.2030 -+  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
  2.2031 -   clo->largest_block = 0;
  2.2032 -   for (i = 0; i < clo->head.num_blocks; i++)
  2.2033 -     if (clo->block_ptrs[i].size > clo->largest_block)
  2.2034 -@@ -769,9 +762,6 @@
  2.2035 -   printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  2.2036 -          cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
  2.2037 -          clo->head.block_size, clo->largest_block);
  2.2038 -- }
  2.2039 -- {
  2.2040 --  int i;
  2.2041 -   clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  2.2042 -                               (buffers / clo->head.block_size) : 1;
  2.2043 -   clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  2.2044 -@@ -874,6 +864,10 @@
  2.2045 -  cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
  2.2046    clo->block_ptrs=NULL;
  2.2047   error_release:
  2.2048  +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  2.2049 @@ -1548,3 +912,146 @@
  2.2050    if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
  2.2051    if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  2.2052    clo->backing_file=NULL;
  2.2053 +@@ -829,6 +900,7 @@
  2.2054 +  if(clo->refcnt > 1)	/* we needed one fd for the ioctl */
  2.2055 +    return -EBUSY;
  2.2056 +  if(filp==NULL) return -EINVAL;
  2.2057 ++ if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
  2.2058 +  if(filp!=initial_file)
  2.2059 +   fput(filp);
  2.2060 +  else
  2.2061 +@@ -839,7 +911,7 @@
  2.2062 +  clo->backing_file  = NULL;
  2.2063 +  clo->backing_inode = NULL;
  2.2064 +  if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  2.2065 +- if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks+1); clo->block_ptrs = NULL; }
  2.2066 ++ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
  2.2067 +  if(clo->preload_cache)
  2.2068 +  {
  2.2069 +   int i;
  2.2070 +@@ -1054,15 +1126,15 @@
  2.2071 +   case LOOP_CLR_FD:       /* Change arg */ 
  2.2072 +   case LOOP_GET_STATUS64: /* Change arg */ 
  2.2073 +   case LOOP_SET_STATUS64: /* Change arg */ 
  2.2074 +-    return cloop_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
  2.2075 ++	arg = (unsigned long) compat_ptr(arg);
  2.2076 +   case LOOP_SET_STATUS:   /* unchanged */
  2.2077 +   case LOOP_GET_STATUS:   /* unchanged */
  2.2078 +   case LOOP_SET_FD:       /* unchanged */
  2.2079 +   case LOOP_CHANGE_FD:    /* unchanged */
  2.2080 +-    return cloop_ioctl(bdev, mode, cmd, arg);
  2.2081 +-  default:
  2.2082 +-    return -ENOIOCTLCMD;
  2.2083 ++	return cloop_ioctl(bdev, mode, cmd, arg);
  2.2084 ++	break;
  2.2085 +  }
  2.2086 ++ return -ENOIOCTLCMD;
  2.2087 + }
  2.2088 + #endif
  2.2089 + 
  2.2090 +@@ -1093,7 +1165,7 @@
  2.2091 +  cloop_dev[cloop_num]->refcnt-=1;
  2.2092 + }
  2.2093 + 
  2.2094 +-static const struct block_device_operations clo_fops =
  2.2095 ++static struct block_device_operations clo_fops =
  2.2096 + {
  2.2097 +         owner:		THIS_MODULE,
  2.2098 +         open:           cloop_open,
  2.2099 +@@ -1105,12 +1177,6 @@
  2.2100 + 	/* locked_ioctl ceased to exist in 2.6.36 */
  2.2101 + };
  2.2102 + 
  2.2103 +-static const struct blk_mq_ops cloop_mq_ops = {
  2.2104 +-	.queue_rq       = cloop_queue_rq,
  2.2105 +-/*	.init_request	= cloop_init_request, */
  2.2106 +-/*	.complete	= cloop_complete_rq, */
  2.2107 +-};
  2.2108 +-
  2.2109 + static int cloop_register_blkdev(int major_nr)
  2.2110 + {
  2.2111 +  return register_blkdev(major_nr, cloop_name);
  2.2112 +@@ -1124,37 +1190,33 @@
  2.2113 + 
  2.2114 + static int cloop_alloc(int cloop_num)
  2.2115 + {
  2.2116 +- struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
  2.2117 ++ struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));;
  2.2118 +  if(clo == NULL) goto error_out;
  2.2119 +  cloop_dev[cloop_num] = clo;
  2.2120 +  memset(clo, 0, sizeof(struct cloop_device));
  2.2121 +  clo->clo_number = cloop_num;
  2.2122 +- clo->tag_set.ops = &cloop_mq_ops;
  2.2123 +- clo->tag_set.nr_hw_queues = 1;
  2.2124 +- clo->tag_set.queue_depth = 128;
  2.2125 +- clo->tag_set.numa_node = NUMA_NO_NODE;
  2.2126 +- clo->tag_set.cmd_size = 0; /* No extra data needed */
  2.2127 +- /* BLK_MQ_F_BLOCKING is extremely important if we want to call blocking functions like vfs_read */
  2.2128 +- clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
  2.2129 +- clo->tag_set.driver_data = clo;
  2.2130 +- if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
  2.2131 +- clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
  2.2132 +- if(IS_ERR(clo->clo_queue))
  2.2133 ++ clo->clo_thread = NULL;
  2.2134 ++ init_waitqueue_head(&clo->clo_event);
  2.2135 ++ spin_lock_init(&clo->queue_lock);
  2.2136 ++ mutex_init(&clo->clo_ctl_mutex);
  2.2137 ++ INIT_LIST_HEAD(&clo->clo_list);
  2.2138 ++ clo->clo_queue = blk_init_queue(cloop_do_request, &clo->queue_lock);
  2.2139 ++ if(!clo->clo_queue)
  2.2140 +   {
  2.2141 +    printk(KERN_ERR "%s: Unable to alloc queue[%d]\n", cloop_name, cloop_num);
  2.2142 +-   goto error_out_free_tags;
  2.2143 ++   goto error_out;
  2.2144 +   }
  2.2145 +  clo->clo_queue->queuedata = clo;
  2.2146 +- blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
  2.2147 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  2.2148 ++ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
  2.2149 ++ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
  2.2150 ++#endif
  2.2151 +  clo->clo_disk = alloc_disk(1);
  2.2152 +  if(!clo->clo_disk)
  2.2153 +   {
  2.2154 +    printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
  2.2155 +-   goto error_out_free_queue;
  2.2156 ++   goto error_disk;
  2.2157 +   }
  2.2158 +- spin_lock_init(&clo->queue_lock);
  2.2159 +- mutex_init(&clo->clo_ctl_mutex);
  2.2160 +- mutex_init(&clo->clo_rq_mutex);
  2.2161 +  clo->clo_disk->major = cloop_major;
  2.2162 +  clo->clo_disk->first_minor = cloop_num;
  2.2163 +  clo->clo_disk->fops = &clo_fops;
  2.2164 +@@ -1163,12 +1225,8 @@
  2.2165 +  sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
  2.2166 +  add_disk(clo->clo_disk);
  2.2167 +  return 0;
  2.2168 +-error_out_free_queue:
  2.2169 ++error_disk:
  2.2170 +  blk_cleanup_queue(clo->clo_queue);
  2.2171 +-error_out_free_tags:
  2.2172 +- blk_mq_free_tag_set(&clo->tag_set);
  2.2173 +-error_out_free_clo:
  2.2174 +- cloop_free(clo, sizeof(struct cloop_device));
  2.2175 + error_out:
  2.2176 +  return -ENOMEM;
  2.2177 + }
  2.2178 +@@ -1179,7 +1237,6 @@
  2.2179 +  if(clo == NULL) return;
  2.2180 +  del_gendisk(clo->clo_disk);
  2.2181 +  blk_cleanup_queue(clo->clo_queue);
  2.2182 +- blk_mq_free_tag_set(&clo->tag_set);
  2.2183 +  put_disk(clo->clo_disk);
  2.2184 +  cloop_free(clo, sizeof(struct cloop_device));
  2.2185 +  cloop_dev[cloop_num] = NULL;
  2.2186 +--- cloop_suspend.c
  2.2187 ++++ cloop_suspend.c
  2.2188 +@@ -14,6 +14,7 @@
  2.2189 + #include <fcntl.h>
  2.2190 + #include <unistd.h>
  2.2191 + #include <stdio.h>
  2.2192 ++#include <stdint.h>
  2.2193 + 
  2.2194 + /* We don't use the structure, so that define does not hurt */
  2.2195 + #define dev_t int
     3.1 --- a/linux64-cloop/receipt	Sun May 08 13:06:36 2022 +0000
     3.2 +++ b/linux64-cloop/receipt	Sun May 08 16:45:21 2022 +0000
     3.3 @@ -2,16 +2,15 @@
     3.4  
     3.5  PACKAGE="linux64-cloop"
     3.6  SOURCE="cloop"
     3.7 -_VERSION="2.639-2"
     3.8 -#VERSION="$(sed '/+#define CLOOP_VERSION/!d;s|.* "\(.*\)"|\1|' stuff/cloop.u)"
     3.9 +_VERSION="3.14.1.3"
    3.10  VERSION="4.12"
    3.11  CATEGORY="base-system"
    3.12  MAINTAINER="pascal.bellard@slitaz.org"
    3.13  LICENSE="GPL2"
    3.14  SHORT_DESC="The read-only compressed loop device kernel module."
    3.15  WEB_SITE="http://knoppix.net/wiki/Cloop"
    3.16 -TARBALL="${SOURCE}_${_VERSION}.tar.gz"
    3.17 -WGET_URL="http://debian-knoppix.alioth.debian.org/packages/$SOURCE/$TARBALL"
    3.18 +TARBALL="${SOURCE}_${_VERSION}.tar.xz"
    3.19 +WGET_URL="http://deb.debian.org/debian/pool/main/c/$SOURCE/$TARBALL"
    3.20  PROVIDE="linux-cloop:linux64"
    3.21  
    3.22  DEPENDS="linux64"
    3.23 @@ -26,6 +25,7 @@
    3.24  {
    3.25  	patch -p0 < $stuff/cloop.u
    3.26  	make ARCH=x86_64 KERNEL_DIR="/usr/src/linux" cloop.ko && xz cloop.ko
    3.27 +	make cloop_suspend
    3.28  }
    3.29  	
    3.30  # Rules to gen a SliTaz package suitable for Tazpkg.
     4.1 --- a/linux64-cloop/stuff/cloop.u	Sun May 08 13:06:36 2022 +0000
     4.2 +++ b/linux64-cloop/stuff/cloop.u	Sun May 08 16:45:21 2022 +0000
     4.3 @@ -1,6 +1,6 @@
     4.4  --- cloop.h
     4.5  +++ cloop.h
     4.6 -@@ -1,15 +1,50 @@
     4.7 +@@ -1,3 +1,7 @@
     4.8  +#define CLOOP_SIGNATURE "#!/bin/sh"                      /* @ offset 0  */
     4.9  +#define CLOOP_SIGNATURE_SIZE 9
    4.10  +#define CLOOP_SIGNATURE_OFFSET 0x0
    4.11 @@ -8,99 +8,46 @@
    4.12   #ifndef _COMPRESSED_LOOP_H
    4.13   #define _COMPRESSED_LOOP_H
    4.14   
    4.15 --#define CLOOP_HEADROOM 128
    4.16 -+/*************************************************************************\
    4.17 -+* Starting with Format V4.0 (cloop version 4.x), cloop can now have two   *
    4.18 -+* alternative structures:                                                 *
    4.19 -+*                                                                         *
    4.20 -+* 1. Header first: "robust" format, handles missing blocks well           *
    4.21 -+* 2. Footer (header last): "streaming" format, easier to create           *
    4.22 -+*                                                                         *
    4.23 -+* The cloop kernel module autodetects both formats, and can (currently)   *
    4.24 -+* still handle the V2.0 format as well.                                   *
    4.25 -+*                                                                         *
    4.26 -+* 1. Header first:                                                        *
    4.27 -+*   +---------------------------- FIXED SIZE ---------------------------+ *
    4.28 -+*   |Signature (128 bytes)                                              | *
    4.29 -+*   |block_size (32bit number, network order)                           | *
    4.30 -+*   |num_blocks (32bit number, network order)                           | *
    4.31 -+*   +--------------------------- VARIABLE SIZE -------------------------+ *
    4.32 -+*   |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| *
    4.33 -+*   |compressed data blocks of variable size ...                        | *
    4.34 -+*   +-------------------------------------------------------------------+ *
    4.35 -+*                                                                         *
    4.36 -+* 2. Footer (header last):                                                *
    4.37 -+*   +--------------------------- VARIABLE SIZE -------------------------+ *
    4.38 -+*   |compressed data blocks of variable size ...                        | *
    4.39 -+*   |num_blocks * FlagsOffset (upper 4 bits flags, lower 64 bits offset)| *
    4.40 -+*   +---------------------------- FIXED SIZE ---------------------------+ *
    4.41 -+*   |Signature (128 bytes)                                              | *
    4.42 -+*   |block_size (32bit number, network order)                           | *
    4.43 -+*   |num_blocks (32bit number, network order)                           | *
    4.44 -+*   +-------------------------------------------------------------------+ *
    4.45 -+*                                                                         *
    4.46 -+* Offsets are always relative to beginning of file, in all formats.       *
    4.47 -+* The block index contains num_blocks+1 offsets, followed (1) or          *
    4.48 -+* preceded (2) by the compressed blocks.                                  *
    4.49 -+\*************************************************************************/
    4.50 +@@ -38,10 +42,6 @@
    4.51   
    4.52 --/* The cloop header usually looks like this:          */
    4.53 --/* #!/bin/sh                                          */
    4.54 --/* #V2.00 Format                                      */
    4.55 --/* ...padding up to CLOOP_HEADROOM...                 */
    4.56 --/* block_size (32bit number, network order)           */
    4.57 --/* num_blocks (32bit number, network order)           */
    4.58 -+#include <linux/types.h>   /* u_int32_t */
    4.59 -+
    4.60 -+#define CLOOP_HEADROOM 128
    4.61 + #include <linux/types.h>   /* u_int32_t */
    4.62   
    4.63 -+/* Header of fixed length, can be located at beginning or end of file   */
    4.64 - struct cloop_head
    4.65 - {
    4.66 - 	char preamble[CLOOP_HEADROOM];
    4.67 -@@ -17,9 +52,163 @@
    4.68 +-#ifndef __KERNEL__
    4.69 +-#include <stdint.h> /* regular uint64_t */
    4.70 +-#endif
    4.71 +-
    4.72 + #define CLOOP_HEADROOM 128
    4.73 + 
    4.74 + /* Header of fixed length, can be located at beginning or end of file   */
    4.75 +@@ -52,13 +52,6 @@
    4.76   	u_int32_t num_blocks;
    4.77   };
    4.78   
    4.79 -+/************************************************************************\
    4.80 -+*  CLOOP4 flags for each compressed block                                *
    4.81 -+*  Value   Meaning                                                       *
    4.82 -+*    0     GZIP/7ZIP compression (compatible with V2.0 Format)           *
    4.83 -+*    1     no compression (incompressible data)                          *
    4.84 -+*    2     xz compression (currently best space saver)                   *
    4.85 -+*    3     lz4 compression                                               *
    4.86 -+*    4     lzo compression (fastest)                                     *
    4.87 -+\************************************************************************/
    4.88 -+
    4.89 -+typedef uint64_t cloop_block_ptr;
    4.90 -+
    4.91 -+/* Get value of first 4 bits */
    4.92 -+#define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
    4.93 -+/* Get value of last 60 bits */
    4.94 -+#define CLOOP_BLOCK_OFFSET(x)  ((x) & 0x0fffffffffffffffLLU)
    4.95 -+
    4.96 -+#define CLOOP_COMPRESSOR_ZLIB  0x0
    4.97 -+#define CLOOP_COMPRESSOR_NONE  0x1
    4.98 -+#define CLOOP_COMPRESSOR_XZ    0x2
    4.99 -+#define CLOOP_COMPRESSOR_LZ4   0x3
   4.100 -+#define CLOOP_COMPRESSOR_LZO1X 0x4
   4.101 -+
   4.102 -+#define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
   4.103 -+
   4.104 +-#define CLOOP2_SIGNATURE "V2.0"                       /* @ offset 0x0b  */
   4.105 +-#define CLOOP2_SIGNATURE_SIZE 4
   4.106 +-#define CLOOP2_SIGNATURE_OFFSET 0x0b
   4.107 +-#define CLOOP4_SIGNATURE "V4.0"                       /* @ offset 0x0b  */
   4.108 +-#define CLOOP4_SIGNATURE_SIZE 4
   4.109 +-#define CLOOP4_SIGNATURE_OFFSET 0x0b
   4.110 +-
   4.111 + /************************************************************************\
   4.112 + *  CLOOP4 flags for each compressed block                                *
   4.113 + *  Value   Meaning                                                       *
   4.114 +@@ -84,6 +77,134 @@
   4.115 + 
   4.116 + #define CLOOP_COMPRESSOR_VALID(x) ((x) >= CLOOP_COMPRESSOR_ZLIB && (x) <= CLOOP_COMPRESSOR_LZO1X)
   4.117 + 
   4.118  +#define CLOOP_COMPRESSOR_LINK  0xF
   4.119  +
   4.120  +
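To make the encoding concrete: each 64-bit index entry carries the compressor flag in its top 4 bits and the file offset in the remaining 60 bits, exactly as the CLOOP_BLOCK_FLAGS/CLOOP_BLOCK_OFFSET macros above express. A small stand-alone sketch (user space, entry already converted from big-endian):

    #include <stdint.h>
    #include <stdio.h>

    #define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)(((x) & 0xf000000000000000ULL) >> 60))
    #define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffULL)

    int main(void)
    {
        uint64_t entry = (2ULL << 60) | 4096;   /* flag 2 = xz, data at byte offset 4096 */
        printf("flags=%u offset=%llu\n",
               CLOOP_BLOCK_FLAGS(entry),
               (unsigned long long)CLOOP_BLOCK_OFFSET(entry));
        return 0;                               /* prints: flags=2 offset=4096 */
    }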
   4.121 - /* data_index (num_blocks 64bit pointers, network order)...      */
   4.122 - /* compressed data (gzip block compressed format)...             */
   4.123 - 
   4.124 ++/* data_index (num_blocks 64bit pointers, network order)...      */
   4.125 ++/* compressed data (gzip block compressed format)...             */
   4.126 ++
   4.127  +struct cloop_tail
   4.128  +{
   4.129  +	u_int32_t table_size; 
   4.130 -+	u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
   4.131 ++	u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
   4.132  +#define CLOOP3_INDEX_SIZE(x)    ((unsigned int)((x) & 0xF))
   4.133 -+#define CLOOP3_BLOCKS_FLAGS(x)  ((unsigned int)((x) & 0x70) >> 4)
   4.134 -+#define CLOOP3_TRUNCATED(x)     ((unsigned int)((x) & 0x80) >> 7)
   4.135 -+#define CLOOP3_LASTLEN(x)       (unsigned int)((x) >> 8)
   4.136  +	u_int32_t num_blocks;
   4.137  +};
   4.138  +
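The index_size word of struct cloop_tail packs several fields, as its comment notes (size:4 unused:3 ctrl-c:1 lastlen:24). Only CLOOP3_INDEX_SIZE survives as a macro in this version of the patch; the two helpers below merely illustrate the remaining bit positions and are not part of the module:

    #include <stdint.h>

    #define CLOOP3_INDEX_SIZE(x)  ((unsigned int)((x) & 0xF))     /* bits 0-3  */

    static unsigned int v3_last_block_len(uint32_t index_size)
    {
        return (unsigned int)(index_size >> 8);                   /* bits 8-31 */
    }

    static unsigned int v3_truncated(uint32_t index_size)
    {
        return (unsigned int)((index_size >> 7) & 1);             /* bit 7 ("ctrl-c") */
    }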
   4.139 @@ -114,8 +61,10 @@
   4.140  +};
   4.141  +
   4.142  +static inline char *build_index(struct block_info *offsets, unsigned long n, 
   4.143 -+			unsigned long block_size, unsigned global_flags)
   4.144 ++			unsigned long block_size)
   4.145  +{
   4.146 ++	static char v[11];
   4.147 ++	u_int32_t flags = 0;
   4.148  +	u_int32_t *ofs32 = (u_int32_t *) offsets;
   4.149  +	loff_t    *ofs64 = (loff_t *) offsets;
   4.150  +
   4.151 @@ -140,8 +89,6 @@
   4.152  +		}
   4.153  +		else { /* V2.0/V4.0 */
   4.154  +			loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
   4.155 -+			u_int32_t flags;
   4.156 -+			static char v4[11];
   4.157  +			unsigned long i = n;
   4.158  +
   4.159  +			for (flags = 0; n-- ;) {
   4.160 @@ -159,12 +106,7 @@
   4.161  +					offsets[i] = offsets[offsets[i].offset];
   4.162  +				}
   4.163  +			}
   4.164 -+			strcpy(v4, (char *) "64BE v4.0a");
   4.165 -+			v4[10] = 'a' + ((flags-1) & 0xF);	// compressors used
   4.166 -+			if (flags > 0x10) {			// with links ?
   4.167 -+				v4[10] += 'A' - 'a';
   4.168 -+			}
   4.169 -+			return v4;
   4.170 ++			strcpy(v, (char *) "64BE v4.0a");
   4.171  +		}
   4.172  +	}
   4.173  +	else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
   4.174 @@ -180,7 +122,6 @@
   4.175  +	else { /* V3.0 or V0.68 */
   4.176  +		unsigned long i;
   4.177  +		loff_t j;
   4.178 -+		static char v3[11];
   4.179  +		
   4.180  +		for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
   4.181  +		if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
   4.182 @@ -195,28 +136,33 @@
   4.183  +		}
   4.184  +		
   4.185  +		v3_64 = (ofs32[1] == 0);
   4.186 -+		for (i = n; i-- != 0; )
   4.187 ++		for (i = n; i-- != 0; ) {
   4.188  +			offsets[i].size = ntohl(ofs32[i << v3_64]); 
   4.189 ++			if (offsets[i].size == 0xFFFFFFFF) {
   4.190 ++				offsets[i].size = 0x10000000 | block_size;
   4.191 ++			}
   4.192 ++			offsets[i].flags = (offsets[i].size >> 28);
   4.193 ++			offsets[i].size &= 0x0FFFFFFF; 
   4.194 ++		}
   4.195  +		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
   4.196  +			offsets[i].offset = j;
   4.197 -+			offsets[i].flags = global_flags;
   4.198 -+			if (offsets[i].size == 0xFFFFFFFF) {
   4.199 -+				offsets[i].flags = CLOOP_COMPRESSOR_NONE;
   4.200 -+				offsets[i].size = block_size;
   4.201 -+			}
   4.202 -+			if ((offsets[i].size & 0x80000000) == 0) {
   4.203 ++			if (offsets[i].flags < 8) {
   4.204  +				j += offsets[i].size;
   4.205  +			}
   4.206  +		}
   4.207  +		for (i = 0; i < n; i++) {
   4.208 -+			if (offsets[i].size & 0x80000000) {
   4.209 -+				offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
   4.210 ++			flags |= 1 << offsets[i].flags;
   4.211 ++			if (offsets[i].flags >= 8) {
   4.212 ++				offsets[i] = offsets[offsets[i].size];
   4.213  +			}
   4.214  +		}
   4.215 -+		strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
   4.216 -+		v3[10] += global_flags;
   4.217 -+		return v3;
   4.218 ++		strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
   4.219  +	}
   4.220 ++	v[10] = 'a' + ((flags-1) & 0xF);	// compressors used
   4.221 ++	if (flags > 0x10) {			// with links ?
   4.222 ++		v[10] += 'A' - 'a';
   4.223 ++	}
   4.224 ++	return v;
   4.225  +}
   4.226  +
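The last character of the version string returned by build_index() summarises what was found in the index: flags accumulates a bitmask with bit n set whenever a block with compressor flag n was seen, the mask is folded into a letter stored at v[10], and the letter is switched to upper case when the mask exceeds 0x10 (for example when link blocks are present). A stand-alone illustration of that final computation:

    #include <stdio.h>

    /* Same suffix computation as at the end of build_index() above. */
    static char version_suffix(unsigned int flags)   /* bitmask of block flags seen */
    {
        char c = 'a' + ((flags - 1) & 0xF);
        if (flags > 0x10)                            /* e.g. link blocks present */
            c += 'A' - 'a';
        return c;
    }

    int main(void)
    {
        printf("%c\n", version_suffix(1u << 0));                /* only zlib  -> 'a' */
        printf("%c\n", version_suffix((1u << 0) | (1u << 2)));  /* zlib + xz  -> 'e' */
        return 0;
    }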
   4.227   /* Cloop suspend IOCTL */
   4.228 @@ -224,661 +170,538 @@
   4.229   
   4.230  --- cloop.c
   4.231  +++ cloop.c
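Before the cloop.c hunks, one more illustration of the index layout described in cloop.h above: in the V2/V4 layouts the index holds num_blocks+1 big-endian entries, so the compressed data of block i spans the bytes between entry i and entry i+1. A user-space sketch (be64toh() is the glibc byte-swap helper; the function name is illustrative):

    #include <stdint.h>
    #include <endian.h>                 /* be64toh() on glibc */

    #define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffULL)

    /* Byte range of compressed block i in a V2/V4 image, given the raw index. */
    static void cloop_block_range(const uint64_t *raw_index, unsigned long i,
                                  uint64_t *start, uint64_t *len)
    {
        uint64_t a = CLOOP_BLOCK_OFFSET(be64toh(raw_index[i]));
        uint64_t b = CLOOP_BLOCK_OFFSET(be64toh(raw_index[i + 1]));
        *start = a;
        *len   = b - a;
    }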
   4.232 -@@ -1,26 +1,23 @@
   4.233 --/*
   4.234 -- *  compressed_loop.c: Read-only compressed loop blockdevice
   4.235 -- *  hacked up by Rusty in 1999, extended and maintained by Klaus Knopper
   4.236 -- *
   4.237 -- *  A cloop file looks like this:
   4.238 -- *  [32-bit uncompressed block size: network order]
   4.239 -- *  [32-bit number of blocks (n_blocks): network order]
   4.240 -- *  [64-bit file offsets of start of blocks: network order]
   4.241 -- *    ...
   4.242 -- *    (n_blocks + 1).
   4.243 -- * n_blocks consisting of:
   4.244 -- *   [compressed block]
   4.245 -- *
   4.246 -- * Every version greatly inspired by code seen in loop.c
   4.247 -- * by Theodore Ts'o, 3/29/93.
   4.248 -- *
   4.249 -- * Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper.
   4.250 -- * Redistribution of this file is permitted under the GNU Public License.
   4.251 -- *
   4.252 -- */
   4.253 -+/************************************************************************\
   4.254 -+* cloop.c: Read-only compressed loop blockdevice                         *
   4.255 -+* hacked up by Rusty in 1999, extended and maintained by Klaus Knopper   *
   4.256 -+*                                                                        *
   4.257 -+* For all supported cloop file formats, please check the file "cloop.h"  *
   4.258 -+* New in Version 4:                                                      *
   4.259 -+* - Header can be first or last in cloop file,                           *
   4.260 -+* - Different compression algorithms supported (compression type         *
   4.261 -+*   encoded in first 4 bytes of block offset address)                    *
   4.262 -+*                                                                        *
   4.263 -+* Every version greatly inspired by code seen in loop.c                  *
   4.264 -+* by Theodore Ts'o, 3/29/93.                                             *
   4.265 -+*                                                                        *
   4.266 -+* Copyright 1999-2009 by Paul `Rusty' Russell & Klaus Knopper.           *
   4.267 -+* Redistribution of this file is permitted under the GNU Public License  *
   4.268 -+* V2.                                                                    *
   4.269 -+\************************************************************************/
   4.270 +@@ -17,7 +17,7 @@
   4.271 + \************************************************************************/
   4.272   
   4.273   #define CLOOP_NAME "cloop"
   4.274 --#define CLOOP_VERSION "2.639"
   4.275 +-#define CLOOP_VERSION "5.3"
   4.276  +#define CLOOP_VERSION "4.12"
   4.277   #define CLOOP_MAX 8
   4.278   
   4.279   #ifndef KBUILD_MODNAME
   4.280 -@@ -47,8 +44,27 @@
   4.281 - #include <asm/div64.h> /* do_div() for 64bit division */
   4.282 - #include <asm/uaccess.h>
   4.283 - #include <asm/byteorder.h>
   4.284 --/* Use zlib_inflate from lib/zlib_inflate */
   4.285 -+/* Check for ZLIB, LZO1X, LZ4 decompression algorithms in kernel. */
   4.286 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   4.287 - #include <linux/zutil.h>
   4.288 -+#endif
   4.289 -+#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   4.290 -+#include <linux/lzo.h>
   4.291 -+#endif
   4.292 -+#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   4.293 -+#include <linux/lz4.h>
   4.294 -+#endif
   4.295 -+#if (defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE))
   4.296 -+#include <linux/decompress/unlzma.h>
   4.297 -+#endif
   4.298 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   4.299 -+#include <linux/xz.h>
   4.300 -+#endif
   4.301 -+
   4.302 -+#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE) || defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE) || defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE) || defined(CONFIG_DECOMPRESS_LZMA) || defined(CONFIG_DECOMPRESS_LZMA_MODULE) || defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE)))
   4.303 -+#error "No decompression library selected in kernel config!"
   4.304 -+#endif
   4.305 -+
   4.306 +@@ -68,7 +68,6 @@
   4.307   #include <linux/loop.h>
   4.308   #include <linux/kthread.h>
   4.309   #include <linux/compat.h>
   4.310 -@@ -92,47 +108,64 @@
   4.311 - #define DEBUGP(format, x...)
   4.312 - #endif
   4.313 +-#include <linux/blk-mq.h> /* new multiqueue infrastructure */
   4.314 + #include "cloop.h"
   4.315   
   4.316 -+/* Default size of buffer to keep some decompressed blocks in memory to speed up access */
   4.317 -+#define BLOCK_BUFFER_MEM (16*65536)
   4.318 -+
   4.319 - /* One file can be opened at module insertion time */
   4.320 - /* insmod cloop file=/path/to/file */
   4.321 - static char *file=NULL;
   4.322 - static unsigned int preload=0;
   4.323 - static unsigned int cloop_max=CLOOP_MAX;
   4.324 -+static unsigned int buffers=BLOCK_BUFFER_MEM;
   4.325 - module_param(file, charp, 0);
   4.326 - module_param(preload, uint, 0);
   4.327 - module_param(cloop_max, uint, 0);
   4.328 - MODULE_PARM_DESC(file, "Initial cloop image file (full path) for /dev/cloop");
   4.329 - MODULE_PARM_DESC(preload, "Preload n blocks of cloop data into memory");
   4.330 - MODULE_PARM_DESC(cloop_max, "Maximum number of cloop devices (default 8)");
   4.331 -+MODULE_PARM_DESC(buffers, "Size of buffer to keep uncompressed blocks in memory in MiB (default 1)");
   4.332 + /* New License scheme */
   4.333 +@@ -93,10 +92,7 @@
   4.334 + /* Use experimental major for now */
   4.335 + #define MAJOR_NR 240
   4.336   
   4.337 - static struct file *initial_file=NULL;
   4.338 - static int cloop_major=MAJOR_NR;
   4.339 +-#ifndef DEVICE_NAME
   4.340 +-#define DEVICE_NAME CLOOP_NAME
   4.341 +-#endif
   4.342 +-
   4.343 ++/* #define DEVICE_NAME CLOOP_NAME */
   4.344 + /* #define DEVICE_NR(device) (MINOR(device)) */
   4.345 + /* #define DEVICE_ON(device) */
   4.346 + /* #define DEVICE_OFF(device) */
   4.347 +@@ -143,7 +139,7 @@
   4.348 +  u_int32_t allflags;
   4.349   
   4.350 --/* Number of buffered decompressed blocks */
   4.351 --#define BUFFERED_BLOCKS 8
   4.352 - struct cloop_device
   4.353 - {
   4.354 -- /* Copied straight from the file */
   4.355 -+ /* Header filled from the file */
   4.356 -  struct cloop_head head;
   4.357 -+ int header_first;
   4.358 -+ int file_format;
   4.359 - 
   4.360 -- /* An array of offsets of compressed blocks within the file */
   4.361 -- loff_t *offsets;
   4.362 -+ /* An or'd sum of all flags of each compressed block (v3) */
   4.363 -+ u_int32_t allflags;
   4.364 -+
   4.365 -+ /* An array of cloop_ptr flags/offset for compressed blocks within the file */
   4.366 +  /* An array of cloop_ptr flags/offset for compressed blocks within the file */
   4.367 +- cloop_block_ptr *block_ptrs;
   4.368  + struct block_info *block_ptrs;
   4.369   
   4.370    /* We buffer some uncompressed blocks for performance */
   4.371 -- int buffered_blocknum[BUFFERED_BLOCKS];
   4.372 -- int current_bufnum;
   4.373 -- void *buffer[BUFFERED_BLOCKS];
   4.374 -- void *compressed_buffer;
   4.375 -- size_t preload_array_size; /* Size of pointer array in blocks */
   4.376 -- size_t preload_size;       /* Number of successfully allocated blocks */
   4.377 -- char **preload_cache;      /* Pointers to preloaded blocks */
   4.378 -+ size_t num_buffered_blocks;	/* how many uncompressed blocks buffered for performance */
   4.379 -+ int *buffered_blocknum;        /* list of numbers of uncompressed blocks in buffer */
   4.380 -+ int current_bufnum;            /* which block is current */
   4.381 -+ unsigned char **buffer;        /* cache space for num_buffered_blocks uncompressed blocks */
   4.382 -+ void *compressed_buffer;       /* space for the largest compressed block */
   4.383 -+ size_t preload_array_size;     /* Size of pointer array in blocks */
   4.384 -+ size_t preload_size;           /* Number of successfully allocated blocks */
   4.385 -+ char **preload_cache;          /* Pointers to preloaded blocks */
   4.386 - 
   4.387 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   4.388 -  z_stream zstream;
   4.389 -+#endif
   4.390 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   4.391 -+ struct xz_dec *xzdecoderstate;
   4.392 -+ struct xz_buf xz_buffer;
   4.393 -+#endif
   4.394 - 
   4.395 -  struct file   *backing_file;  /* associated file */
   4.396 -  struct inode  *backing_inode; /* for bmap */
   4.397 - 
   4.398 -+ unsigned char *underlying_filename;
   4.399 -  unsigned long largest_block;
   4.400 -  unsigned int underlying_blksize;
   4.401 -+ loff_t underlying_total_size;
   4.402 -  int clo_number;
   4.403 -  int refcnt;
   4.404 -  struct block_device *bdev;
   4.405 -@@ -147,7 +180,6 @@
   4.406 +  size_t num_buffered_blocks;	/* how many uncompressed blocks buffered for performance */
   4.407 +@@ -178,14 +174,16 @@
   4.408 +  spinlock_t queue_lock;
   4.409 +  /* mutex for ioctl() */
   4.410 +  struct mutex clo_ctl_mutex;
   4.411 +- /* mutex for request */
   4.412 +- struct mutex clo_rq_mutex;
   4.413 ++ struct list_head clo_list;
   4.414 ++ struct task_struct *clo_thread;
   4.415 ++ wait_queue_head_t clo_event;
   4.416    struct request_queue *clo_queue;
   4.417    struct gendisk *clo_disk;
   4.418 +- struct blk_mq_tag_set tag_set;
   4.419    int suspended;
   4.420 -- char clo_file_name[LO_NAME_SIZE];
   4.421   };
   4.422   
    4.423 - /* Changed in 2.639: cloop_dev is now an array of cloop_dev pointers,
   4.424 -@@ -156,52 +188,113 @@
    4.425 ++/* Changed in 2.639: cloop_dev is now an array of cloop_dev pointers,
   4.426 ++   so we can specify how many devices we need via parameters. */
   4.427 + static struct cloop_device **cloop_dev;
   4.428   static const char *cloop_name=CLOOP_NAME;
   4.429   static int cloop_count = 0;
   4.430 - 
   4.431 --#if (!(defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))) /* Must be compiled into kernel. */
   4.432 --#error  "Invalid Kernel configuration. CONFIG_ZLIB_INFLATE support is needed for cloop."
   4.433 --#endif
   4.434 --
   4.435 --/* Use __get_free_pages instead of vmalloc, allows up to 32 pages,
   4.436 -- * 2MB in one piece */
   4.437 - static void *cloop_malloc(size_t size)
   4.438 - {
   4.439 -- int order = get_order(size);
   4.440 -- if(order <= KMALLOC_MAX_ORDER)
   4.441 --   return (void *)kmalloc(size, GFP_KERNEL);
   4.442 -- else if(order < MAX_ORDER)
   4.443 --   return (void *)__get_free_pages(GFP_KERNEL, order);
   4.444 -+ /* kmalloc will fail after the system is running for a while, */
   4.445 -+ /* when large orders can't return contiguous memory. */
   4.446 -+ /* Let's just use vmalloc for now. :-/ */
   4.447 -+ /* int order = get_order(size); */
   4.448 -+ /* if(order <= KMALLOC_MAX_ORDER) */
   4.449 -+ /*  return (void *)kmalloc(size, GFP_KERNEL); */
   4.450 -+ /* else if(order < MAX_ORDER) */
   4.451 -+ /*  return (void *)__get_free_pages(GFP_KERNEL, order); */
   4.452 -  return (void *)vmalloc(size);
   4.453 +@@ -214,24 +212,21 @@
   4.454 +  vfree(mem);
   4.455   }
   4.456   
   4.457 - static void cloop_free(void *mem, size_t size)
   4.458 - {
   4.459 -- int order = get_order(size);
   4.460 -- if(order <= KMALLOC_MAX_ORDER)
   4.461 --   kfree(mem);
   4.462 -- else if(order < MAX_ORDER)
   4.463 --   free_pages((unsigned long)mem, order);
   4.464 -- else vfree(mem);
   4.465 -+ /* int order = get_order(size); */
   4.466 -+ /* if(order <= KMALLOC_MAX_ORDER) */
   4.467 -+ /*  kfree(mem); */
   4.468 -+ /* else if(order < MAX_ORDER) */
   4.469 -+ /*  free_pages((unsigned long)mem, order); */
   4.470 -+ /* else */
   4.471 -+ vfree(mem);
   4.472 - }
   4.473 - 
   4.474 --static int uncompress(struct cloop_device *clo,
   4.475 --                      unsigned char *dest, unsigned long *destLen,
   4.476 --                      unsigned char *source, unsigned long sourceLen)
   4.477 +-/* static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen) */
   4.478 +-static int uncompress(struct cloop_device *clo, u_int32_t block_num, u_int32_t compressed_length, unsigned long *uncompressed_length)
   4.479  +static int uncompress(struct cloop_device *clo, unsigned char *dest, unsigned long *destLen, unsigned char *source, unsigned long sourceLen, int flags) 
   4.480   {
   4.481 -- /* Most of this code can be found in fs/cramfs/uncompress.c */
   4.482 -- int err;
   4.483 -- clo->zstream.next_in = source;
   4.484 -- clo->zstream.avail_in = sourceLen;
   4.485 -- clo->zstream.next_out = dest;
   4.486 -- clo->zstream.avail_out = *destLen;
   4.487 -- err = zlib_inflateReset(&clo->zstream);
   4.488 -- if (err != Z_OK)
   4.489 --  {
   4.490 --   printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
   4.491 --   zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   4.492 --  }
   4.493 -- err = zlib_inflate(&clo->zstream, Z_FINISH);
   4.494 -- *destLen = clo->zstream.total_out;
   4.495 -- if (err != Z_STREAM_END) return err;
   4.496 -- return Z_OK;
   4.497 -+ int err = -1;
   4.498 -+ switch(flags)
   4.499 -+ {
   4.500 -+  case CLOOP_COMPRESSOR_NONE:
   4.501 +  int err = -1;
   4.502 +- int flags = CLOOP_BLOCK_FLAGS(clo->block_ptrs[block_num]);
   4.503 +  switch(flags)
   4.504 +  {
   4.505 +   case CLOOP_COMPRESSOR_NONE:
   4.506 +-   /* block is umcompressed, swap pointers only! */
   4.507 +-   { char *tmp = clo->compressed_buffer; clo->compressed_buffer = clo->buffer[clo->current_bufnum]; clo->buffer[clo->current_bufnum] = tmp; }
   4.508 +-   DEBUGP("cloop: block %d is uncompressed (flags=%d), just swapping %u bytes\n", block_num, flags, compressed_length);
   4.509  +   memcpy(dest, source, *destLen = sourceLen);
   4.510  +   err = Z_OK;
   4.511 -+   break;
   4.512 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   4.513 -+  case CLOOP_COMPRESSOR_ZLIB:
   4.514 +    break;
   4.515 + #if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
   4.516 +   case CLOOP_COMPRESSOR_ZLIB:
   4.517 +-   clo->zstream.next_in = clo->compressed_buffer;
   4.518 +-   clo->zstream.avail_in = compressed_length;
   4.519 +-   clo->zstream.next_out = clo->buffer[clo->current_bufnum];
   4.520 +-   clo->zstream.avail_out = clo->head.block_size;
   4.521  +   clo->zstream.next_in = source;
   4.522  +   clo->zstream.avail_in = sourceLen;
   4.523  +   clo->zstream.next_out = dest;
   4.524  +   clo->zstream.avail_out = *destLen;
   4.525 -+   err = zlib_inflateReset(&clo->zstream);
   4.526 -+   if (err != Z_OK)
   4.527 -+   {
   4.528 -+    printk(KERN_ERR "%s: zlib_inflateReset error %d\n", cloop_name, err);
   4.529 -+    zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   4.530 -+   }
   4.531 -+   err = zlib_inflate(&clo->zstream, Z_FINISH);
   4.532 +    err = zlib_inflateReset(&clo->zstream);
   4.533 +    if (err != Z_OK)
   4.534 +    {
   4.535 +@@ -239,50 +234,50 @@
   4.536 +     zlib_inflateEnd(&clo->zstream); zlib_inflateInit(&clo->zstream);
   4.537 +    }
   4.538 +    err = zlib_inflate(&clo->zstream, Z_FINISH);
   4.539 +-   *uncompressed_length = clo->zstream.total_out;
   4.540  +   *destLen = clo->zstream.total_out;
   4.541 -+   if (err == Z_STREAM_END) err = 0;
   4.542 +    if (err == Z_STREAM_END) err = 0;
   4.543 +-   DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *uncompressed_length);
   4.544  +   DEBUGP("cloop: zlib decompression done, ret =%d, size =%lu\n", err, *destLen);
   4.545 -+   break;
   4.546 -+#endif
   4.547 -+#if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   4.548 -+  case CLOOP_COMPRESSOR_LZO1X:
   4.549 -+   {
   4.550 -+    size_t tmp = (size_t) clo->head.block_size;
   4.551 +    break;
   4.552 + #endif
   4.553 + #if (defined(CONFIG_LZO_DECOMPRESS) || defined(CONFIG_LZO_DECOMPRESS_MODULE))
   4.554 +   case CLOOP_COMPRESSOR_LZO1X:
   4.555 +    {
   4.556 +     size_t tmp = (size_t) clo->head.block_size;
   4.557 +-    err = lzo1x_decompress_safe(clo->compressed_buffer, compressed_length,
   4.558 +-             clo->buffer[clo->current_bufnum], &tmp);
   4.559 +-    if (err == LZO_E_OK) *uncompressed_length = (u_int32_t) tmp;
   4.560  +    err = lzo1x_decompress_safe(source, sourceLen,
   4.561  +             dest, &tmp);
   4.562  +    if (err == LZO_E_OK) *destLen = (u_int32_t) tmp;
   4.563 -+   }
   4.564 -+   break;
   4.565 -+#endif
   4.566 -+#if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   4.567 -+  case CLOOP_COMPRESSOR_LZ4:
   4.568 -+   {
   4.569 +    }
   4.570 +    break;
   4.571 + #endif
   4.572 + #if (defined(CONFIG_DECOMPRESS_LZ4) || defined(CONFIG_DECOMPRESS_LZ4_MODULE))
   4.573 +   case CLOOP_COMPRESSOR_LZ4:
   4.574 +    {
   4.575 +-    size_t outputSize = clo->head.block_size;
   4.576  +    size_t outputSize = *destLen;
   4.577 -+    /* We should adjust outputSize here, in case the last block is smaller than block_size */
   4.578 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
   4.579 +     /* We should adjust outputSize here, in case the last block is smaller than block_size */
   4.580 + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
   4.581 +-    err = lz4_decompress(clo->compressed_buffer, (size_t *) &compressed_length,
   4.582 +-                         clo->buffer[clo->current_bufnum], outputSize);
   4.583  +    err = lz4_decompress(source, (size_t *) &sourceLen,
   4.584  +                         dest, outputSize);
   4.585 -+#else
   4.586 + #else
   4.587 +-    err = LZ4_decompress_safe(clo->compressed_buffer,
   4.588 +-                              clo->buffer[clo->current_bufnum],
   4.589 +-                              compressed_length, outputSize);
   4.590  +    err = LZ4_decompress_safe(source,
   4.591  +                              dest,
   4.592  +                              sourceLen, outputSize);
   4.593 -+#endif
   4.594 -+    if (err >= 0) 
   4.595 -+    {
   4.596 -+     err = 0;
   4.597 + #endif
   4.598 +     if (err >= 0) 
   4.599 +     {
   4.600 +      err = 0;
   4.601 +-     *uncompressed_length = outputSize;
   4.602  +     *destLen = outputSize;
   4.603 -+    }
   4.604 -+   }
   4.605 -+  break;
   4.606 -+#endif
   4.607 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   4.608 -+ case CLOOP_COMPRESSOR_XZ:
   4.609 +     }
   4.610 +    }
   4.611 +   break;
   4.612 + #endif
   4.613 + #if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
   4.614 +  case CLOOP_COMPRESSOR_XZ:
   4.615 +-  clo->xz_buffer.in = clo->compressed_buffer;
   4.616  +  clo->xz_buffer.in = source;
   4.617 -+  clo->xz_buffer.in_pos = 0;
   4.618 +   clo->xz_buffer.in_pos = 0;
   4.619 +-  clo->xz_buffer.in_size = compressed_length;
   4.620 +-  clo->xz_buffer.out = clo->buffer[clo->current_bufnum];
   4.621  +  clo->xz_buffer.in_size = sourceLen;
   4.622  +  clo->xz_buffer.out = dest;
   4.623 -+  clo->xz_buffer.out_pos = 0;
   4.624 +   clo->xz_buffer.out_pos = 0;
   4.625 +-  clo->xz_buffer.out_size = clo->head.block_size;
   4.626  +  clo->xz_buffer.out_size = *destLen;
   4.627 -+  xz_dec_reset(clo->xzdecoderstate);
   4.628 -+  err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
   4.629 -+  if (err == XZ_STREAM_END || err == XZ_OK)
   4.630 -+  {
   4.631 -+   err = 0;
   4.632 -+  }
   4.633 -+  else
   4.634 -+  {
   4.635 -+   printk(KERN_ERR "%s: xz_dec_run error %d\n", cloop_name, err);
   4.636 -+   err = 1;
   4.637 -+  }
   4.638 -+  break;
   4.639 -+#endif
   4.640 -+ default:
   4.641 -+   printk(KERN_ERR "%s: compression method is not supported!\n", cloop_name);
   4.642 -+ }
   4.643 -+ return err;
   4.644 - }
   4.645 - 
   4.646 - static ssize_t cloop_read_from_file(struct cloop_device *clo, struct file *f, char *buf,
   4.647 -@@ -220,7 +313,7 @@
   4.648 +   xz_dec_reset(clo->xzdecoderstate);
   4.649 +   err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
   4.650 +   if (err == XZ_STREAM_END || err == XZ_OK)
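The reworked uncompress() above selects a decompressor from the per-block flag instead of assuming zlib. A user-space analogue of the same dispatch, limited to the "no compression" and zlib cases and using zlib's ordinary uncompress() call (the function name cloop_uncompress_block is illustrative, and the sketch assumes the compressed blocks are plain zlib streams):

    #include <string.h>
    #include <zlib.h>                            /* link with -lz */

    #define CLOOP_COMPRESSOR_ZLIB 0x0
    #define CLOOP_COMPRESSOR_NONE 0x1

    /* Returns 0 on success; *dest_len is set to the uncompressed size. */
    static int cloop_uncompress_block(Bytef *dest, uLongf *dest_len,
                                      const Bytef *src, uLong src_len, int flags)
    {
        switch (flags) {
        case CLOOP_COMPRESSOR_NONE:              /* stored block: plain copy */
            memcpy(dest, src, src_len);
            *dest_len = src_len;
            return 0;
        case CLOOP_COMPRESSOR_ZLIB:              /* zlib-compressed block */
            return uncompress(dest, dest_len, src, src_len) == Z_OK ? 0 : -1;
        default:                                 /* xz/lz4/lzo omitted from this sketch */
            return -1;
        }
    }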
   4.651 +@@ -309,16 +304,12 @@
   4.652 +  while (buf_done < buf_len)
   4.653 +   {
   4.654 +    size_t size = buf_len - buf_done, size_read;
   4.655 +-   mm_segment_t old_fs;
   4.656 +    /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
   4.657 +    /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
   4.658 +-
   4.659 +-   // mutex_lock(&clo->clo_rq_mutex);
   4.660 +-   old_fs = get_fs();
   4.661 +-   set_fs(KERNEL_DS);
   4.662 ++   mm_segment_t old_fs = get_fs();
   4.663 ++   set_fs(get_ds());
   4.664 +    size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
   4.665 +    set_fs(old_fs);
   4.666 +-   // mutex_unlock(&clo->clo_rq_mutex);
   4.667   
   4.668      if(size_read <= 0)
   4.669       {
   4.670 --     printk(KERN_ERR "%s: Read error %d at pos %Lu in file %s, "
   4.671 -+     printk(KERN_ERR "%s: Read error %d at pos %llu in file %s, "
   4.672 -                      "%d bytes lost.\n", cloop_name, (int)size_read, pos,
   4.673 - 		     file, (int)size);
   4.674 -      memset(buf + buf_len - size, 0, size);
   4.675 -@@ -232,72 +325,84 @@
   4.676 - }
   4.677 +@@ -358,8 +349,8 @@
   4.678 +    return i;
   4.679 +   }
   4.680   
   4.681 - /* This looks more complicated than it is */
   4.682 --/* Returns number of block buffer to use for this request */
   4.683 -+/* Returns number of cache block buffer to use for this request */
   4.684 - static int cloop_load_buffer(struct cloop_device *clo, int blocknum)
   4.685 - {
   4.686 -- unsigned int buf_done = 0;
   4.687 -- unsigned long buflen;
   4.688 -- unsigned int buf_length;
   4.689 -+ loff_t compressed_block_offset;
   4.690 -+ long compressed_block_len;
   4.691 -+ long uncompressed_block_len=0;
   4.692 -  int ret;
   4.693 -  int i;
   4.694 -- if(blocknum > ntohl(clo->head.num_blocks) || blocknum < 0)
   4.695 --  {
   4.696 --   printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
   4.697 --                       cloop_name, blocknum);
   4.698 --   return -1;
   4.699 --  }
   4.700 -+ if(blocknum > clo->head.num_blocks || blocknum < 0)
   4.701 -+ {
   4.702 -+  printk(KERN_WARNING "%s: Invalid block number %d requested.\n",
   4.703 -+         cloop_name, blocknum);
   4.704 -+  return -1;
   4.705 -+ }
   4.706 - 
   4.707 -  /* Quick return if the block we seek is already in one of the buffers. */
   4.708 -  /* Return number of buffer */
   4.709 -- for(i=0; i<BUFFERED_BLOCKS; i++)
   4.710 -+ for(i=0; i<clo->num_buffered_blocks; i++)
   4.711 -   if (blocknum == clo->buffered_blocknum[i])
   4.712 --   {
   4.713 --    DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
   4.714 --    return i;
   4.715 --   }
   4.716 --
   4.717 -- buf_length = be64_to_cpu(clo->offsets[blocknum+1]) - be64_to_cpu(clo->offsets[blocknum]);
   4.718 --
   4.719 --/* Load one compressed block from the file. */
   4.720 -- cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
   4.721 --                    be64_to_cpu(clo->offsets[blocknum]), buf_length);
   4.722 -+  {
   4.723 -+   DEBUGP(KERN_INFO "cloop_load_buffer: Found buffered block %d\n", i);
   4.724 -+   return i;
   4.725 -+  }
   4.726 - 
   4.727 -- buflen = ntohl(clo->head.block_size);
   4.728 +- compressed_block_offset = CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]);
   4.729 +- compressed_block_len = (long) (CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum+1]) - compressed_block_offset) ;
   4.730  + compressed_block_offset = clo->block_ptrs[blocknum].offset;
   4.731  + compressed_block_len = (long) (clo->block_ptrs[blocknum].size) ;
   4.732   
   4.733 -- /* Go to next position in the block ring buffer */
   4.734 -- clo->current_bufnum++;
   4.735 -- if(clo->current_bufnum >= BUFFERED_BLOCKS) clo->current_bufnum = 0;
   4.736 -+ /* Load one compressed block from the file. */
   4.737 -+ if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
   4.738 -+ {
   4.739 -+  size_t n = cloop_read_from_file(clo, clo->backing_file, (char *)clo->compressed_buffer,
   4.740 -+                    compressed_block_offset, compressed_block_len);
   4.741 -+  if (n!= compressed_block_len)
   4.742 -+   {
   4.743 -+    printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
   4.744 +  /* Load one compressed block from the file. */
   4.745 +  if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
   4.746 +@@ -369,12 +360,12 @@
   4.747 +   if (n!= compressed_block_len)
   4.748 +    {
   4.749 +     printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
   4.750 +-     cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
   4.751  +     cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
   4.752 -+    /* return -1; */
   4.753 -+   }
   4.754 -+ } else {
   4.755 -+  printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
   4.756 +     /* return -1; */
   4.757 +    }
   4.758 +  } else {
   4.759 +   printk(KERN_ERR "%s: invalid data block len %ld bytes @ %lld from file %s\n",
   4.760 +-  cloop_name, compressed_block_len, clo->block_ptrs[blocknum], clo->underlying_filename);
   4.761  +  cloop_name, compressed_block_len, clo->block_ptrs[blocknum].offset, clo->underlying_filename);
   4.762 -+  return -1;
   4.763 -+ }
   4.764 -+  
   4.765 -+ /* Go to next position in the cache block buffer (which is used as a cyclic buffer) */
   4.766 -+ if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
   4.767 +   return -1;
   4.768 +  }
   4.769 +   
   4.770 +@@ -382,14 +373,16 @@
   4.771 +  if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;
   4.772   
   4.773    /* Do the uncompression */
   4.774 -- ret = uncompress(clo, clo->buffer[clo->current_bufnum], &buflen, clo->compressed_buffer,
   4.775 --                  buf_length);
   4.776 +- ret = uncompress(clo, blocknum, compressed_block_len, &uncompressed_block_len);
   4.777  + uncompressed_block_len = clo->head.block_size;
   4.778  + ret = uncompress(clo, clo->buffer[clo->current_bufnum], &uncompressed_block_len,
   4.779  +	 clo->compressed_buffer, compressed_block_len, clo->block_ptrs[blocknum].flags);
   4.780    /* DEBUGP("cloop: buflen after uncompress: %ld\n",buflen); */
   4.781    if (ret != 0)
   4.782 --  {
   4.783 --   printk(KERN_ERR "%s: zlib decompression error %i uncompressing block %u %u/%lu/%u/%u "
   4.784 --          "%Lu-%Lu\n", cloop_name, ret, blocknum,
   4.785 --	  ntohl(clo->head.block_size), buflen, buf_length, buf_done,
   4.786 --	  be64_to_cpu(clo->offsets[blocknum]), be64_to_cpu(clo->offsets[blocknum+1]));
   4.787 --   clo->buffered_blocknum[clo->current_bufnum] = -1;
   4.788 --   return -1;
   4.789 --  }
   4.790 -+ {
   4.791 -+  printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
   4.792 -+         cloop_name, ret, blocknum,
   4.793 +  {
   4.794 +   printk(KERN_ERR "%s: decompression error %i uncompressing block %u %lu bytes @ %llu, flags %u\n",
   4.795 +          cloop_name, ret, blocknum,
   4.796 +-         compressed_block_len, CLOOP_BLOCK_OFFSET(clo->block_ptrs[blocknum]),
   4.797 +-         CLOOP_BLOCK_FLAGS(clo->block_ptrs[blocknum]));
   4.798  +         compressed_block_len, clo->block_ptrs[blocknum].offset,
   4.799  +         clo->block_ptrs[blocknum].flags);
   4.800 -+         clo->buffered_blocknum[clo->current_bufnum] = -1;
   4.801 -+  return -1;
   4.802 -+ }
   4.803 -  clo->buffered_blocknum[clo->current_bufnum] = blocknum;
   4.804 +          clo->buffered_blocknum[clo->current_bufnum] = -1;
   4.805 +   return -1;
   4.806 +  }
   4.807 +@@ -397,107 +390,146 @@
   4.808    return clo->current_bufnum;
   4.809   }
   4.810   
   4.811 - /* This function does all the real work. */
   4.812 --/* returns "uptodate" */
   4.813 +-static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
   4.814 ++/* This function does all the real work. */
   4.815  +/* returns "uptodate"                    */
   4.816 - static int cloop_handle_request(struct cloop_device *clo, struct request *req)
   4.817 ++static int cloop_handle_request(struct cloop_device *clo, struct request *req)
   4.818   {
   4.819    int buffered_blocknum = -1;
   4.820    int preloaded = 0;
   4.821 -  loff_t offset     = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
   4.822 -- struct bio_vec *bvec;
   4.823 -+ struct bio_vec bvec;
   4.824 +- loff_t offset = (loff_t) blk_rq_pos(req)<<9;
   4.825 ++ loff_t offset     = (loff_t) blk_rq_pos(req)<<9; /* req->sector<<9 */
   4.826 +  struct bio_vec bvec;
   4.827    struct req_iterator iter;
   4.828 +- blk_status_t ret = BLK_STS_OK;
   4.829 +-
   4.830 +- if (unlikely(req_op(req) != REQ_OP_READ ))
   4.831 +- {
   4.832 +-  blk_dump_rq_flags(req, DEVICE_NAME " bad request");
   4.833 +-  return BLK_STS_IOERR;
   4.834 +- }
   4.835 +-
   4.836 +- if (unlikely(!clo->backing_file && !clo->suspended))
   4.837 +- {
   4.838 +-  DEBUGP("cloop_handle_request: not connected to a file\n");
   4.839 +-  return BLK_STS_IOERR;
   4.840 +- }
   4.841 +-
   4.842    rq_for_each_segment(bvec, req, iter)
   4.843 +- {
   4.844 +-  unsigned long len = bvec.bv_len;
   4.845 +-  loff_t to_offset  = bvec.bv_offset;
   4.846 +-
   4.847 +-  while(len > 0)
   4.848     {
   4.849 --   unsigned long len = bvec->bv_len;
   4.850 --   char *to_ptr      = kmap(bvec->bv_page) + bvec->bv_offset;
   4.851 +-   u_int32_t length_in_buffer;
   4.852 +-   loff_t block_offset = offset;
   4.853 +-   u_int32_t offset_in_buffer;
   4.854 +-   char *from_ptr, *to_ptr;
   4.855 +-   /* do_div (div64.h) returns the 64bit division remainder and  */
   4.856 +-   /* puts the result in the first argument, i.e. block_offset   */
   4.857 +-   /* becomes the blocknumber to load, and offset_in_buffer the  */
   4.858 +-   /* position in the buffer */
   4.859 +-   offset_in_buffer = do_div(block_offset, clo->head.block_size);
   4.860 +-   /* Lookup preload cache */
   4.861 +-   if(block_offset < clo->preload_size && clo->preload_cache != NULL && clo->preload_cache[block_offset] != NULL)
   4.862 +-   { /* Copy from cache */
   4.863 +-    preloaded = 1;
   4.864 +-    from_ptr = clo->preload_cache[block_offset];
   4.865 +-   }
   4.866 +-   else
   4.867 +-   {
   4.868 +-    preloaded = 0;
   4.869 +-    buffered_blocknum = cloop_load_buffer(clo,block_offset);
   4.870 +-    if(buffered_blocknum == -1)
   4.871  +   unsigned long len = bvec.bv_len;
   4.872  +   char *to_ptr      = kmap(bvec.bv_page) + bvec.bv_offset;
   4.873 -    while(len > 0)
   4.874 ++   while(len > 0)
   4.875       {
   4.876 -      u_int32_t length_in_buffer;
   4.877 -@@ -308,7 +413,7 @@
   4.878 -      /* puts the result in the first argument, i.e. block_offset   */
   4.879 -      /* becomes the blocknumber to load, and offset_in_buffer the  */
   4.880 -      /* position in the buffer */
   4.881 --     offset_in_buffer = do_div(block_offset, ntohl(clo->head.block_size));
   4.882 +-     ret = BLK_STS_IOERR;
   4.883 +-     break; /* invalid data, leave inner loop */
   4.884 ++     u_int32_t length_in_buffer;
   4.885 ++     loff_t block_offset = offset;
   4.886 ++     u_int32_t offset_in_buffer;
   4.887 ++     char *from_ptr;
   4.888 ++     /* do_div (div64.h) returns the 64bit division remainder and  */
   4.889 ++     /* puts the result in the first argument, i.e. block_offset   */
   4.890 ++     /* becomes the blocknumber to load, and offset_in_buffer the  */
   4.891 ++     /* position in the buffer */
   4.892  +     offset_in_buffer = do_div(block_offset, clo->head.block_size);
   4.893 -      /* Lookup preload cache */
   4.894 -      if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
   4.895 -         clo->preload_cache[block_offset] != NULL)
   4.896 -@@ -325,7 +430,7 @@
   4.897 -        from_ptr = clo->buffer[buffered_blocknum];
   4.898 -       }
   4.899 -      /* Now, at least part of what we want will be in the buffer. */
   4.900 --     length_in_buffer = ntohl(clo->head.block_size) - offset_in_buffer;
   4.901 ++     /* Lookup preload cache */
   4.902 ++     if(block_offset < clo->preload_size && clo->preload_cache != NULL &&
   4.903 ++        clo->preload_cache[block_offset] != NULL)
   4.904 ++      { /* Copy from cache */
   4.905 ++       preloaded = 1;
   4.906 ++       from_ptr = clo->preload_cache[block_offset];
   4.907 ++      }
   4.908 ++     else
   4.909 ++      {
   4.910 ++       preloaded = 0;
   4.911 ++       buffered_blocknum = cloop_load_buffer(clo,block_offset);
   4.912 ++       if(buffered_blocknum == -1) break; /* invalid data, leave inner loop */
   4.913 ++       /* Copy from buffer */
   4.914 ++       from_ptr = clo->buffer[buffered_blocknum];
   4.915 ++      }
   4.916 ++     /* Now, at least part of what we want will be in the buffer. */
   4.917  +     length_in_buffer = clo->head.block_size - offset_in_buffer;
   4.918 -      if(length_in_buffer > len)
   4.919 -       {
   4.920 - /*   DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
   4.921 -@@ -337,18 +442,19 @@
   4.922 -      len         -= length_in_buffer;
   4.923 -      offset      += length_in_buffer;
   4.924 -     } /* while inner loop */
   4.925 --   kunmap(bvec->bv_page);
   4.926 ++     if(length_in_buffer > len)
   4.927 ++      {
   4.928 ++/*   DEBUGP("Warning: length_in_buffer=%u > len=%u\n",
   4.929 ++                      length_in_buffer,len); */
   4.930 ++       length_in_buffer = len;
   4.931 ++      }
   4.932 ++     memcpy(to_ptr, from_ptr + offset_in_buffer, length_in_buffer);
   4.933 ++     to_ptr      += length_in_buffer;
   4.934 ++     len         -= length_in_buffer;
   4.935 ++     offset      += length_in_buffer;
   4.936 ++    } /* while inner loop */
   4.937  +   kunmap(bvec.bv_page);
   4.938  +   cond_resched();
   4.939 -   } /* end rq_for_each_segment*/
   4.940 -  return ((buffered_blocknum!=-1) || preloaded);
   4.941 - }
   4.942 - 
   4.943 - /* Adopted from loop.c, a kernel thread to handle physical reads and
   4.944 -- * decompression. */
   4.945 ++  } /* end rq_for_each_segment*/
   4.946 ++ return ((buffered_blocknum!=-1) || preloaded);
   4.947 ++}
   4.948 ++
   4.949 ++/* Adopted from loop.c, a kernel thread to handle physical reads and
   4.950  +   decompression. */
   4.951 - static int cloop_thread(void *data)
   4.952 - {
   4.953 -  struct cloop_device *clo = data;
   4.954 -  current->flags |= PF_NOFREEZE;
   4.955 -- set_user_nice(current, -15);
   4.956 ++static int cloop_thread(void *data)
   4.957 ++{
   4.958 ++ struct cloop_device *clo = data;
   4.959 ++ current->flags |= PF_NOFREEZE;
   4.960  + set_user_nice(current, 10);
   4.961 -  while (!kthread_should_stop()||!list_empty(&clo->clo_list))
   4.962 -   {
   4.963 -    int err;
   4.964 -@@ -390,10 +496,18 @@
   4.965 -    int rw;
   4.966 -  /* quick sanity checks */
   4.967 -    /* blk_fs_request() was removed in 2.6.36 */
   4.968 --   if (unlikely(req == NULL || (req->cmd_type != REQ_TYPE_FS)))
   4.969 ++ while (!kthread_should_stop()||!list_empty(&clo->clo_list))
   4.970 ++  {
   4.971 ++   int err;
   4.972 ++   err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) || 
   4.973 ++                                  kthread_should_stop());
   4.974 ++   if(unlikely(err))
   4.975 ++    {
   4.976 ++     DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
   4.977 ++     continue;
   4.978 +     }
   4.979 +-    /* Copy from buffer */
   4.980 +-    from_ptr = clo->buffer[buffered_blocknum];
   4.981 +-   }
   4.982 +-   /* Now, at least part of what we want will be in the buffer. */
   4.983 +-   length_in_buffer = clo->head.block_size - offset_in_buffer;
   4.984 +-   if(length_in_buffer > len)
   4.985 +-   {
   4.986 +-   /* DEBUGP("Warning: length_in_buffer=%u > len=%u\n", length_in_buffer,len); */
   4.987 +-    length_in_buffer = len;
   4.988 +-   }
   4.989 +-   to_ptr      = kmap_atomic(bvec.bv_page);
   4.990 +-   memcpy(to_ptr + to_offset, from_ptr + offset_in_buffer, length_in_buffer);
   4.991 +-   kunmap_atomic(to_ptr);
   4.992 +-   to_offset   += length_in_buffer;
   4.993 +-   len         -= length_in_buffer;
   4.994 +-   offset      += length_in_buffer;
   4.995 +-  } /* while inner loop */
   4.996 +- } /* rq_for_each_segment */
   4.997 +- return ret;
   4.998 +-}
   4.999 +-
  4.1000 +-static blk_status_t cloop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd)
  4.1001 +-{
  4.1002 +-//  struct request_queue *q  = hctx->queue;
  4.1003 +-//  struct cloop_device *clo = q->queuedata;
  4.1004 +- struct request *req = bd->rq;
  4.1005 +- struct cloop_device *clo = req->rq_disk->private_data;
  4.1006 +- blk_status_t ret         = BLK_STS_OK;
  4.1007 +-
  4.1008 +-#if 1 /* Does it work when loading libraries? */
  4.1009 +- /* Since we have a buffered block list as well as data to read */
  4.1010 +- /* from disk (slow), and are (probably) never called from an   */
  4.1011 +- /* interrupt, we use a simple mutex lock right here to ensure  */
  4.1012 +- /* consistency.                                                */
  4.1013 +-  mutex_lock(&clo->clo_rq_mutex);
  4.1014 +- #else
  4.1015 +-  spin_lock_irq(&clo->queue_lock);
  4.1016 +- #endif
  4.1017 +- blk_mq_start_request(req);
  4.1018 +- do {
  4.1019 +-  ret = cloop_handle_request(clo, req);
  4.1020 +- } while(blk_update_request(req, ret, blk_rq_cur_bytes(req)));
  4.1021 +- blk_mq_end_request(req, ret);
  4.1022 +- #if 1 /* See above */
  4.1023 +-  mutex_unlock(&clo->clo_rq_mutex);
  4.1024 +- #else
  4.1025 +-  spin_unlock_irq(&clo->queue_lock);
  4.1026 +- #endif
  4.1027 +- return ret;
  4.1028 ++   if(!list_empty(&clo->clo_list))
  4.1029 ++    {
  4.1030 ++     struct request *req;
  4.1031 ++     unsigned long flags;
  4.1032 ++     int uptodate;
  4.1033 ++     spin_lock_irq(&clo->queue_lock);
  4.1034 ++     req = list_entry(clo->clo_list.next, struct request, queuelist);
  4.1035 ++     list_del_init(&req->queuelist);
  4.1036 ++     spin_unlock_irq(&clo->queue_lock);
  4.1037 ++     uptodate = cloop_handle_request(clo, req);
  4.1038 ++     spin_lock_irqsave(&clo->queue_lock, flags);
  4.1039 ++     __blk_end_request_all(req, uptodate ? 0 : -EIO);
  4.1040 ++     spin_unlock_irqrestore(&clo->queue_lock, flags);
  4.1041 ++    }
  4.1042 ++  }
  4.1043 ++ DEBUGP(KERN_ERR "cloop_thread exited.\n");
  4.1044 ++ return 0;
  4.1045 ++}
  4.1046 ++
  4.1047 ++/* This is called by the kernel block queue management every now and then,
   4.1048 ++ * with successive read requests queued and sorted in a (hopefully)
  4.1049 ++ * "most efficient way". spin_lock_irq() is being held by the kernel. */
  4.1050 ++static void cloop_do_request(struct request_queue *q)
  4.1051 ++{
  4.1052 ++ struct request *req;
  4.1053 ++ while((req = blk_fetch_request(q)) != NULL)
  4.1054 ++  {
  4.1055 ++   struct cloop_device *clo;
  4.1056 ++   int rw;
  4.1057 ++ /* quick sanity checks */
  4.1058 ++   /* blk_fs_request() was removed in 2.6.36 */
  4.1059  +   if (unlikely(req == NULL
  4.1060  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) /* field removed */
  4.1061  +   || (req->cmd_type != REQ_TYPE_FS)
  4.1062  +#endif
  4.1063  +   ))
  4.1064 -     goto error_continue;
  4.1065 -    rw = rq_data_dir(req);
  4.1066 --   if (unlikely(rw != READ && rw != READA))
  4.1067 ++    goto error_continue;
  4.1068 ++   rw = rq_data_dir(req);
  4.1069  +   if (unlikely(rw != READ
  4.1070  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
  4.1071  +                && rw != READA
  4.1072  +#endif
  4.1073  +    ))
  4.1074 -     {
  4.1075 -      DEBUGP("cloop_do_request: bad command\n");
  4.1076 -      goto error_continue;
  4.1077 -@@ -409,40 +523,51 @@
  4.1078 -    continue; /* next request */
  4.1079 -   error_continue:
  4.1080 -    DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
  4.1081 ++    {
  4.1082 ++     DEBUGP("cloop_do_request: bad command\n");
  4.1083 ++     goto error_continue;
  4.1084 ++    }
  4.1085 ++   clo = req->rq_disk->private_data;
  4.1086 ++   if (unlikely(!clo->backing_file && !clo->suspended))
  4.1087 ++    {
  4.1088 ++     DEBUGP("cloop_do_request: not connected to a file\n");
  4.1089 ++     goto error_continue;
  4.1090 ++    }
  4.1091 ++   list_add_tail(&req->queuelist, &clo->clo_list); /* Add to working list for thread */
  4.1092 ++   wake_up(&clo->clo_event);    /* Wake up cloop_thread */
  4.1093 ++   continue; /* next request */
  4.1094 ++  error_continue:
  4.1095 ++   DEBUGP(KERN_ERR "cloop_do_request: Discarding request %p.\n", req);
  4.1096  +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  4.1097 -    req->errors++;
  4.1098 ++   req->errors++;
  4.1099  +#else
  4.1100  +   req->error_count++;
  4.1101  +#endif
  4.1102 -    __blk_end_request_all(req, -EIO);
  4.1103 -   }
  4.1104 ++   __blk_end_request_all(req, -EIO);
  4.1105 ++  }
  4.1106   }
  4.1107   
  4.1108 --/* Read header and offsets from already opened file */
  4.1109 --static int cloop_set_file(int cloop_num, struct file *file, char *filename)
  4.1110 -+/* Read header, flags and offsets from already opened file */
  4.1111 -+static int cloop_set_file(int cloop_num, struct file *file)
  4.1112 - {
  4.1113 -  struct cloop_device *clo = cloop_dev[cloop_num];
  4.1114 -  struct inode *inode;
  4.1115 + /* Read header, flags and offsets from already opened file */
  4.1116 +@@ -508,7 +540,7 @@
  4.1117    char *bbuf=NULL;
  4.1118 -- unsigned int i, offsets_read, total_offsets;
  4.1119 -- int isblkdev;
  4.1120 -- int error = 0;
  4.1121 -+ unsigned int bbuf_size = 0;
  4.1122 -+ const unsigned int header_size = sizeof(struct cloop_head);
  4.1123 +  unsigned int bbuf_size = 0;
  4.1124 +  const unsigned int header_size = sizeof(struct cloop_head);
  4.1125 +- unsigned int i, offsets_read=0, total_offsets=0;
  4.1126  + unsigned int i, total_offsets=0;
  4.1127 -+ loff_t fs_read_position = 0, header_pos[2];
  4.1128 -+ int flags, isblkdev, bytes_read, error = 0;
  4.1129 -+ if (clo->suspended) return error;
  4.1130 -+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
  4.1131 -  inode = file->f_dentry->d_inode;
  4.1132 -+ clo->underlying_filename = kstrdup(file->f_dentry->d_name.name ? file->f_dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
  4.1133 -+ #else
  4.1134 -+ inode = file->f_path.dentry->d_inode;
  4.1135 -+ clo->underlying_filename = kstrdup(file->f_path.dentry->d_name.name ? file->f_path.dentry->d_name.name : (const unsigned char *)"anonymous filename", GFP_KERNEL);
  4.1136 -+ #endif
  4.1137 -  isblkdev=S_ISBLK(inode->i_mode)?1:0;
  4.1138 -  if(!isblkdev&&!S_ISREG(inode->i_mode))
  4.1139 +  loff_t fs_read_position = 0, header_pos[2];
  4.1140 +  int isblkdev, bytes_read, error = 0;
  4.1141 +  if (clo->suspended) return error;
  4.1142 +@@ -581,29 +613,19 @@
  4.1143 +     goto error_release;
  4.1144 +    }
  4.1145 +    memcpy(&clo->head, bbuf, header_size);
  4.1146 +-   if (strncmp(bbuf+CLOOP4_SIGNATURE_OFFSET, CLOOP4_SIGNATURE, CLOOP4_SIGNATURE_SIZE)==0)
  4.1147 ++   if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
  4.1148 +    {
  4.1149 +-    clo->file_format=4;
  4.1150 ++    clo->file_format++;
  4.1151 +     clo->head.block_size=ntohl(clo->head.block_size);
  4.1152 +     clo->head.num_blocks=ntohl(clo->head.num_blocks);
  4.1153 +     clo->header_first =  (i==0) ? 1 : 0;
  4.1154 +-    printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  4.1155 +-    break;
  4.1156 +-   }
  4.1157 +-   else if (strncmp(bbuf+CLOOP2_SIGNATURE_OFFSET, CLOOP2_SIGNATURE, CLOOP2_SIGNATURE_SIZE)==0)
  4.1158 +-   {
  4.1159 +-    clo->file_format=2;
  4.1160 +-    clo->head.block_size=ntohl(clo->head.block_size);
  4.1161 +-    clo->head.num_blocks=ntohl(clo->head.num_blocks);
  4.1162 +-    clo->header_first =  (i==0) ? 1 : 0;
  4.1163 +-    printk(KERN_INFO "%s: file %s version %d, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->file_format, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  4.1164 ++    printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  4.1165 +     break;
  4.1166 +    }
  4.1167 +   }
  4.1168 +  if (clo->file_format == 0)
  4.1169     {
  4.1170 -    printk(KERN_ERR "%s: %s not a regular file or block device\n",
  4.1171 --		   cloop_name, filename);
  4.1172 -+		   cloop_name, clo->underlying_filename);
  4.1173 +-   printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
  4.1174 +-                   "please use an older version of %s for this file.\n",
  4.1175 ++   printk(KERN_ERR "%s: Cannot detect %s format.\n",
  4.1176 +                    cloop_name, cloop_name);
  4.1177 +        error=-EBADF; goto error_release;
  4.1178 +   }
  4.1179 +@@ -613,67 +635,133 @@
  4.1180 +           cloop_name, clo->head.block_size);
  4.1181      error=-EBADF; goto error_release;
  4.1182     }
  4.1183 -  clo->backing_file = file;
  4.1184 -  clo->backing_inode= inode ;
  4.1185 -- if(!isblkdev&&inode->i_size<sizeof(struct cloop_head))
  4.1186 -+ clo->underlying_total_size = (isblkdev) ? inode->i_bdev->bd_inode->i_size : inode->i_size;
  4.1187 -+ if(clo->underlying_total_size < header_size)
  4.1188 +- total_offsets=clo->head.num_blocks+1;
  4.1189 +- if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
  4.1190 ++ total_offsets=clo->head.num_blocks;
  4.1191 ++ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
  4.1192 +                       total_offsets > inode->i_size))
  4.1193     {
  4.1194 --   printk(KERN_ERR "%s: %lu bytes (must be >= %u bytes)\n",
  4.1195 --                   cloop_name, (unsigned long)inode->i_size,
  4.1196 --		   (unsigned)sizeof(struct cloop_head));
  4.1197 -+   printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n",
  4.1198 -+                   cloop_name, clo->underlying_total_size,
  4.1199 -+		   (unsigned int)header_size);
  4.1200 +    printk(KERN_ERR "%s: file %s too small for %u blocks\n",
  4.1201 +           cloop_name, clo->underlying_filename, clo->head.num_blocks);
  4.1202      error=-EBADF; goto error_release;
  4.1203     }
  4.1204 -- /* In suspended mode, we have done all checks necessary - FF */
  4.1205 -- if (clo->suspended)
  4.1206 --   return error;
  4.1207 -  if(isblkdev)
  4.1208 -   {
  4.1209 -    struct request_queue *q = bdev_get_queue(inode->i_bdev);
  4.1210 -@@ -451,104 +576,225 @@
  4.1211 -    /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */
  4.1212 -    blk_queue_max_segment_size(clo->clo_queue, queue_max_segment_size(q));
  4.1213 -    blk_queue_segment_boundary(clo->clo_queue, queue_segment_boundary(q));
  4.1214 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
  4.1215 -    blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn);
  4.1216 -+#endif
  4.1217 -    clo->underlying_blksize = block_size(inode->i_bdev);
  4.1218 -   }
  4.1219 -  else
  4.1220 -    clo->underlying_blksize = PAGE_SIZE;
  4.1221 -- DEBUGP("Underlying blocksize is %u\n", clo->underlying_blksize);
  4.1222 -- bbuf = cloop_malloc(clo->underlying_blksize);
  4.1223 -+
  4.1224 -+ DEBUGP(KERN_INFO "Underlying blocksize of %s is %u\n", clo->underlying_filename, clo->underlying_blksize);
  4.1225 -+ DEBUGP(KERN_INFO "Underlying total size of %s is %llu\n", clo->underlying_filename, clo->underlying_total_size);
  4.1226 -+
  4.1227 -+ /* clo->underlying_blksize should be larger than header_size, even if it's only PAGE_SIZE */
  4.1228 -+ bbuf_size = clo->underlying_blksize;
  4.1229 -+ bbuf = cloop_malloc(bbuf_size);
  4.1230 -  if(!bbuf)
  4.1231 -   {
  4.1232 --   printk(KERN_ERR "%s: out of kernel mem for block buffer (%lu bytes)\n",
  4.1233 --                   cloop_name, (unsigned long)clo->underlying_blksize);
  4.1234 -+   printk(KERN_ERR "%s: out of kernel mem for buffer (%u bytes)\n",
  4.1235 -+                   cloop_name, (unsigned int) bbuf_size);
  4.1236 -+   error=-ENOMEM; goto error_release;
  4.1237 -+  }
  4.1238 -+
  4.1239 -+ header_pos[0] = 0; /* header first */
  4.1240 -+ header_pos[1] = clo->underlying_total_size - sizeof(struct cloop_head); /* header last */
  4.1241 -+ for(i=0; i<2; i++)
  4.1242 -+  {
  4.1243 -+   /* Check for header */
  4.1244 -+   size_t bytes_readable = MIN(clo->underlying_blksize, clo->underlying_total_size - header_pos[i]);
  4.1245 -+   size_t bytes_read = cloop_read_from_file(clo, file, bbuf, header_pos[i], bytes_readable);
  4.1246 -+   if(bytes_read != bytes_readable)
  4.1247 -+   {
  4.1248 -+    printk(KERN_ERR "%s: Bad file %s, read() of %s %u bytes returned %d.\n",
  4.1249 -+                    cloop_name, clo->underlying_filename, (i==0)?"first":"last",
  4.1250 -+		    (unsigned int)header_size, (int)bytes_read);
  4.1251 -+    error=-EBADF;
  4.1252 -+    goto error_release;
  4.1253 -+   }
  4.1254 -+   memcpy(&clo->head, bbuf, header_size);
  4.1255 -+   if (strncmp(bbuf+CLOOP_SIGNATURE_OFFSET, CLOOP_SIGNATURE, CLOOP_SIGNATURE_SIZE)==0)
  4.1256 -+   {
  4.1257 -+    clo->file_format++;
  4.1258 -+    clo->head.block_size=ntohl(clo->head.block_size);
  4.1259 -+    clo->head.num_blocks=ntohl(clo->head.num_blocks);
  4.1260 -+    clo->header_first =  (i==0) ? 1 : 0;
  4.1261 -+    printk(KERN_INFO "%s: file %s, %d blocks of %d bytes, header %s.\n", cloop_name, clo->underlying_filename, clo->head.num_blocks, clo->head.block_size, (i==0)?"first":"last");
  4.1262 -+    break;
  4.1263 -+   }
  4.1264 -+  }
  4.1265 -+ if (clo->file_format == 0)
  4.1266 -+  {
  4.1267 -+   printk(KERN_ERR "%s: Cannot detect %s format.\n",
  4.1268 -+                   cloop_name, cloop_name);
  4.1269 -+       error=-EBADF; goto error_release;
  4.1270 -+  }
  4.1271 -+ if (clo->head.block_size % 512 != 0)
  4.1272 -+  {
  4.1273 -+   printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
  4.1274 -+          cloop_name, clo->head.block_size);
  4.1275 -+   error=-EBADF; goto error_release;
  4.1276 -+  }
  4.1277 -+ total_offsets=clo->head.num_blocks;
  4.1278 -+ if (!isblkdev && (sizeof(struct cloop_head)+sizeof(struct block_info)*
  4.1279 -+                      total_offsets > inode->i_size))
  4.1280 -+  {
  4.1281 -+   printk(KERN_ERR "%s: file %s too small for %u blocks\n",
  4.1282 -+          cloop_name, clo->underlying_filename, clo->head.num_blocks);
  4.1283 -+   error=-EBADF; goto error_release;
  4.1284 -+  }
  4.1285 +- clo->block_ptrs = cloop_malloc(sizeof(cloop_block_ptr) * total_offsets);
  4.1286 +- if (!clo->block_ptrs)
  4.1287  + /* Allocate Memory for decompressors */
  4.1288  +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  4.1289  + clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
  4.1290  + if(!clo->zstream.workspace)
  4.1291 -+  {
  4.1292 +   {
  4.1293 +-   printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
  4.1294  +   printk(KERN_ERR "%s: out of mem for zlib working area %u\n",
  4.1295  +          cloop_name, zlib_inflate_workspacesize());
  4.1296      error=-ENOMEM; goto error_release;
  4.1297     }
  4.1298 -- total_offsets = 1; /* Dummy total_offsets: will be filled in first time around */
  4.1299 -- for (i = 0, offsets_read = 0; offsets_read < total_offsets; i++)
  4.1300 +- /* Read them offsets! */
  4.1301 +- if(clo->header_first)
  4.1302  + zlib_inflateInit(&clo->zstream);
  4.1303  +#endif
  4.1304  +#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  4.1305 @@ -890,33 +713,20 @@
  4.1306  +#endif
  4.1307  + if (total_offsets + 1 == 0) /* Version 3 */
  4.1308     {
  4.1309 --   unsigned int offset = 0, num_readable;
  4.1310 --   size_t bytes_read = cloop_read_from_file(clo, file, bbuf,
  4.1311 --                                          i*clo->underlying_blksize,
  4.1312 --                                          clo->underlying_blksize);
  4.1313 --   if(bytes_read != clo->underlying_blksize)
  4.1314 +-   fs_read_position = sizeof(struct cloop_head);
  4.1315  +   struct cloop_tail tail;
  4.1316  +   if (isblkdev)
  4.1317 -     {
  4.1318 --     printk(KERN_ERR "%s: Bad file, read() of first %lu bytes returned %d.\n",
  4.1319 --                   cloop_name, (unsigned long)clo->underlying_blksize, (int)bytes_read);
  4.1320 --     error=-EBADF;
  4.1321 --     goto error_release;
  4.1322 ++    {
  4.1323  +    /* No end of file: can't find index */
  4.1324  +     printk(KERN_ERR "%s: no V3 support for block device\n", 
  4.1325  +            cloop_name);
  4.1326  +     error=-EBADF; goto error_release;
  4.1327 -     }
  4.1328 --   /* Header will be in block zero */
  4.1329 --   if(i==0)
  4.1330 ++    }
  4.1331  +   bytes_read = cloop_read_from_file(clo, file, (void *) &tail,
  4.1332  +			inode->i_size - sizeof(struct cloop_tail),
  4.1333  +			sizeof(struct cloop_tail));
  4.1334  +   if (bytes_read == sizeof(struct cloop_tail))
  4.1335 -     {
  4.1336 --     memcpy(&clo->head, bbuf, sizeof(struct cloop_head));
  4.1337 --     offset = sizeof(struct cloop_head);
  4.1338 --     if (ntohl(clo->head.block_size) % 512 != 0)
  4.1339 ++    {
  4.1340  +     unsigned long len, zlen;
  4.1341  +     int ret;
  4.1342  +     void *zbuf;
  4.1343 @@ -926,79 +736,47 @@
  4.1344  +     zlen = ntohl(tail.table_size);
  4.1345  +     zbuf = cloop_malloc(zlen);
  4.1346  +     if (!clo->block_ptrs || !zbuf)
  4.1347 -       {
  4.1348 --       printk(KERN_ERR "%s: blocksize %u not multiple of 512\n",
  4.1349 --              cloop_name, ntohl(clo->head.block_size));
  4.1350 --       error=-EBADF; goto error_release;
  4.1351 --      }
  4.1352 --     if (clo->head.preamble[0x0B]!='V'||clo->head.preamble[0x0C]<'1')
  4.1353 --      {
  4.1354 --       printk(KERN_ERR "%s: Cannot read old 32-bit (version 0.68) images, "
  4.1355 --		       "please use an older version of %s for this file.\n",
  4.1356 --		       cloop_name, cloop_name);
  4.1357 --       error=-EBADF; goto error_release;
  4.1358 ++      {
  4.1359  +       printk(KERN_ERR "%s: out of kernel mem for index\n", cloop_name);
  4.1360  +       error=-ENOMEM; goto error_release;
  4.1361 -       }
  4.1362 --     if (clo->head.preamble[0x0C]<'2')
  4.1363 ++      }
  4.1364  +     bytes_read = cloop_read_from_file(clo, file, zbuf,
  4.1365  +			inode->i_size - zlen - sizeof(struct cloop_tail),
  4.1366  +			zlen);
  4.1367  +     if (bytes_read != zlen)
  4.1368 -       {
  4.1369 --       printk(KERN_ERR "%s: Cannot read old architecture-dependent "
  4.1370 --		       "(format <= 1.0) images, please use an older "
  4.1371 --		       "version of %s for this file.\n",
  4.1372 --		       cloop_name, cloop_name);
  4.1373 ++      {
  4.1374  +       printk(KERN_ERR "%s: can't read index\n", cloop_name);
  4.1375 -        error=-EBADF; goto error_release;
  4.1376 -       }
  4.1377 --     total_offsets=ntohl(clo->head.num_blocks)+1;
  4.1378 --     if (!isblkdev && (sizeof(struct cloop_head)+sizeof(loff_t)*
  4.1379 --                       total_offsets > inode->i_size))
  4.1380 ++       error=-EBADF; goto error_release;
  4.1381 ++      }
  4.1382  +     len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
  4.1383 -+     flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
  4.1384 -+// May  3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
  4.1385 -+printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 
  4.1386 -+		clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
  4.1387 -+     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
  4.1388 -+// May  3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at   (null)
  4.1389 -+printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
  4.1390 ++     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
  4.1391  +     cloop_free(zbuf, zlen);
  4.1392  +     if (ret != 0)
  4.1393 -       {
  4.1394 --       printk(KERN_ERR "%s: file too small for %u blocks\n",
  4.1395 --              cloop_name, ntohl(clo->head.num_blocks));
  4.1396 -+        printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
  4.1397 -+               cloop_name, ret, flags);
  4.1398 -        error=-EBADF; goto error_release;
  4.1399 -       }
  4.1400 --     clo->offsets = cloop_malloc(sizeof(loff_t) * total_offsets);
  4.1401 --     if (!clo->offsets)
  4.1402 --      {
  4.1403 --       printk(KERN_ERR "%s: out of kernel mem for offsets\n", cloop_name);
  4.1404 --       error=-ENOMEM; goto error_release;
  4.1405 --      }
  4.1406 -     }
  4.1407 --   num_readable = MIN(total_offsets - offsets_read,
  4.1408 --                      (clo->underlying_blksize - offset) 
  4.1409 --                      / sizeof(loff_t));
  4.1410 --   memcpy(&clo->offsets[offsets_read], bbuf+offset, num_readable * sizeof(loff_t));
  4.1411 --   offsets_read += num_readable;
  4.1412 --  }
  4.1413 --  { /* Search for largest block rather than estimate. KK. */
  4.1414 --   int i;
  4.1415 --   for(i=0;i<total_offsets-1;i++)
  4.1416 ++      {
  4.1417 ++        printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
  4.1418 ++               cloop_name, ret);
  4.1419 ++       error=-EBADF; goto error_release;
  4.1420 ++      }
  4.1421 ++    }
  4.1422  +   else
  4.1423  +    {
  4.1424  +     printk(KERN_ERR "%s: can't find index\n", cloop_name);
  4.1425  +     error=-ENOMEM; goto error_release;
  4.1426  +    }
  4.1427 -+  }
  4.1428 -+ else
  4.1429 -+  {
  4.1430 +   }
  4.1431 +  else
  4.1432 +   {
  4.1433 +-   fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_offsets * sizeof(loff_t);
  4.1434 +-  }
  4.1435 +- for(offsets_read=0;offsets_read<total_offsets;)
  4.1436 +-  {
  4.1437 +-   size_t bytes_readable;
  4.1438 +-   unsigned int num_readable, offset = 0;
  4.1439 +-   bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
  4.1440 +-   if(bytes_readable <= 0) break; /* Done */
  4.1441 +-   bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
  4.1442 +-   if(bytes_read != bytes_readable)
  4.1443  +   unsigned int n, total_bytes;
  4.1444 -+   flags = 0;
  4.1445  +   clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
  4.1446  +   if (!clo->block_ptrs)
  4.1447  +    {
  4.1448 @@ -1007,14 +785,26 @@
  4.1449  +    }
  4.1450  +   /* Read them offsets! */
  4.1451  +   if(clo->header_first)
  4.1452 -+    {
  4.1453 +     {
  4.1454 +-     printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
  4.1455 +-            cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
  4.1456 +-     error=-EBADF;
  4.1457 +-     goto error_release;
  4.1458  +     total_bytes = total_offsets * sizeof(struct block_info);
  4.1459  +     fs_read_position = sizeof(struct cloop_head);
  4.1460 -+    }
  4.1461 +     }
  4.1462 +-   /* remember where to read the next blk from file */
  4.1463 +-   fs_read_position += bytes_read;
  4.1464 +-   /* calculate how many offsets can be taken from current bbuf */
  4.1465 +-   num_readable = MIN(total_offsets - offsets_read,
  4.1466 +-                      bytes_read / sizeof(loff_t));
  4.1467 +-   DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
  4.1468 +-   for (i=0,offset=0; i<num_readable; i++)
  4.1469  +   else
  4.1470       {
  4.1471 --     loff_t d=be64_to_cpu(clo->offsets[i+1]) - be64_to_cpu(clo->offsets[i]);
  4.1472 --     clo->largest_block=MAX(clo->largest_block,d);
  4.1473 +-     loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
  4.1474 +-     if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
  4.1475 +-     if(offsets_read > 0)
  4.1476  +     total_bytes = total_offsets * sizeof(loff_t);
  4.1477  +     fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
  4.1478  +    }
  4.1479 @@ -1025,35 +815,28 @@
  4.1480  +     if(bytes_readable <= 0) break; /* Done */
  4.1481  +     bytes_read = cloop_read_from_file(clo, file, bbuf, fs_read_position, bytes_readable);
  4.1482  +     if(bytes_read != bytes_readable)
  4.1483 -+      {
  4.1484 +       {
  4.1485 +-       loff_t d = CLOOP_BLOCK_OFFSET(tmp) - CLOOP_BLOCK_OFFSET(clo->block_ptrs[offsets_read-1]);
  4.1486 +-       if(d > clo->largest_block) clo->largest_block = d;
  4.1487  +       printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
  4.1488  +              cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
  4.1489  +       error=-EBADF;
  4.1490  +       goto error_release;
  4.1491 -+      }
  4.1492 +       }
  4.1493 +-     clo->block_ptrs[offsets_read++] = tmp;
  4.1494 +-     offset += sizeof(loff_t);
  4.1495  +     memcpy(((char *)clo->block_ptrs) + n, bbuf, bytes_read);
  4.1496  +     /* remember where to read the next blk from file */
  4.1497  +     fs_read_position += bytes_read;
  4.1498  +     n += bytes_read;
  4.1499       }
  4.1500 --   printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  4.1501 --          cloop_name, filename, ntohl(clo->head.num_blocks),
  4.1502 --          ntohl(clo->head.block_size), clo->largest_block);
  4.1503     }
  4.1504 --/* Combo kmalloc used too large chunks (>130000). */
  4.1505 +-  printk(KERN_INFO "%s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  4.1506 +-         cloop_name, clo->underlying_filename, clo->head.num_blocks,
  4.1507 +-         clo->head.block_size, clo->largest_block);
  4.1508    {
  4.1509     int i;
  4.1510 --  for(i=0;i<BUFFERED_BLOCKS;i++)
  4.1511 --   {
  4.1512 --    clo->buffer[i] = cloop_malloc(ntohl(clo->head.block_size));
  4.1513 --    if(!clo->buffer[i])
  4.1514 --     {
  4.1515 --      printk(KERN_ERR "%s: out of memory for buffer %lu\n",
  4.1516 --             cloop_name, (unsigned long) ntohl(clo->head.block_size));
  4.1517 --      error=-ENOMEM; goto error_release_free;
  4.1518 --     }
  4.1519 --   }
  4.1520 -+  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
  4.1521 ++  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
  4.1522  +  clo->largest_block = 0;
  4.1523  +  for (i = 0; i < clo->head.num_blocks; i++)
  4.1524  +    if (clo->block_ptrs[i].size > clo->largest_block)
  4.1525 @@ -1061,39 +844,15 @@
  4.1526  +  printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  4.1527  +         cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
  4.1528  +         clo->head.block_size, clo->largest_block);
  4.1529 -+ }
  4.1530 -+ {
  4.1531 -+  int i;
  4.1532 -+  clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  4.1533 -+                              (buffers / clo->head.block_size) : 1;
  4.1534 -+  clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  4.1535 -+  clo->buffer = cloop_malloc(clo->num_buffered_blocks * sizeof (char*));
  4.1536 -+  if (!clo->buffered_blocknum || !clo->buffer)
  4.1537 -+  {
  4.1538 -+   printk(KERN_ERR "%s: out of memory for index of cache buffer (%lu bytes)\n",
  4.1539 -+                    cloop_name, (unsigned long)clo->num_buffered_blocks * sizeof (u_int32_t) + sizeof(char*) );
  4.1540 -+                    error=-ENOMEM; goto error_release;
  4.1541 -+  }
  4.1542 -+  memset(clo->buffer, 0, clo->num_buffered_blocks * sizeof (char*));
  4.1543 -+  for(i=0;i<clo->num_buffered_blocks;i++)
  4.1544 -+  {
  4.1545 -+   clo->buffered_blocknum[i] = -1;
  4.1546 -+   clo->buffer[i] = cloop_malloc(clo->head.block_size);
  4.1547 -+   if(!clo->buffer[i])
  4.1548 -+    {
  4.1549 -+     printk(KERN_ERR "%s: out of memory for cache buffer %lu\n",
  4.1550 -+            cloop_name, (unsigned long) clo->head.block_size);
  4.1551 -+     error=-ENOMEM; goto error_release_free;
  4.1552 -+    }
  4.1553 -+  }
  4.1554 -+  clo->current_bufnum = 0;
  4.1555 -  }
  4.1556 -  clo->compressed_buffer = cloop_malloc(clo->largest_block);
  4.1557 -  if(!clo->compressed_buffer)
  4.1558 -@@ -557,31 +803,7 @@
  4.1559 +   clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  4.1560 +                               (buffers / clo->head.block_size) : 1;
  4.1561 +   clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  4.1562 +@@ -705,36 +793,14 @@
  4.1563             cloop_name, clo->largest_block);
  4.1564      error=-ENOMEM; goto error_release_free_buffer;
  4.1565     }
  4.1566 +- /* Allocate Memory for decompressors */
  4.1567 +-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  4.1568  - clo->zstream.workspace = cloop_malloc(zlib_inflate_workspacesize());
  4.1569  - if(!clo->zstream.workspace)
  4.1570  -  {
  4.1571 @@ -1102,443 +861,48 @@
  4.1572  -   error=-ENOMEM; goto error_release_free_all;
  4.1573  -  }
  4.1574  - zlib_inflateInit(&clo->zstream);
  4.1575 -- if(!isblkdev &&
  4.1576 --    be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]) != inode->i_size)
  4.1577 --  {
  4.1578 --   printk(KERN_ERR "%s: final offset wrong (%Lu not %Lu)\n",
  4.1579 +-#endif
  4.1580 +-#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  4.1581 +-#if XZ_INTERNAL_CRC32
  4.1582 +-  /* This must be called before any other xz_* function to initialize the CRC32 lookup table. */
  4.1583 +-  xz_crc32_init(void);
  4.1584 +-#endif
  4.1585 +-  clo->xzdecoderstate = xz_dec_init(XZ_SINGLE, 0);
  4.1586 +-#endif
  4.1587 +- if(CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]) > clo->underlying_total_size)
  4.1588 ++ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  4.1589 ++ clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
  4.1590 ++ if(IS_ERR(clo->clo_thread))
  4.1591 +   {
  4.1592 +-   printk(KERN_ERR "%s: final offset wrong (%llu > %llu)\n",
  4.1593  -          cloop_name,
  4.1594 --          be64_to_cpu(clo->offsets[ntohl(clo->head.num_blocks)]),
  4.1595 --          inode->i_size);
  4.1596 +-	  CLOOP_BLOCK_OFFSET(clo->block_ptrs[clo->head.num_blocks]),
  4.1597 +-          clo->underlying_total_size);
  4.1598 +-#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  4.1599  -   cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace=NULL;
  4.1600 --   goto error_release_free_all;
  4.1601 --  }
  4.1602 -- {
  4.1603 --  int i;
  4.1604 --  for(i=0; i<BUFFERED_BLOCKS; i++) clo->buffered_blocknum[i] = -1;
  4.1605 --  clo->current_bufnum=0;
  4.1606 -- }
  4.1607 -- set_capacity(clo->clo_disk, (sector_t)(ntohl(clo->head.num_blocks)*
  4.1608 --              (ntohl(clo->head.block_size)>>9)));
  4.1609 -+ set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  4.1610 -  clo->clo_thread = kthread_create(cloop_thread, clo, "cloop%d", cloop_num);
  4.1611 -  if(IS_ERR(clo->clo_thread))
  4.1612 -   {
  4.1613 -@@ -591,17 +813,17 @@
  4.1614 +-#endif
  4.1615 ++   error = PTR_ERR(clo->clo_thread);
  4.1616 ++   clo->clo_thread=NULL;
  4.1617 +    goto error_release_free_all;
  4.1618     }
  4.1619 +- set_capacity(clo->clo_disk, (sector_t)(clo->head.num_blocks*(clo->head.block_size>>9)));
  4.1620    if(preload > 0)
  4.1621     {
  4.1622 --   clo->preload_array_size = ((preload<=ntohl(clo->head.num_blocks))?preload:ntohl(clo->head.num_blocks));
  4.1623 -+   clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
  4.1624 -    clo->preload_size = 0;
  4.1625 -    if((clo->preload_cache = cloop_malloc(clo->preload_array_size * sizeof(char *))) != NULL)
  4.1626 -     {
  4.1627 -      int i;
  4.1628 -      for(i=0; i<clo->preload_array_size; i++)
  4.1629 -       {
  4.1630 --       if((clo->preload_cache[i] = cloop_malloc(ntohl(clo->head.block_size))) == NULL)
  4.1631 -+       if((clo->preload_cache[i] = cloop_malloc(clo->head.block_size)) == NULL)
  4.1632 -         { /* Out of memory */
  4.1633 -          printk(KERN_WARNING "%s: cloop_malloc(%d) failed for preload_cache[%d] (ignored).\n",
  4.1634 --                             cloop_name, ntohl(clo->head.block_size), i);
  4.1635 -+                             cloop_name, clo->head.block_size, i);
  4.1636 - 	 break;
  4.1637 - 	}
  4.1638 -       }
  4.1639 -@@ -612,13 +834,13 @@
  4.1640 -        if(buffered_blocknum >= 0)
  4.1641 -         {
  4.1642 - 	 memcpy(clo->preload_cache[i], clo->buffer[buffered_blocknum],
  4.1643 --	        ntohl(clo->head.block_size));
  4.1644 -+	        clo->head.block_size);
  4.1645 - 	}
  4.1646 -        else
  4.1647 -         {
  4.1648 -          printk(KERN_WARNING "%s: can't read block %d into preload cache, set to zero.\n",
  4.1649 - 	                     cloop_name, i);
  4.1650 --	 memset(clo->preload_cache[i], 0, ntohl(clo->head.block_size));
  4.1651 -+	 memset(clo->preload_cache[i], 0, clo->head.block_size);
  4.1652 - 	}
  4.1653 -       }
  4.1654 -      printk(KERN_INFO "%s: preloaded %d blocks into cache.\n", cloop_name,
  4.1655 -@@ -641,22 +863,19 @@
  4.1656 -  cloop_free(clo->compressed_buffer, clo->largest_block);
  4.1657 -  clo->compressed_buffer=NULL;
  4.1658 - error_release_free_buffer:
  4.1659 -+ if(clo->buffer)
  4.1660 -  {
  4.1661 -   int i;
  4.1662 --  for(i=0; i<BUFFERED_BLOCKS; i++)
  4.1663 --   { 
  4.1664 --    if(clo->buffer[i])
  4.1665 --     {
  4.1666 --      cloop_free(clo->buffer[i], ntohl(clo->head.block_size));
  4.1667 --      clo->buffer[i]=NULL;
  4.1668 --     }
  4.1669 --   }
  4.1670 -+  for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) { cloop_free(clo->buffer[i], clo->head.block_size); clo->buffer[i]=NULL; }}
  4.1671 -+  cloop_free(clo->buffer, clo->num_buffered_blocks*sizeof(char*)); clo->buffer=NULL;
  4.1672 +    clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
  4.1673 +@@ -780,6 +846,7 @@
  4.1674 +      clo->preload_array_size = clo->preload_size = 0;
  4.1675 +     }
  4.1676 +   }
  4.1677 ++ wake_up_process(clo->clo_thread);
  4.1678 +  /* Uncheck */
  4.1679 +  return error;
  4.1680 + error_release_free_all:
  4.1681 +@@ -794,9 +861,13 @@
  4.1682    }
  4.1683 -+ if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
  4.1684 +  if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
  4.1685   error_release_free:
  4.1686 -- cloop_free(clo->offsets, sizeof(loff_t) * total_offsets);
  4.1687 -- clo->offsets=NULL;
  4.1688 +- cloop_free(clo->block_ptrs, sizeof(cloop_block_ptr) * total_offsets);
  4.1689  + cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
  4.1690 -+ clo->block_ptrs=NULL;
  4.1691 - error_release:
  4.1692 -  if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
  4.1693 -+ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  4.1694 -  clo->backing_file=NULL;
  4.1695 -  return error;
  4.1696 - }
  4.1697 -@@ -673,7 +892,7 @@
  4.1698 -  if(clo->backing_file) return -EBUSY;
  4.1699 -  file = fget(arg); /* get filp struct from ioctl arg fd */
  4.1700 -  if(!file) return -EBADF;
  4.1701 -- error=cloop_set_file(cloop_num,file,"losetup_file");
  4.1702 -+ error=cloop_set_file(cloop_num,file);
  4.1703 -  set_device_ro(bdev, 1);
  4.1704 -  if(error) fput(file);
  4.1705 -  return error;
  4.1706 -@@ -684,29 +903,48 @@
  4.1707 - {
  4.1708 -  struct cloop_device *clo = cloop_dev[cloop_num];
  4.1709 -  struct file *filp = clo->backing_file;
  4.1710 -- int i;
  4.1711 -  if(clo->refcnt > 1)	/* we needed one fd for the ioctl */
  4.1712 -    return -EBUSY;
  4.1713 -  if(filp==NULL) return -EINVAL;
  4.1714 -  if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
  4.1715 -- if(filp!=initial_file) fput(filp);
  4.1716 -- else { filp_close(initial_file,0); initial_file=NULL; }
  4.1717 -+ if(filp!=initial_file)
  4.1718 -+  fput(filp);
  4.1719 -+ else
  4.1720 -+ {
  4.1721 -+  filp_close(initial_file,0);
  4.1722 -+  initial_file=NULL;
  4.1723 -+ }
  4.1724 -  clo->backing_file  = NULL;
  4.1725 -  clo->backing_inode = NULL;
  4.1726 -- if(clo->offsets) { cloop_free(clo->offsets, clo->underlying_blksize); clo->offsets = NULL; }
  4.1727 -+ if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  4.1728 -+ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
  4.1729 -  if(clo->preload_cache)
  4.1730 --  {
  4.1731 --   for(i=0; i < clo->preload_size; i++)
  4.1732 --    cloop_free(clo->preload_cache[i], ntohl(clo->head.block_size));
  4.1733 --   cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
  4.1734 --   clo->preload_cache = NULL;
  4.1735 --   clo->preload_size = clo->preload_array_size = 0;
  4.1736 --  }
  4.1737 -- for(i=0; i<BUFFERED_BLOCKS; i++)
  4.1738 --      if(clo->buffer[i]) { cloop_free(clo->buffer[i], ntohl(clo->head.block_size)); clo->buffer[i]=NULL; }
  4.1739 -+ {
  4.1740 -+  int i;
  4.1741 -+  for(i=0; i < clo->preload_size; i++)
  4.1742 -+   cloop_free(clo->preload_cache[i], clo->head.block_size);
  4.1743 -+  cloop_free(clo->preload_cache, clo->preload_array_size * sizeof(char *));
  4.1744 -+  clo->preload_cache = NULL;
  4.1745 -+  clo->preload_size = clo->preload_array_size = 0;
  4.1746 -+ }
  4.1747 -+ if (clo->buffered_blocknum)
  4.1748 -+ {
  4.1749 -+  cloop_free(clo->buffered_blocknum, sizeof(int) * clo->num_buffered_blocks); clo->buffered_blocknum = NULL;
  4.1750 -+ }
  4.1751 -+ if (clo->buffer)
  4.1752 -+ {
  4.1753 -+  int i;
  4.1754 -+  for(i=0; i<clo->num_buffered_blocks; i++) { if(clo->buffer[i]) cloop_free(clo->buffer[i], clo->head.block_size); }
  4.1755 -+  cloop_free(clo->buffer, sizeof(char*) * clo->num_buffered_blocks); clo->buffer = NULL;
  4.1756 -+ }
  4.1757 -  if(clo->compressed_buffer) { cloop_free(clo->compressed_buffer, clo->largest_block); clo->compressed_buffer = NULL; }
  4.1758 -+#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  4.1759 -  zlib_inflateEnd(&clo->zstream);
  4.1760 -  if(clo->zstream.workspace) { cloop_free(clo->zstream.workspace, zlib_inflate_workspacesize()); clo->zstream.workspace = NULL; }
  4.1761 -+#endif
  4.1762 -+#if (defined(CONFIG_DECOMPRESS_XZ) || defined(CONFIG_DECOMPRESS_XZ_MODULE))
  4.1763 -+  xz_dec_end(clo->xzdecoderstate);
  4.1764 -+#endif
  4.1765 -  if(bdev) invalidate_bdev(bdev);
  4.1766 -  if(clo->clo_disk) set_capacity(clo->clo_disk, 0);
  4.1767 -  return 0;
  4.1768 -@@ -731,8 +969,8 @@
  4.1769 -                             const struct loop_info64 *info)
  4.1770 - {
  4.1771 -  if (!clo->backing_file) return -ENXIO;
  4.1772 -- memcpy(clo->clo_file_name, info->lo_file_name, LO_NAME_SIZE);
  4.1773 -- clo->clo_file_name[LO_NAME_SIZE-1] = 0;
  4.1774 -+ if(clo->underlying_filename) kfree(clo->underlying_filename);
  4.1775 -+ clo->underlying_filename = kstrdup(info->lo_file_name, GFP_KERNEL);
  4.1776 -  return 0;
  4.1777 - }
  4.1778 - 
  4.1779 -@@ -743,7 +981,11 @@
  4.1780 -  struct kstat stat;
  4.1781 -  int err;
  4.1782 -  if (!file) return -ENXIO;
  4.1783 -- err = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat);
  4.1784 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
  4.1785 -+ err = vfs_getattr(&file->f_path, &stat);
  4.1786 -+#else
  4.1787 -+ err = vfs_getattr(&file->f_path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
  4.1788 -+#endif
  4.1789 -  if (err) return err;
  4.1790 -  memset(info, 0, sizeof(*info));
  4.1791 -  info->lo_number  = clo->clo_number;
  4.1792 -@@ -753,7 +995,8 @@
  4.1793 -  info->lo_offset  = 0;
  4.1794 -  info->lo_sizelimit = 0;
  4.1795 -  info->lo_flags   = 0;
  4.1796 -- memcpy(info->lo_file_name, clo->clo_file_name, LO_NAME_SIZE);
  4.1797 -+ strncpy(info->lo_file_name, clo->underlying_filename, LO_NAME_SIZE);
  4.1798 -+ info->lo_file_name[LO_NAME_SIZE-1]=0;
  4.1799 -  return 0;
  4.1800 - }
  4.1801 - 
  4.1802 -@@ -833,8 +1076,6 @@
  4.1803 -  if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT;
  4.1804 -  return err;
  4.1805 - }
  4.1806 --/* EOF get/set_status */
  4.1807 --
  4.1808 - 
  4.1809 - static int cloop_ioctl(struct block_device *bdev, fmode_t mode,
  4.1810 - 	unsigned int cmd, unsigned long arg)
  4.1811 -@@ -914,21 +1155,20 @@
  4.1812 -  /* losetup uses write-open and flags=0x8002 to set a new file */
  4.1813 -  if(mode & FMODE_WRITE)
  4.1814 -   {
  4.1815 --   printk(KERN_WARNING "%s: Can't open device read-write in mode 0x%x\n", cloop_name, mode);
  4.1816 -+   printk(KERN_INFO "%s: Open in read-write mode 0x%x requested, ignored.\n", cloop_name, mode);
  4.1817 -    return -EROFS;
  4.1818 -   }
  4.1819 -  cloop_dev[cloop_num]->refcnt+=1;
  4.1820 -  return 0;
  4.1821 - }
  4.1822 - 
  4.1823 --static int cloop_close(struct gendisk *disk, fmode_t mode)
  4.1824 -+static void cloop_close(struct gendisk *disk, fmode_t mode)
  4.1825 - {
  4.1826 -- int cloop_num, err=0;
  4.1827 -- if(!disk) return 0;
  4.1828 -+ int cloop_num;
  4.1829 -+ if(!disk) return;
  4.1830 -  cloop_num=((struct cloop_device *)disk->private_data)->clo_number;
  4.1831 -- if(cloop_num < 0 || cloop_num > (cloop_count-1)) return 0;
  4.1832 -+ if(cloop_num < 0 || cloop_num > (cloop_count-1)) return;
  4.1833 -  cloop_dev[cloop_num]->refcnt-=1;
  4.1834 -- return err;
  4.1835 - }
  4.1836 - 
  4.1837 - static struct block_device_operations clo_fops =
  4.1838 -@@ -973,6 +1213,10 @@
  4.1839 -    goto error_out;
  4.1840 -   }
  4.1841 -  clo->clo_queue->queuedata = clo;
  4.1842 -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  4.1843 -+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
  4.1844 -+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
  4.1845 -+#endif
  4.1846 -  clo->clo_disk = alloc_disk(1);
  4.1847 -  if(!clo->clo_disk)
  4.1848 -   {
  4.1849 -@@ -1004,6 +1248,11 @@
  4.1850 -  cloop_dev[cloop_num] = NULL;
  4.1851 - }
  4.1852 - 
  4.1853 -+/* LZ4 Stuff */
  4.1854 -+#if (defined USE_LZ4_INTERNAL)
  4.1855 -+#include "lz4_kmod.c"
  4.1856 -+#endif
  4.1857 -+
  4.1858 - static int __init cloop_init(void)
  4.1859 - {
  4.1860 -  int error=0;
  4.1861 -@@ -1044,7 +1293,7 @@
  4.1862 -      initial_file=NULL; /* if IS_ERR, it's NOT open. */
  4.1863 -     }
  4.1864 -    else
  4.1865 --     error=cloop_set_file(0,initial_file,file);
  4.1866 -+     error=cloop_set_file(0,initial_file);
  4.1867 -    if(error)
  4.1868 -     {
  4.1869 -      printk(KERN_ERR
  4.1870 -@@ -1052,9 +1301,6 @@
  4.1871 -             cloop_name, file, error);
  4.1872 -      goto init_out_dealloc;
  4.1873 -     }
  4.1874 --   if(namelen >= LO_NAME_SIZE) namelen = LO_NAME_SIZE-1;
  4.1875 --   memcpy(cloop_dev[0]->clo_file_name, file, namelen);
  4.1876 --   cloop_dev[0]->clo_file_name[namelen] = 0;
  4.1877 -   }
  4.1878 -  return 0;
  4.1879 - init_out_dealloc:
  4.1880 ---- cloop.h
  4.1881 -+++ cloop.h
  4.1882 -@@ -86,11 +86,8 @@
  4.1883 - struct cloop_tail
  4.1884 - {
  4.1885 - 	u_int32_t table_size; 
  4.1886 --	u_int32_t index_size; /* size:4 comp:3 ctrl-c:1 lastlen:24 */
  4.1887 -+	u_int32_t index_size; /* size:4 unused:3 ctrl-c:1 lastlen:24 */
  4.1888 - #define CLOOP3_INDEX_SIZE(x)    ((unsigned int)((x) & 0xF))
  4.1889 --#define CLOOP3_BLOCKS_FLAGS(x)  ((unsigned int)((x) & 0x70) >> 4)
  4.1890 --#define CLOOP3_TRUNCATED(x)     ((unsigned int)((x) & 0x80) >> 7)
  4.1891 --#define CLOOP3_LASTLEN(x)       (unsigned int)((x) >> 8)
  4.1892 - 	u_int32_t num_blocks;
  4.1893 - };
  4.1894 - 
  4.1895 -@@ -104,8 +101,10 @@
  4.1896 - };
  4.1897 - 
  4.1898 - static inline char *build_index(struct block_info *offsets, unsigned long n, 
  4.1899 --			unsigned long block_size, unsigned global_flags)
  4.1900 -+			unsigned long block_size)
  4.1901 - {
  4.1902 -+	static char v[11];
  4.1903 -+	u_int32_t flags = 0;
  4.1904 - 	u_int32_t *ofs32 = (u_int32_t *) offsets;
  4.1905 - 	loff_t    *ofs64 = (loff_t *) offsets;
  4.1906 - 
  4.1907 -@@ -130,8 +129,6 @@
  4.1908 - 		}
  4.1909 - 		else { /* V2.0/V4.0 */
  4.1910 - 			loff_t last = CLOOP_BLOCK_OFFSET(__be64_to_cpu(ofs64[n]));
  4.1911 --			u_int32_t flags;
  4.1912 --			static char v4[11];
  4.1913 - 			unsigned long i = n;
  4.1914 - 
  4.1915 - 			for (flags = 0; n-- ;) {
  4.1916 -@@ -149,12 +146,7 @@
  4.1917 - 					offsets[i] = offsets[offsets[i].offset];
  4.1918 - 				}
  4.1919 - 			}
  4.1920 --			strcpy(v4, (char *) "64BE v4.0a");
  4.1921 --			v4[10] = 'a' + ((flags-1) & 0xF);	// compressors used
  4.1922 --			if (flags > 0x10) {			// with links ?
  4.1923 --				v4[10] += 'A' - 'a';
  4.1924 --			}
  4.1925 --			return v4;
  4.1926 -+			strcpy(v, (char *) "64BE v4.0a");
  4.1927 - 		}
  4.1928 - 	}
  4.1929 - 	else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
  4.1930 -@@ -170,7 +162,6 @@
  4.1931 - 	else { /* V3.0 or V0.68 */
  4.1932 - 		unsigned long i;
  4.1933 - 		loff_t j;
  4.1934 --		static char v3[11];
  4.1935 - 		
  4.1936 - 		for (i = 0; i < n && ntohl(ofs32[i]) < ntohl(ofs32[i+1]); i++);
  4.1937 - 		if (i == n && ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
  4.1938 -@@ -185,28 +176,33 @@
  4.1939 - 		}
  4.1940 - 		
  4.1941 - 		v3_64 = (ofs32[1] == 0);
  4.1942 --		for (i = n; i-- != 0; )
  4.1943 -+		for (i = n; i-- != 0; ) {
  4.1944 - 			offsets[i].size = ntohl(ofs32[i << v3_64]); 
  4.1945 --		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
  4.1946 --			offsets[i].offset = j;
  4.1947 --			offsets[i].flags = global_flags;
  4.1948 - 			if (offsets[i].size == 0xFFFFFFFF) {
  4.1949 --				offsets[i].flags = CLOOP_COMPRESSOR_NONE;
  4.1950 --				offsets[i].size = block_size;
  4.1951 -+				offsets[i].size = 0x10000000 | block_size;
  4.1952 - 			}
  4.1953 --			if ((offsets[i].size & 0x80000000) == 0) {
  4.1954 -+			offsets[i].flags = (offsets[i].size >> 28);
  4.1955 -+			offsets[i].size &= 0x0FFFFFFF; 
  4.1956 -+		}
  4.1957 -+		for (i = 0, j = sizeof(struct cloop_head); i < n; i++) {
  4.1958 -+			offsets[i].offset = j;
  4.1959 -+			if (offsets[i].flags < 8) {
  4.1960 - 				j += offsets[i].size;
  4.1961 - 			}
  4.1962 - 		}
  4.1963 - 		for (i = 0; i < n; i++) {
  4.1964 --			if (offsets[i].size & 0x80000000) {
  4.1965 --				offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
  4.1966 -+			flags |= 1 << offsets[i].flags;
  4.1967 -+			if (offsets[i].flags >= 8) {
  4.1968 -+				offsets[i] = offsets[offsets[i].size];
  4.1969 - 			}
  4.1970 - 		}
  4.1971 --		strcpy(v3, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
  4.1972 --		v3[10] += global_flags;
  4.1973 --		return v3;
  4.1974 -+		strcpy(v, (char *) (v3_64) ? "64BE v3.0a" : "32BE v3.0a");
  4.1975 -+	}
  4.1976 -+	v[10] = 'a' + ((flags-1) & 0xF);	// compressors used
  4.1977 -+	if (flags > 0x10) {			// with links ?
  4.1978 -+		v[10] += 'A' - 'a';
  4.1979 - 	}
  4.1980 -+	return v;
  4.1981 - }
  4.1982 - 
  4.1983 - /* Cloop suspend IOCTL */
  4.1984 ---- cloop.c
  4.1985 -+++ cloop.c
  4.1986 -@@ -542,7 +542,7 @@
  4.1987 -  const unsigned int header_size = sizeof(struct cloop_head);
  4.1988 -  unsigned int i, total_offsets=0;
  4.1989 -  loff_t fs_read_position = 0, header_pos[2];
  4.1990 -- int flags, isblkdev, bytes_read, error = 0;
  4.1991 -+ int isblkdev, bytes_read, error = 0;
  4.1992 -  if (clo->suspended) return error;
  4.1993 -  #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
  4.1994 -  inode = file->f_dentry->d_inode;
  4.1995 -@@ -698,18 +698,12 @@
  4.1996 -        error=-EBADF; goto error_release;
  4.1997 -       }
  4.1998 -      len = CLOOP3_INDEX_SIZE(ntohl(tail.index_size)) * total_offsets;
  4.1999 --     flags = CLOOP3_BLOCKS_FLAGS(ntohl(tail.index_size));
  4.2000 --// May  3 19:45:20 (none) user.info kernel: cloop: uncompress(clo=e0a78000, block_ptrs=e0c9c000, &len(1440)=ddc05e6c, zbuf=e0c9f000, zlen=43, flag=0)
  4.2001 --printk(KERN_INFO "%s: uncompress(clo=%p, block_ptrs=%p, &len(%ld)=%p, zbuf=%p, zlen=%ld, flag=%d)\n", cloop_name, 
  4.2002 --		clo, clo->block_ptrs, len, &len, zbuf, zlen, flags);
  4.2003 --     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, flags);
  4.2004 --// May  3 19:45:20 (none) user.alert kernel: BUG: unable to handle kernel NULL pointer dereference at   (null)
  4.2005 --printk(KERN_INFO "%s: uncompressed !\n", cloop_name);
  4.2006 -+     ret = uncompress(clo, (void *) clo->block_ptrs, &len, zbuf, zlen, CLOOP_COMPRESSOR_ZLIB);
  4.2007 -      cloop_free(zbuf, zlen);
  4.2008 -      if (ret != 0)
  4.2009 -       {
  4.2010 --        printk(KERN_ERR "%s: decompression error %i uncompressing index, flags %u\n",
  4.2011 --               cloop_name, ret, flags);
  4.2012 -+        printk(KERN_ERR "%s: decompression error %i uncompressing index\n",
  4.2013 -+               cloop_name, ret);
  4.2014 -        error=-EBADF; goto error_release;
  4.2015 -       }
  4.2016 -     }
  4.2017 -@@ -722,7 +716,6 @@
  4.2018 -  else
  4.2019 -   {
  4.2020 -    unsigned int n, total_bytes;
  4.2021 --   flags = 0;
  4.2022 -    clo->block_ptrs = cloop_malloc(sizeof(struct block_info) * total_offsets);
  4.2023 -    if (!clo->block_ptrs)
  4.2024 -     {
  4.2025 -@@ -761,7 +754,7 @@
  4.2026 -   }
  4.2027 -  {
  4.2028 -   int i;
  4.2029 --  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size, flags);
  4.2030 -+  char *version = build_index(clo->block_ptrs, clo->head.num_blocks, clo->head.block_size);
  4.2031 -   clo->largest_block = 0;
  4.2032 -   for (i = 0; i < clo->head.num_blocks; i++)
  4.2033 -     if (clo->block_ptrs[i].size > clo->largest_block)
  4.2034 -@@ -769,9 +762,6 @@
  4.2035 -   printk(KERN_INFO "%s: %s: %s: %u blocks, %u bytes/block, largest block is %lu bytes.\n",
  4.2036 -          cloop_name, clo->underlying_filename, version, clo->head.num_blocks,
  4.2037 -          clo->head.block_size, clo->largest_block);
  4.2038 -- }
  4.2039 -- {
  4.2040 --  int i;
  4.2041 -   clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
  4.2042 -                               (buffers / clo->head.block_size) : 1;
  4.2043 -   clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
  4.2044 -@@ -874,6 +864,10 @@
  4.2045 -  cloop_free(clo->block_ptrs, sizeof(struct block_info) * total_offsets);
  4.2046    clo->block_ptrs=NULL;
  4.2047   error_release:
  4.2048  +#if (defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE))
  4.2049 @@ -1548,3 +912,146 @@
  4.2050    if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
  4.2051    if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  4.2052    clo->backing_file=NULL;
  4.2053 +@@ -829,6 +900,7 @@
  4.2054 +  if(clo->refcnt > 1)	/* we needed one fd for the ioctl */
  4.2055 +    return -EBUSY;
  4.2056 +  if(filp==NULL) return -EINVAL;
  4.2057 ++ if(clo->clo_thread) { kthread_stop(clo->clo_thread); clo->clo_thread=NULL; }
  4.2058 +  if(filp!=initial_file)
  4.2059 +   fput(filp);
  4.2060 +  else
  4.2061 +@@ -839,7 +911,7 @@
  4.2062 +  clo->backing_file  = NULL;
  4.2063 +  clo->backing_inode = NULL;
  4.2064 +  if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
  4.2065 +- if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks+1); clo->block_ptrs = NULL; }
  4.2066 ++ if(clo->block_ptrs) { cloop_free(clo->block_ptrs, clo->head.num_blocks); clo->block_ptrs = NULL; }
  4.2067 +  if(clo->preload_cache)
  4.2068 +  {
  4.2069 +   int i;
  4.2070 +@@ -1054,15 +1126,15 @@
  4.2071 +   case LOOP_CLR_FD:       /* Change arg */ 
  4.2072 +   case LOOP_GET_STATUS64: /* Change arg */ 
  4.2073 +   case LOOP_SET_STATUS64: /* Change arg */ 
  4.2074 +-    return cloop_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
  4.2075 ++	arg = (unsigned long) compat_ptr(arg);
  4.2076 +   case LOOP_SET_STATUS:   /* unchanged */
  4.2077 +   case LOOP_GET_STATUS:   /* unchanged */
  4.2078 +   case LOOP_SET_FD:       /* unchanged */
  4.2079 +   case LOOP_CHANGE_FD:    /* unchanged */
  4.2080 +-    return cloop_ioctl(bdev, mode, cmd, arg);
  4.2081 +-  default:
  4.2082 +-    return -ENOIOCTLCMD;
  4.2083 ++	return cloop_ioctl(bdev, mode, cmd, arg);
  4.2084 ++	break;
  4.2085 +  }
  4.2086 ++ return -ENOIOCTLCMD;
  4.2087 + }
  4.2088 + #endif
  4.2089 + 
  4.2090 +@@ -1093,7 +1165,7 @@
  4.2091 +  cloop_dev[cloop_num]->refcnt-=1;
  4.2092 + }
  4.2093 + 
  4.2094 +-static const struct block_device_operations clo_fops =
  4.2095 ++static struct block_device_operations clo_fops =
  4.2096 + {
  4.2097 +         owner:		THIS_MODULE,
  4.2098 +         open:           cloop_open,
  4.2099 +@@ -1105,12 +1177,6 @@
  4.2100 + 	/* locked_ioctl ceased to exist in 2.6.36 */
  4.2101 + };
  4.2102 + 
  4.2103 +-static const struct blk_mq_ops cloop_mq_ops = {
  4.2104 +-	.queue_rq       = cloop_queue_rq,
  4.2105 +-/*	.init_request	= cloop_init_request, */
  4.2106 +-/*	.complete	= cloop_complete_rq, */
  4.2107 +-};
  4.2108 +-
  4.2109 + static int cloop_register_blkdev(int major_nr)
  4.2110 + {
  4.2111 +  return register_blkdev(major_nr, cloop_name);
  4.2112 +@@ -1124,37 +1190,33 @@
  4.2113 + 
  4.2114 + static int cloop_alloc(int cloop_num)
  4.2115 + {
  4.2116 +- struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
  4.2117 ++ struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));;
  4.2118 +  if(clo == NULL) goto error_out;
  4.2119 +  cloop_dev[cloop_num] = clo;
  4.2120 +  memset(clo, 0, sizeof(struct cloop_device));
  4.2121 +  clo->clo_number = cloop_num;
  4.2122 +- clo->tag_set.ops = &cloop_mq_ops;
  4.2123 +- clo->tag_set.nr_hw_queues = 1;
  4.2124 +- clo->tag_set.queue_depth = 128;
  4.2125 +- clo->tag_set.numa_node = NUMA_NO_NODE;
  4.2126 +- clo->tag_set.cmd_size = 0; /* No extra data needed */
  4.2127 +- /* BLK_MQ_F_BLOCKING is extremely important if we want to call blocking functions like vfs_read */
  4.2128 +- clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
  4.2129 +- clo->tag_set.driver_data = clo;
  4.2130 +- if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
  4.2131 +- clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
  4.2132 +- if(IS_ERR(clo->clo_queue))
  4.2133 ++ clo->clo_thread = NULL;
  4.2134 ++ init_waitqueue_head(&clo->clo_event);
  4.2135 ++ spin_lock_init(&clo->queue_lock);
  4.2136 ++ mutex_init(&clo->clo_ctl_mutex);
  4.2137 ++ INIT_LIST_HEAD(&clo->clo_list);
  4.2138 ++ clo->clo_queue = blk_init_queue(cloop_do_request, &clo->queue_lock);
  4.2139 ++ if(!clo->clo_queue)
  4.2140 +   {
  4.2141 +    printk(KERN_ERR "%s: Unable to alloc queue[%d]\n", cloop_name, cloop_num);
  4.2142 +-   goto error_out_free_tags;
  4.2143 ++   goto error_out;
  4.2144 +   }
  4.2145 +  clo->clo_queue->queuedata = clo;
  4.2146 +- blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
  4.2147 ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
  4.2148 ++ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
  4.2149 ++ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
  4.2150 ++#endif
  4.2151 +  clo->clo_disk = alloc_disk(1);
  4.2152 +  if(!clo->clo_disk)
  4.2153 +   {
  4.2154 +    printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
  4.2155 +-   goto error_out_free_queue;
  4.2156 ++   goto error_disk;
  4.2157 +   }
  4.2158 +- spin_lock_init(&clo->queue_lock);
  4.2159 +- mutex_init(&clo->clo_ctl_mutex);
  4.2160 +- mutex_init(&clo->clo_rq_mutex);
  4.2161 +  clo->clo_disk->major = cloop_major;
  4.2162 +  clo->clo_disk->first_minor = cloop_num;
  4.2163 +  clo->clo_disk->fops = &clo_fops;
  4.2164 +@@ -1163,12 +1225,8 @@
  4.2165 +  sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
  4.2166 +  add_disk(clo->clo_disk);
  4.2167 +  return 0;
  4.2168 +-error_out_free_queue:
  4.2169 ++error_disk:
  4.2170 +  blk_cleanup_queue(clo->clo_queue);
  4.2171 +-error_out_free_tags:
  4.2172 +- blk_mq_free_tag_set(&clo->tag_set);
  4.2173 +-error_out_free_clo:
  4.2174 +- cloop_free(clo, sizeof(struct cloop_device));
  4.2175 + error_out:
  4.2176 +  return -ENOMEM;
  4.2177 + }
  4.2178 +@@ -1179,7 +1237,6 @@
  4.2179 +  if(clo == NULL) return;
  4.2180 +  del_gendisk(clo->clo_disk);
  4.2181 +  blk_cleanup_queue(clo->clo_queue);
  4.2182 +- blk_mq_free_tag_set(&clo->tag_set);
  4.2183 +  put_disk(clo->clo_disk);
  4.2184 +  cloop_free(clo, sizeof(struct cloop_device));
  4.2185 +  cloop_dev[cloop_num] = NULL;
  4.2186 +--- cloop_suspend.c
  4.2187 ++++ cloop_suspend.c
  4.2188 +@@ -14,6 +14,7 @@
  4.2189 + #include <fcntl.h>
  4.2190 + #include <unistd.h>
  4.2191 + #include <stdio.h>
  4.2192 ++#include <stdint.h>
  4.2193 + 
  4.2194 + /* We don't use the structure, so that define does not hurt */
  4.2195 + #define dev_t int
     5.1 --- a/wbar/receipt	Sun May 08 13:06:36 2022 +0000
     5.2 +++ b/wbar/receipt	Sun May 08 16:45:21 2022 +0000
     5.3 @@ -8,7 +8,7 @@
     5.4  LICENSE="GPL"
     5.5  TARBALL="$PACKAGE-$VERSION.tbz2"
     5.6  WEB_SITE="https://github.com/rodolf0/wbar"
     5.7 -WGET_URL="http://www.tecapli.com.ar/warlock/$TARBALL"
     5.8 +WGET_URL="https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/$PACKAGE/$TARBALL"
     5.9  CONFIG_FILES="/etc/wbar/dot.wbar"
    5.10  TAGS="desktop launchbar"
    5.11  
     6.1 --- a/xa/receipt	Sun May 08 13:06:36 2022 +0000
     6.2 +++ b/xa/receipt	Sun May 08 16:45:21 2022 +0000
     6.3 @@ -10,7 +10,7 @@
     6.4  WEB_SITE="https://www.floodgap.com/retrotech/xa/"
     6.5  
     6.6  TARBALL="$PACKAGE-$VERSION.tar.gz"
     6.7 -WGET_URL="${WEB_SITE}dists/$TARBALL"
     6.8 +WGET_URL="${WEB_SITE}dists/unsupported/$TARBALL"
     6.9  
    6.10  # What is the latest version available today?
    6.11  current_version()
     7.1 --- a/xautomation/receipt	Sun May 08 13:06:36 2022 +0000
     7.2 +++ b/xautomation/receipt	Sun May 08 16:45:21 2022 +0000
     7.3 @@ -7,9 +7,8 @@
     7.4  MAINTAINER="pankso@slitaz.org"
     7.5  LICENSE="GPL2"
     7.6  TARBALL="$PACKAGE-$VERSION.tar.gz"
     7.7 -#WEB_SITE="http://hoopajoo.net/projects/xautomation.html"
     7.8 -WEB_SITE="https://sourceforge.net/projects/xautomation/"
     7.9 -WGET_URL="http://hoopajoo.net/static/projects/$TARBALL"
    7.10 +WEB_SITE="https://www.hoopajoo.net/projects/xautomation.html"
    7.11 +WGET_URL="https://www.hoopajoo.net/static/projects/$TARBALL"
    7.12  #HOST_ARCH="i486 arm"
    7.13  
    7.14  DEPENDS="xorg-libX11 libpng"
     8.1 --- a/xcursor-aero/receipt	Sun May 08 13:06:36 2022 +0000
     8.2 +++ b/xcursor-aero/receipt	Sun May 08 16:45:21 2022 +0000
     8.3 @@ -8,7 +8,7 @@
     8.4  LICENSE="GPL"
     8.5  WEB_SITE="https://www.gnome-look.org/p/999972/"
     8.6  TARBALL="$PACKAGE-$VERSION.tar.gz"
     8.7 -WGET_URL="http://www.infinality.net/files/aero.tar.gz"
     8.8 +WGET_URL="https://github.com/Infinality/mouse-cursors/raw/master/aero.tar.gz"
     8.9  TAGS="cursor-theme"
    8.10  
    8.11  DEPENDS="xorg-libXcursor"
     9.1 --- a/xcursor-comix/receipt	Sun May 08 13:06:36 2022 +0000
     9.2 +++ b/xcursor-comix/receipt	Sun May 08 16:45:21 2022 +0000
     9.3 @@ -1,7 +1,7 @@
     9.4  # SliTaz package receipt.
     9.5  
     9.6  PACKAGE="xcursor-comix"
     9.7 -VERSION="0.9.1"
     9.8 +VERSION="0.9.2"
     9.9  CATEGORY="customization"
    9.10  TAGS="cursor-theme"
    9.11  SHORT_DESC="Comix cursor theme."
    9.12 @@ -10,6 +10,7 @@
    9.13  WEB_SITE="https://limitland.de/comixcursors"
    9.14  
    9.15  TARBALL="ComixCursors-$VERSION.tar.bz2"
    9.16 +#WGET_URL="https://gitlab.com/limitland/comixcursors/-/archive/$VERSION/comixcursors-$VERSION.tar.bz2"
    9.17  WGET_URL="https://limitland.gitlab.io/comixcursors/$TARBALL"
    9.18  
    9.19  DEPENDS="xorg-libXcursor"