wok-6.x rev 7650
Add squashfs+xz to kernel. This allows for greater compression than lzma and it works without causing a kernel oops, as long as the custom code for freeing the lzma initramfs stays with the lzma initramfs.
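A rough sketch of how an xz-compressed squashfs image would then be produced (assuming a squashfs-tools build with xz support; the mksquashfs invocation and block size are illustrative, not part of this changeset, and CONFIG_SQUASHFS_XZ is the option presumably added by patch 005 — only CONFIG_XZ_DEC itself appears in the diff below):

    # hypothetical usage, not part of this changeset
    mksquashfs rootfs/ rootfs.squashfs -comp xz -b 262144
    # kernel side: enable the new decompressor and squashfs xz support,
    # e.g. in linux/stuff/linux-2.6.36-slitaz.config
    #   CONFIG_XZ_DEC=y
    #   CONFIG_SQUASHFS_XZ=y   (assumed option name, from patch 005)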
author | Christopher Rogers <slaxemulator@gmail.com> |
date | Tue Dec 14 21:45:09 2010 +0000 (2010-12-14) |
parents | 13e4d4e6fcc1 |
children | 5e64ecd01686 |
files | linux/receipt linux/stuff/001-squashfs-decompressors-add-xz-decompressor-module.patch linux/stuff/002-squashfs-decompressors-add-boot-time-xz-support.patch linux/stuff/003-squashfs-x86-support-xz-compressed-kernel.patch linux/stuff/004-squashfs-add-xz-compression-support.patch linux/stuff/005-squashfs-add-xz-compression-configuration-option.patch linux/stuff/linux-2.6.36-slitaz.config linux/stuff/modules-2.6.36.list |
line diff
1.1 --- a/linux/receipt Tue Dec 14 21:34:46 2010 +0000 1.2 +++ b/linux/receipt Tue Dec 14 21:45:09 2010 +0000 1.3 @@ -26,13 +26,13 @@ 1.4 TARBALL=$SOURCES_REPOSITORY/$AUFSDIR.tar.gz 1.5 if [ -f $TARBALL ]; then 1.6 tar xzf $TARBALL 1.7 - cd $AUFSDIR && git checkout origin/aufs2 1.8 + cd $AUFSDIR && git checkout origin/aufs2.1-36 1.9 cd $WOK/$PACKAGE 1.10 else 1.11 # Aufs2 from git repository 1.12 git clone http://git.c3sl.ufpr.br/pub/scm/aufs/aufs2-standalone.git $AUFSDIR 1.13 tar czf $TARBALL $AUFSDIR 1.14 - cd $AUFSDIR && git checkout origin/aufs2 1.15 + cd $AUFSDIR && git checkout origin/aufs2.1-36 1.16 cd $WOK/$PACKAGE 1.17 fi 1.18 cp -a $AUFSDIR/Documentation $AUFSDIR/fs $AUFSDIR/include $src 1.19 @@ -64,8 +64,11 @@ 1.20 $PACKAGE-freeinitrd-$VERSION.u 1.21 aufs2-base.patch 1.22 aufs2-standalone.patch 1.23 -aufs2-module-2.6.36.patch 1.24 -aufs2-2.6.36-fix.patch 1.25 +001-squashfs-decompressors-add-xz-decompressor-module.patch 1.26 +002-squashfs-decompressors-add-boot-time-xz-support.patch 1.27 +003-squashfs-x86-support-xz-compressed-kernel.patch 1.28 +004-squashfs-add-xz-compression-support.patch 1.29 +005-squashfs-add-xz-compression-configuration-option.patch 1.30 EOT 1.31 make mrproper 1.32 cd Documentation/lguest
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 2.2 +++ b/linux/stuff/001-squashfs-decompressors-add-xz-decompressor-module.patch Tue Dec 14 21:45:09 2010 +0000 2.3 @@ -0,0 +1,3934 @@ 2.4 +From: Lasse Collin <lasse.collin@tukaani.org> 2.5 +Date: Thu, 2 Dec 2010 19:14:19 +0000 (+0200) 2.6 +Subject: Decompressors: Add XZ decompressor module 2.7 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fpkl%2Fsquashfs-xz.git;a=commitdiff_plain;h=3dbc3fe7878e53b43064a12d4ab31ca4c18ce85f 2.8 + 2.9 +Decompressors: Add XZ decompressor module 2.10 + 2.11 +In userspace, the .lzma format has become mostly a legacy 2.12 +file format that got superseded by the .xz format. Similarly, 2.13 +LZMA Utils was superseded by XZ Utils. 2.14 + 2.15 +These patches add support for XZ decompression into 2.16 +the kernel. Most of the code is as is from XZ Embedded 2.17 +<http://tukaani.org/xz/embedded.html>. It was written for 2.18 +the Linux kernel but is usable in other projects too. 2.19 + 2.20 +Advantages of XZ over the current LZMA code in the kernel: 2.21 + - Nice API that can be used by other kernel modules; it's 2.22 + not limited to kernel, initramfs, and initrd decompression. 2.23 + - Integrity check support (CRC32) 2.24 + - BCJ filters improve compression of executable code on 2.25 + certain architectures. These together with LZMA2 can 2.26 + produce a few percent smaller kernel or Squashfs images 2.27 + than plain LZMA without making the decompression slower. 2.28 + 2.29 +This patch: Add the main decompression code (xz_dec), testing 2.30 +module (xz_dec_test), wrapper script (xz_wrap.sh) for the xz 2.31 +command line tool, and documentation. The xz_dec module is 2.32 +enough to have a usable XZ decompressor e.g. for Squashfs. 2.33 + 2.34 +Signed-off-by: Lasse Collin <lasse.collin@tukaani.org> 2.35 +--- 2.36 + 2.37 +diff --git a/Documentation/xz.txt b/Documentation/xz.txt 2.38 +new file mode 100644 2.39 +index 0000000..68329ac 2.40 +--- /dev/null 2.41 ++++ b/Documentation/xz.txt 2.42 +@@ -0,0 +1,122 @@ 2.43 ++ 2.44 ++XZ data compression in Linux 2.45 ++============================ 2.46 ++ 2.47 ++Introduction 2.48 ++ 2.49 ++ XZ is a general purpose data compression format with high compression 2.50 ++ ratio and relatively fast decompression. The primary compression 2.51 ++ algorithm (filter) is LZMA2. Additional filters can be used to improve 2.52 ++ compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters 2.53 ++ improve compression ratio of executable data. 2.54 ++ 2.55 ++ The XZ decompressor in Linux is called XZ Embedded. It supports 2.56 ++ the LZMA2 filter and optionally also BCJ filters. CRC32 is supported 2.57 ++ for integrity checking. The home page of XZ Embedded is at 2.58 ++ <http://tukaani.org/xz/embedded.html>, where you can find the 2.59 ++ latest version and also information about using the code outside 2.60 ++ the Linux kernel. 2.61 ++ 2.62 ++ For userspace, XZ Utils provide a zlib-like compression library 2.63 ++ and a gzip-like command line tool. XZ Utils can be downloaded from 2.64 ++ <http://tukaani.org/xz/>. 2.65 ++ 2.66 ++XZ related components in the kernel 2.67 ++ 2.68 ++ The xz_dec module provides XZ decompressor with single-call (buffer 2.69 ++ to buffer) and multi-call (stateful) APIs. The usage of the xz_dec 2.70 ++ module is documented in include/linux/xz.h. 2.71 ++ 2.72 ++ The xz_dec_test module is for testing xz_dec. xz_dec_test is not 2.73 ++ useful unless you are hacking the XZ decompressor. 
xz_dec_test 2.74 ++ allocates a char device major dynamically to which one can write 2.75 ++ .xz files from userspace. The decompressed output is thrown away. 2.76 ++ Keep an eye on dmesg to see diagnostics printed by xz_dec_test. 2.77 ++ See the xz_dec_test source code for the details. 2.78 ++ 2.79 ++ For decompressing the kernel image, initramfs, and initrd, there 2.80 ++ is a wrapper function in lib/decompress_unxz.c. Its API is the 2.81 ++ same as in other decompress_*.c files, which is defined in 2.82 ++ include/linux/decompress/generic.h. 2.83 ++ 2.84 ++ scripts/xz_wrap.sh is a wrapper for the xz command line tool found 2.85 ++ from XZ Utils. The wrapper sets compression options to values suitable 2.86 ++ for compressing the kernel image. 2.87 ++ 2.88 ++ For kernel makefiles, two commands are provided for use with 2.89 ++ $(call if_needed). The kernel image should be compressed with 2.90 ++ $(call if_needed,xzkern) which will use a BCJ filter and a big LZMA2 2.91 ++ dictionary. It will also append a four-byte trailer containing the 2.92 ++ uncompressed size of the file, which is needed by the boot code. 2.93 ++ Other things should be compressed with $(call if_needed,xzmisc) 2.94 ++ which will use no BCJ filter and 1 MiB LZMA2 dictionary. 2.95 ++ 2.96 ++Notes on compression options 2.97 ++ 2.98 ++ Since the XZ Embedded supports only streams with no integrity check or 2.99 ++ CRC32, make sure that you don't use some other integrity check type 2.100 ++ when encoding files that are supposed to be decoded by the kernel. With 2.101 ++ liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32 2.102 ++ when encoding. With the xz command line tool, use --check=none or 2.103 ++ --check=crc32. 2.104 ++ 2.105 ++ Using CRC32 is strongly recommended unless there is some other layer 2.106 ++ which will verify the integrity of the uncompressed data anyway. 2.107 ++ Double checking the integrity would probably be waste of CPU cycles. 2.108 ++ Note that the headers will always have a CRC32 which will be validated 2.109 ++ by the decoder; you can only change the integrity check type (or 2.110 ++ disable it) for the actual uncompressed data. 2.111 ++ 2.112 ++ In userspace, LZMA2 is typically used with dictionary sizes of several 2.113 ++ megabytes. The decoder needs to have the dictionary in RAM, thus big 2.114 ++ dictionaries cannot be used for files that are intended to be decoded 2.115 ++ by the kernel. 1 MiB is probably the maximum reasonable dictionary 2.116 ++ size for in-kernel use (maybe more is OK for initramfs). The presets 2.117 ++ in XZ Utils may not be optimal when creating files for the kernel, 2.118 ++ so don't hesitate to use custom settings. Example: 2.119 ++ 2.120 ++ xz --check=crc32 --lzma2=dict=512KiB inputfile 2.121 ++ 2.122 ++ An exception to above dictionary size limitation is when the decoder 2.123 ++ is used in single-call mode. Decompressing the kernel itself is an 2.124 ++ example of this situation. In single-call mode, the memory usage 2.125 ++ doesn't depend on the dictionary size, and it is perfectly fine to 2.126 ++ use a big dictionary: for maximum compression, the dictionary should 2.127 ++ be at least as big as the uncompressed data itself. 2.128 ++ 2.129 ++Future plans 2.130 ++ 2.131 ++ Creating a limited XZ encoder may be considered if people think it is 2.132 ++ useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at 2.133 ++ the fastest settings, so it isn't clear if LZMA2 encoder is wanted 2.134 ++ into the kernel. 
2.135 ++ 2.136 ++ Support for limited random-access reading is planned for the 2.137 ++ decompression code. I don't know if it could have any use in the 2.138 ++ kernel, but I know that it would be useful in some embedded projects 2.139 ++ outside the Linux kernel. 2.140 ++ 2.141 ++Conformance to the .xz file format specification 2.142 ++ 2.143 ++ There are a couple of corner cases where things have been simplified 2.144 ++ at expense of detecting errors as early as possible. These should not 2.145 ++ matter in practice all, since they don't cause security issues. But 2.146 ++ it is good to know this if testing the code e.g. with the test files 2.147 ++ from XZ Utils. 2.148 ++ 2.149 ++Reporting bugs 2.150 ++ 2.151 ++ Before reporting a bug, please check that it's not fixed already 2.152 ++ at upstream. See <http://tukaani.org/xz/embedded.html> to get the 2.153 ++ latest code. 2.154 ++ 2.155 ++ Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on 2.156 ++ Freenode and talk to Larhzu. I don't actively read LKML or other 2.157 ++ kernel-related mailing lists, so if there's something I should know, 2.158 ++ you should email to me personally or use IRC. 2.159 ++ 2.160 ++ Don't bother Igor Pavlov with questions about the XZ implementation 2.161 ++ in the kernel or about XZ Utils. While these two implementations 2.162 ++ include essential code that is directly based on Igor Pavlov's code, 2.163 ++ these implementations aren't maintained nor supported by him. 2.164 ++ 2.165 +diff --git a/include/linux/xz.h b/include/linux/xz.h 2.166 +new file mode 100644 2.167 +index 0000000..64cffa6 2.168 +--- /dev/null 2.169 ++++ b/include/linux/xz.h 2.170 +@@ -0,0 +1,264 @@ 2.171 ++/* 2.172 ++ * XZ decompressor 2.173 ++ * 2.174 ++ * Authors: Lasse Collin <lasse.collin@tukaani.org> 2.175 ++ * Igor Pavlov <http://7-zip.org/> 2.176 ++ * 2.177 ++ * This file has been put into the public domain. 2.178 ++ * You can do whatever you want with this file. 2.179 ++ */ 2.180 ++ 2.181 ++#ifndef XZ_H 2.182 ++#define XZ_H 2.183 ++ 2.184 ++#ifdef __KERNEL__ 2.185 ++# include <linux/stddef.h> 2.186 ++# include <linux/types.h> 2.187 ++#else 2.188 ++# include <stddef.h> 2.189 ++# include <stdint.h> 2.190 ++#endif 2.191 ++ 2.192 ++/* In Linux, this is used to make extern functions static when needed. */ 2.193 ++#ifndef XZ_EXTERN 2.194 ++# define XZ_EXTERN extern 2.195 ++#endif 2.196 ++ 2.197 ++/** 2.198 ++ * enum xz_mode - Operation mode 2.199 ++ * 2.200 ++ * @XZ_SINGLE: Single-call mode. This uses less RAM than 2.201 ++ * than multi-call modes, because the LZMA2 2.202 ++ * dictionary doesn't need to be allocated as 2.203 ++ * part of the decoder state. All required data 2.204 ++ * structures are allocated at initialization, 2.205 ++ * so xz_dec_run() cannot return XZ_MEM_ERROR. 2.206 ++ * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2 2.207 ++ * dictionary buffer. All data structures are 2.208 ++ * allocated at initialization, so xz_dec_run() 2.209 ++ * cannot return XZ_MEM_ERROR. 2.210 ++ * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is 2.211 ++ * allocated once the required size has been 2.212 ++ * parsed from the stream headers. If the 2.213 ++ * allocation fails, xz_dec_run() will return 2.214 ++ * XZ_MEM_ERROR. 2.215 ++ * 2.216 ++ * It is possible to enable support only for a subset of the above 2.217 ++ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC, 2.218 ++ * or XZ_DEC_DYNALLOC. 
The xz_dec kernel module is always compiled 2.219 ++ * with support for all operation modes, but the preboot code may 2.220 ++ * be built with fewer features to minimize code size. 2.221 ++ */ 2.222 ++enum xz_mode { 2.223 ++ XZ_SINGLE, 2.224 ++ XZ_PREALLOC, 2.225 ++ XZ_DYNALLOC 2.226 ++}; 2.227 ++ 2.228 ++/** 2.229 ++ * enum xz_ret - Return codes 2.230 ++ * @XZ_OK: Everything is OK so far. More input or more 2.231 ++ * output space is required to continue. This 2.232 ++ * return code is possible only in multi-call mode 2.233 ++ * (XZ_PREALLOC or XZ_DYNALLOC). 2.234 ++ * @XZ_STREAM_END: Operation finished successfully. 2.235 ++ * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding 2.236 ++ * is still possible in multi-call mode by simply 2.237 ++ * calling xz_dec_run() again. 2.238 ++ * Note that this return value is used only if 2.239 ++ * XZ_DEC_ANY_CHECK was defined at build time, 2.240 ++ * which is not used in the kernel. Unsupported 2.241 ++ * check types return XZ_OPTIONS_ERROR if 2.242 ++ * XZ_DEC_ANY_CHECK was not defined at build time. 2.243 ++ * @XZ_MEM_ERROR: Allocating memory failed. This return code is 2.244 ++ * possible only if the decoder was initialized 2.245 ++ * with XZ_DYNALLOC. The amount of memory that was 2.246 ++ * tried to be allocated was no more than the 2.247 ++ * dict_max argument given to xz_dec_init(). 2.248 ++ * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than 2.249 ++ * allowed by the dict_max argument given to 2.250 ++ * xz_dec_init(). This return value is possible 2.251 ++ * only in multi-call mode (XZ_PREALLOC or 2.252 ++ * XZ_DYNALLOC); the single-call mode (XZ_SINGLE) 2.253 ++ * ignores the dict_max argument. 2.254 ++ * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic 2.255 ++ * bytes). 2.256 ++ * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested 2.257 ++ * compression options. In the decoder this means 2.258 ++ * that the header CRC32 matches, but the header 2.259 ++ * itself specifies something that we don't support. 2.260 ++ * @XZ_DATA_ERROR: Compressed data is corrupt. 2.261 ++ * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly 2.262 ++ * different between multi-call and single-call 2.263 ++ * mode; more information below. 2.264 ++ * 2.265 ++ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls 2.266 ++ * to XZ code cannot consume any input and cannot produce any new output. 2.267 ++ * This happens when there is no new input available, or the output buffer 2.268 ++ * is full while at least one output byte is still pending. Assuming your 2.269 ++ * code is not buggy, you can get this error only when decoding a compressed 2.270 ++ * stream that is truncated or otherwise corrupt. 2.271 ++ * 2.272 ++ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer 2.273 ++ * is too small or the compressed input is corrupt in a way that makes the 2.274 ++ * decoder produce more output than the caller expected. When it is 2.275 ++ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR 2.276 ++ * is used instead of XZ_BUF_ERROR. 
2.277 ++ */ 2.278 ++enum xz_ret { 2.279 ++ XZ_OK, 2.280 ++ XZ_STREAM_END, 2.281 ++ XZ_UNSUPPORTED_CHECK, 2.282 ++ XZ_MEM_ERROR, 2.283 ++ XZ_MEMLIMIT_ERROR, 2.284 ++ XZ_FORMAT_ERROR, 2.285 ++ XZ_OPTIONS_ERROR, 2.286 ++ XZ_DATA_ERROR, 2.287 ++ XZ_BUF_ERROR 2.288 ++}; 2.289 ++ 2.290 ++/** 2.291 ++ * struct xz_buf - Passing input and output buffers to XZ code 2.292 ++ * @in: Beginning of the input buffer. This may be NULL if and only 2.293 ++ * if in_pos is equal to in_size. 2.294 ++ * @in_pos: Current position in the input buffer. This must not exceed 2.295 ++ * in_size. 2.296 ++ * @in_size: Size of the input buffer 2.297 ++ * @out: Beginning of the output buffer. This may be NULL if and only 2.298 ++ * if out_pos is equal to out_size. 2.299 ++ * @out_pos: Current position in the output buffer. This must not exceed 2.300 ++ * out_size. 2.301 ++ * @out_size: Size of the output buffer 2.302 ++ * 2.303 ++ * Only the contents of the output buffer from out[out_pos] onward, and 2.304 ++ * the variables in_pos and out_pos are modified by the XZ code. 2.305 ++ */ 2.306 ++struct xz_buf { 2.307 ++ const uint8_t *in; 2.308 ++ size_t in_pos; 2.309 ++ size_t in_size; 2.310 ++ 2.311 ++ uint8_t *out; 2.312 ++ size_t out_pos; 2.313 ++ size_t out_size; 2.314 ++}; 2.315 ++ 2.316 ++/** 2.317 ++ * struct xz_dec - Opaque type to hold the XZ decoder state 2.318 ++ */ 2.319 ++struct xz_dec; 2.320 ++ 2.321 ++/** 2.322 ++ * xz_dec_init() - Allocate and initialize a XZ decoder state 2.323 ++ * @mode: Operation mode 2.324 ++ * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for 2.325 ++ * multi-call decoding. This is ignored in single-call mode 2.326 ++ * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes 2.327 ++ * or 2^n + 2^(n-1) bytes (the latter sizes are less common 2.328 ++ * in practice), so other values for dict_max don't make sense. 2.329 ++ * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB, 2.330 ++ * 512 KiB, and 1 MiB are probably the only reasonable values, 2.331 ++ * except for kernel and initramfs images where a bigger 2.332 ++ * dictionary can be fine and useful. 2.333 ++ * 2.334 ++ * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at 2.335 ++ * once. The caller must provide enough output space or the decoding will 2.336 ++ * fail. The output space is used as the dictionary buffer, which is why 2.337 ++ * there is no need to allocate the dictionary as part of the decoder's 2.338 ++ * internal state. 2.339 ++ * 2.340 ++ * Because the output buffer is used as the workspace, streams encoded using 2.341 ++ * a big dictionary are not a problem in single-call mode. It is enough that 2.342 ++ * the output buffer is big enough to hold the actual uncompressed data; it 2.343 ++ * can be smaller than the dictionary size stored in the stream headers. 2.344 ++ * 2.345 ++ * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes 2.346 ++ * of memory is preallocated for the LZMA2 dictionary. This way there is no 2.347 ++ * risk that xz_dec_run() could run out of memory, since xz_dec_run() will 2.348 ++ * never allocate any memory. Instead, if the preallocated dictionary is too 2.349 ++ * small for decoding the given input stream, xz_dec_run() will return 2.350 ++ * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be 2.351 ++ * decoded to avoid allocating excessive amount of memory for the dictionary. 
2.352 ++ * 2.353 ++ * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC): 2.354 ++ * dict_max specifies the maximum allowed dictionary size that xz_dec_run() 2.355 ++ * may allocate once it has parsed the dictionary size from the stream 2.356 ++ * headers. This way excessive allocations can be avoided while still 2.357 ++ * limiting the maximum memory usage to a sane value to prevent running the 2.358 ++ * system out of memory when decompressing streams from untrusted sources. 2.359 ++ * 2.360 ++ * On success, xz_dec_init() returns a pointer to struct xz_dec, which is 2.361 ++ * ready to be used with xz_dec_run(). If memory allocation fails, 2.362 ++ * xz_dec_init() returns NULL. 2.363 ++ */ 2.364 ++XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); 2.365 ++ 2.366 ++/** 2.367 ++ * xz_dec_run() - Run the XZ decoder 2.368 ++ * @s: Decoder state allocated using xz_dec_init() 2.369 ++ * @b: Input and output buffers 2.370 ++ * 2.371 ++ * The possible return values depend on build options and operation mode. 2.372 ++ * See enum xz_ret for details. 2.373 ++ * 2.374 ++ * Note that if an error occurs in single-call mode (return value is not 2.375 ++ * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the 2.376 ++ * contents of the output buffer from b->out[b->out_pos] onward are 2.377 ++ * undefined. This is true even after XZ_BUF_ERROR, because with some filter 2.378 ++ * chains, there may be a second pass over the output buffer, and this pass 2.379 ++ * cannot be properly done if the output buffer is truncated. Thus, you 2.380 ++ * cannot give the single-call decoder a too small buffer and then expect to 2.381 ++ * get that amount valid data from the beginning of the stream. You must use 2.382 ++ * the multi-call decoder if you don't want to uncompress the whole stream. 2.383 ++ */ 2.384 ++XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); 2.385 ++ 2.386 ++/** 2.387 ++ * xz_dec_reset() - Reset an already allocated decoder state 2.388 ++ * @s: Decoder state allocated using xz_dec_init() 2.389 ++ * 2.390 ++ * This function can be used to reset the multi-call decoder state without 2.391 ++ * freeing and reallocating memory with xz_dec_end() and xz_dec_init(). 2.392 ++ * 2.393 ++ * In single-call mode, xz_dec_reset() is always called in the beginning of 2.394 ++ * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in 2.395 ++ * multi-call mode. 2.396 ++ */ 2.397 ++XZ_EXTERN void xz_dec_reset(struct xz_dec *s); 2.398 ++ 2.399 ++/** 2.400 ++ * xz_dec_end() - Free the memory allocated for the decoder state 2.401 ++ * @s: Decoder state allocated using xz_dec_init(). If s is NULL, 2.402 ++ * this function does nothing. 2.403 ++ */ 2.404 ++XZ_EXTERN void xz_dec_end(struct xz_dec *s); 2.405 ++ 2.406 ++/* 2.407 ++ * Standalone build (userspace build or in-kernel build for boot time use) 2.408 ++ * needs a CRC32 implementation. For normal in-kernel use, kernel's own 2.409 ++ * CRC32 module is used instead, and users of this module don't need to 2.410 ++ * care about the functions below. 2.411 ++ */ 2.412 ++#ifndef XZ_INTERNAL_CRC32 2.413 ++# ifdef __KERNEL__ 2.414 ++# define XZ_INTERNAL_CRC32 0 2.415 ++# else 2.416 ++# define XZ_INTERNAL_CRC32 1 2.417 ++# endif 2.418 ++#endif 2.419 ++ 2.420 ++#if XZ_INTERNAL_CRC32 2.421 ++/* 2.422 ++ * This must be called before any other xz_* function to initialize 2.423 ++ * the CRC32 lookup table. 
2.424 ++ */ 2.425 ++XZ_EXTERN void xz_crc32_init(void); 2.426 ++ 2.427 ++/* 2.428 ++ * Update CRC32 value using the polynomial from IEEE-802.3. To start a new 2.429 ++ * calculation, the third argument must be zero. To continue the calculation, 2.430 ++ * the previously returned value is passed as the third argument. 2.431 ++ */ 2.432 ++XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc); 2.433 ++#endif 2.434 ++#endif 2.435 +diff --git a/lib/Kconfig b/lib/Kconfig 2.436 +index fa9bf2c..6090314 100644 2.437 +--- a/lib/Kconfig 2.438 ++++ b/lib/Kconfig 2.439 +@@ -106,6 +106,8 @@ config LZO_COMPRESS 2.440 + config LZO_DECOMPRESS 2.441 + tristate 2.442 + 2.443 ++source "lib/xz/Kconfig" 2.444 ++ 2.445 + # 2.446 + # These all provide a common interface (hence the apparent duplication with 2.447 + # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) 2.448 +diff --git a/lib/Makefile b/lib/Makefile 2.449 +index e6a3763..f2f98dd 100644 2.450 +--- a/lib/Makefile 2.451 ++++ b/lib/Makefile 2.452 +@@ -69,6 +69,7 @@ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ 2.453 + obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ 2.454 + obj-$(CONFIG_LZO_COMPRESS) += lzo/ 2.455 + obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ 2.456 ++obj-$(CONFIG_XZ_DEC) += xz/ 2.457 + obj-$(CONFIG_RAID6_PQ) += raid6/ 2.458 + 2.459 + lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o 2.460 +diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig 2.461 +new file mode 100644 2.462 +index 0000000..e3b6e18 2.463 +--- /dev/null 2.464 ++++ b/lib/xz/Kconfig 2.465 +@@ -0,0 +1,59 @@ 2.466 ++config XZ_DEC 2.467 ++ tristate "XZ decompression support" 2.468 ++ select CRC32 2.469 ++ help 2.470 ++ LZMA2 compression algorithm and BCJ filters are supported using 2.471 ++ the .xz file format as the container. For integrity checking, 2.472 ++ CRC32 is supported. See Documentation/xz.txt for more information. 2.473 ++ 2.474 ++config XZ_DEC_X86 2.475 ++ bool "x86 BCJ filter decoder" if EMBEDDED 2.476 ++ default y 2.477 ++ depends on XZ_DEC 2.478 ++ select XZ_DEC_BCJ 2.479 ++ 2.480 ++config XZ_DEC_POWERPC 2.481 ++ bool "PowerPC BCJ filter decoder" if EMBEDDED 2.482 ++ default y 2.483 ++ depends on XZ_DEC 2.484 ++ select XZ_DEC_BCJ 2.485 ++ 2.486 ++config XZ_DEC_IA64 2.487 ++ bool "IA-64 BCJ filter decoder" if EMBEDDED 2.488 ++ default y 2.489 ++ depends on XZ_DEC 2.490 ++ select XZ_DEC_BCJ 2.491 ++ 2.492 ++config XZ_DEC_ARM 2.493 ++ bool "ARM BCJ filter decoder" if EMBEDDED 2.494 ++ default y 2.495 ++ depends on XZ_DEC 2.496 ++ select XZ_DEC_BCJ 2.497 ++ 2.498 ++config XZ_DEC_ARMTHUMB 2.499 ++ bool "ARM-Thumb BCJ filter decoder" if EMBEDDED 2.500 ++ default y 2.501 ++ depends on XZ_DEC 2.502 ++ select XZ_DEC_BCJ 2.503 ++ 2.504 ++config XZ_DEC_SPARC 2.505 ++ bool "SPARC BCJ filter decoder" if EMBEDDED 2.506 ++ default y 2.507 ++ depends on XZ_DEC 2.508 ++ select XZ_DEC_BCJ 2.509 ++ 2.510 ++config XZ_DEC_BCJ 2.511 ++ bool 2.512 ++ default n 2.513 ++ 2.514 ++config XZ_DEC_TEST 2.515 ++ tristate "XZ decompressor tester" 2.516 ++ default n 2.517 ++ depends on XZ_DEC 2.518 ++ help 2.519 ++ This allows passing .xz files to the in-kernel XZ decoder via 2.520 ++ a character special file. It calculates CRC32 of the decompressed 2.521 ++ data and writes diagnostics to the system log. 2.522 ++ 2.523 ++ Unless you are developing the XZ decoder, you don't need this 2.524 ++ and should say N. 
2.525 +diff --git a/lib/xz/Makefile b/lib/xz/Makefile 2.526 +new file mode 100644 2.527 +index 0000000..a7fa769 2.528 +--- /dev/null 2.529 ++++ b/lib/xz/Makefile 2.530 +@@ -0,0 +1,5 @@ 2.531 ++obj-$(CONFIG_XZ_DEC) += xz_dec.o 2.532 ++xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o 2.533 ++xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o 2.534 ++ 2.535 ++obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o 2.536 +diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c 2.537 +new file mode 100644 2.538 +index 0000000..34532d1 2.539 +--- /dev/null 2.540 ++++ b/lib/xz/xz_crc32.c 2.541 +@@ -0,0 +1,59 @@ 2.542 ++/* 2.543 ++ * CRC32 using the polynomial from IEEE-802.3 2.544 ++ * 2.545 ++ * Authors: Lasse Collin <lasse.collin@tukaani.org> 2.546 ++ * Igor Pavlov <http://7-zip.org/> 2.547 ++ * 2.548 ++ * This file has been put into the public domain. 2.549 ++ * You can do whatever you want with this file. 2.550 ++ */ 2.551 ++ 2.552 ++/* 2.553 ++ * This is not the fastest implementation, but it is pretty compact. 2.554 ++ * The fastest versions of xz_crc32() on modern CPUs without hardware 2.555 ++ * accelerated CRC instruction are 3-5 times as fast as this version, 2.556 ++ * but they are bigger and use more memory for the lookup table. 2.557 ++ */ 2.558 ++ 2.559 ++#include "xz_private.h" 2.560 ++ 2.561 ++/* 2.562 ++ * STATIC_RW_DATA is used in the pre-boot environment on some architectures. 2.563 ++ * See <linux/decompress/mm.h> for details. 2.564 ++ */ 2.565 ++#ifndef STATIC_RW_DATA 2.566 ++# define STATIC_RW_DATA static 2.567 ++#endif 2.568 ++ 2.569 ++STATIC_RW_DATA uint32_t xz_crc32_table[256]; 2.570 ++ 2.571 ++XZ_EXTERN void xz_crc32_init(void) 2.572 ++{ 2.573 ++ const uint32_t poly = 0xEDB88320; 2.574 ++ 2.575 ++ uint32_t i; 2.576 ++ uint32_t j; 2.577 ++ uint32_t r; 2.578 ++ 2.579 ++ for (i = 0; i < 256; ++i) { 2.580 ++ r = i; 2.581 ++ for (j = 0; j < 8; ++j) 2.582 ++ r = (r >> 1) ^ (poly & ~((r & 1) - 1)); 2.583 ++ 2.584 ++ xz_crc32_table[i] = r; 2.585 ++ } 2.586 ++ 2.587 ++ return; 2.588 ++} 2.589 ++ 2.590 ++XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc) 2.591 ++{ 2.592 ++ crc = ~crc; 2.593 ++ 2.594 ++ while (size != 0) { 2.595 ++ crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8); 2.596 ++ --size; 2.597 ++ } 2.598 ++ 2.599 ++ return ~crc; 2.600 ++} 2.601 +diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c 2.602 +new file mode 100644 2.603 +index 0000000..e51e255 2.604 +--- /dev/null 2.605 ++++ b/lib/xz/xz_dec_bcj.c 2.606 +@@ -0,0 +1,561 @@ 2.607 ++/* 2.608 ++ * Branch/Call/Jump (BCJ) filter decoders 2.609 ++ * 2.610 ++ * Authors: Lasse Collin <lasse.collin@tukaani.org> 2.611 ++ * Igor Pavlov <http://7-zip.org/> 2.612 ++ * 2.613 ++ * This file has been put into the public domain. 2.614 ++ * You can do whatever you want with this file. 2.615 ++ */ 2.616 ++ 2.617 ++#include "xz_private.h" 2.618 ++ 2.619 ++/* 2.620 ++ * The rest of the file is inside this ifdef. It makes things a little more 2.621 ++ * convenient when building without support for any BCJ filters. 
2.622 ++ */ 2.623 ++#ifdef XZ_DEC_BCJ 2.624 ++ 2.625 ++struct xz_dec_bcj { 2.626 ++ /* Type of the BCJ filter being used */ 2.627 ++ enum { 2.628 ++ BCJ_X86 = 4, /* x86 or x86-64 */ 2.629 ++ BCJ_POWERPC = 5, /* Big endian only */ 2.630 ++ BCJ_IA64 = 6, /* Big or little endian */ 2.631 ++ BCJ_ARM = 7, /* Little endian only */ 2.632 ++ BCJ_ARMTHUMB = 8, /* Little endian only */ 2.633 ++ BCJ_SPARC = 9 /* Big or little endian */ 2.634 ++ } type; 2.635 ++ 2.636 ++ /* 2.637 ++ * Return value of the next filter in the chain. We need to preserve 2.638 ++ * this information across calls, because we must not call the next 2.639 ++ * filter anymore once it has returned XZ_STREAM_END. 2.640 ++ */ 2.641 ++ enum xz_ret ret; 2.642 ++ 2.643 ++ /* True if we are operating in single-call mode. */ 2.644 ++ bool single_call; 2.645 ++ 2.646 ++ /* 2.647 ++ * Absolute position relative to the beginning of the uncompressed 2.648 ++ * data (in a single .xz Block). We care only about the lowest 32 2.649 ++ * bits so this doesn't need to be uint64_t even with big files. 2.650 ++ */ 2.651 ++ uint32_t pos; 2.652 ++ 2.653 ++ /* x86 filter state */ 2.654 ++ uint32_t x86_prev_mask; 2.655 ++ 2.656 ++ /* Temporary space to hold the variables from struct xz_buf */ 2.657 ++ uint8_t *out; 2.658 ++ size_t out_pos; 2.659 ++ size_t out_size; 2.660 ++ 2.661 ++ struct { 2.662 ++ /* Amount of already filtered data in the beginning of buf */ 2.663 ++ size_t filtered; 2.664 ++ 2.665 ++ /* Total amount of data currently stored in buf */ 2.666 ++ size_t size; 2.667 ++ 2.668 ++ /* 2.669 ++ * Buffer to hold a mix of filtered and unfiltered data. This 2.670 ++ * needs to be big enough to hold Alignment + 2 * Look-ahead: 2.671 ++ * 2.672 ++ * Type Alignment Look-ahead 2.673 ++ * x86 1 4 2.674 ++ * PowerPC 4 0 2.675 ++ * IA-64 16 0 2.676 ++ * ARM 4 0 2.677 ++ * ARM-Thumb 2 2 2.678 ++ * SPARC 4 0 2.679 ++ */ 2.680 ++ uint8_t buf[16]; 2.681 ++ } temp; 2.682 ++}; 2.683 ++ 2.684 ++#ifdef XZ_DEC_X86 2.685 ++/* 2.686 ++ * This is used to test the most significant byte of a memory address 2.687 ++ * in an x86 instruction. 
2.688 ++ */ 2.689 ++static inline int bcj_x86_test_msbyte(uint8_t b) 2.690 ++{ 2.691 ++ return b == 0x00 || b == 0xFF; 2.692 ++} 2.693 ++ 2.694 ++static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.695 ++{ 2.696 ++ static const bool mask_to_allowed_status[8] 2.697 ++ = { true, true, true, false, true, false, false, false }; 2.698 ++ 2.699 ++ static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 }; 2.700 ++ 2.701 ++ size_t i; 2.702 ++ size_t prev_pos = (size_t)-1; 2.703 ++ uint32_t prev_mask = s->x86_prev_mask; 2.704 ++ uint32_t src; 2.705 ++ uint32_t dest; 2.706 ++ uint32_t j; 2.707 ++ uint8_t b; 2.708 ++ 2.709 ++ if (size <= 4) 2.710 ++ return 0; 2.711 ++ 2.712 ++ size -= 4; 2.713 ++ for (i = 0; i < size; ++i) { 2.714 ++ if ((buf[i] & 0xFE) != 0xE8) 2.715 ++ continue; 2.716 ++ 2.717 ++ prev_pos = i - prev_pos; 2.718 ++ if (prev_pos > 3) { 2.719 ++ prev_mask = 0; 2.720 ++ } else { 2.721 ++ prev_mask = (prev_mask << (prev_pos - 1)) & 7; 2.722 ++ if (prev_mask != 0) { 2.723 ++ b = buf[i + 4 - mask_to_bit_num[prev_mask]]; 2.724 ++ if (!mask_to_allowed_status[prev_mask] 2.725 ++ || bcj_x86_test_msbyte(b)) { 2.726 ++ prev_pos = i; 2.727 ++ prev_mask = (prev_mask << 1) | 1; 2.728 ++ continue; 2.729 ++ } 2.730 ++ } 2.731 ++ } 2.732 ++ 2.733 ++ prev_pos = i; 2.734 ++ 2.735 ++ if (bcj_x86_test_msbyte(buf[i + 4])) { 2.736 ++ src = get_unaligned_le32(buf + i + 1); 2.737 ++ while (true) { 2.738 ++ dest = src - (s->pos + (uint32_t)i + 5); 2.739 ++ if (prev_mask == 0) 2.740 ++ break; 2.741 ++ 2.742 ++ j = mask_to_bit_num[prev_mask] * 8; 2.743 ++ b = (uint8_t)(dest >> (24 - j)); 2.744 ++ if (!bcj_x86_test_msbyte(b)) 2.745 ++ break; 2.746 ++ 2.747 ++ src = dest ^ (((uint32_t)1 << (32 - j)) - 1); 2.748 ++ } 2.749 ++ 2.750 ++ dest &= 0x01FFFFFF; 2.751 ++ dest |= (uint32_t)0 - (dest & 0x01000000); 2.752 ++ put_unaligned_le32(dest, buf + i + 1); 2.753 ++ i += 4; 2.754 ++ } else { 2.755 ++ prev_mask = (prev_mask << 1) | 1; 2.756 ++ } 2.757 ++ } 2.758 ++ 2.759 ++ prev_pos = i - prev_pos; 2.760 ++ s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1); 2.761 ++ return i; 2.762 ++} 2.763 ++#endif 2.764 ++ 2.765 ++#ifdef XZ_DEC_POWERPC 2.766 ++static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.767 ++{ 2.768 ++ size_t i; 2.769 ++ uint32_t instr; 2.770 ++ 2.771 ++ for (i = 0; i + 4 <= size; i += 4) { 2.772 ++ instr = get_unaligned_be32(buf + i); 2.773 ++ if ((instr & 0xFC000003) == 0x48000001) { 2.774 ++ instr &= 0x03FFFFFC; 2.775 ++ instr -= s->pos + (uint32_t)i; 2.776 ++ instr &= 0x03FFFFFC; 2.777 ++ instr |= 0x48000001; 2.778 ++ put_unaligned_be32(instr, buf + i); 2.779 ++ } 2.780 ++ } 2.781 ++ 2.782 ++ return i; 2.783 ++} 2.784 ++#endif 2.785 ++ 2.786 ++#ifdef XZ_DEC_IA64 2.787 ++static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.788 ++{ 2.789 ++ static const uint8_t branch_table[32] = { 2.790 ++ 0, 0, 0, 0, 0, 0, 0, 0, 2.791 ++ 0, 0, 0, 0, 0, 0, 0, 0, 2.792 ++ 4, 4, 6, 6, 0, 0, 7, 7, 2.793 ++ 4, 4, 0, 0, 4, 4, 0, 0 2.794 ++ }; 2.795 ++ 2.796 ++ /* 2.797 ++ * The local variables take a little bit stack space, but it's less 2.798 ++ * than what LZMA2 decoder takes, so it doesn't make sense to reduce 2.799 ++ * stack usage here without doing that for the LZMA2 decoder too. 
2.800 ++ */ 2.801 ++ 2.802 ++ /* Loop counters */ 2.803 ++ size_t i; 2.804 ++ size_t j; 2.805 ++ 2.806 ++ /* Instruction slot (0, 1, or 2) in the 128-bit instruction word */ 2.807 ++ uint32_t slot; 2.808 ++ 2.809 ++ /* Bitwise offset of the instruction indicated by slot */ 2.810 ++ uint32_t bit_pos; 2.811 ++ 2.812 ++ /* bit_pos split into byte and bit parts */ 2.813 ++ uint32_t byte_pos; 2.814 ++ uint32_t bit_res; 2.815 ++ 2.816 ++ /* Address part of an instruction */ 2.817 ++ uint32_t addr; 2.818 ++ 2.819 ++ /* Mask used to detect which instructions to convert */ 2.820 ++ uint32_t mask; 2.821 ++ 2.822 ++ /* 41-bit instruction stored somewhere in the lowest 48 bits */ 2.823 ++ uint64_t instr; 2.824 ++ 2.825 ++ /* Instruction normalized with bit_res for easier manipulation */ 2.826 ++ uint64_t norm; 2.827 ++ 2.828 ++ for (i = 0; i + 16 <= size; i += 16) { 2.829 ++ mask = branch_table[buf[i] & 0x1F]; 2.830 ++ for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) { 2.831 ++ if (((mask >> slot) & 1) == 0) 2.832 ++ continue; 2.833 ++ 2.834 ++ byte_pos = bit_pos >> 3; 2.835 ++ bit_res = bit_pos & 7; 2.836 ++ instr = 0; 2.837 ++ for (j = 0; j < 6; ++j) 2.838 ++ instr |= (uint64_t)(buf[i + j + byte_pos]) 2.839 ++ << (8 * j); 2.840 ++ 2.841 ++ norm = instr >> bit_res; 2.842 ++ 2.843 ++ if (((norm >> 37) & 0x0F) == 0x05 2.844 ++ && ((norm >> 9) & 0x07) == 0) { 2.845 ++ addr = (norm >> 13) & 0x0FFFFF; 2.846 ++ addr |= ((uint32_t)(norm >> 36) & 1) << 20; 2.847 ++ addr <<= 4; 2.848 ++ addr -= s->pos + (uint32_t)i; 2.849 ++ addr >>= 4; 2.850 ++ 2.851 ++ norm &= ~((uint64_t)0x8FFFFF << 13); 2.852 ++ norm |= (uint64_t)(addr & 0x0FFFFF) << 13; 2.853 ++ norm |= (uint64_t)(addr & 0x100000) 2.854 ++ << (36 - 20); 2.855 ++ 2.856 ++ instr &= (1 << bit_res) - 1; 2.857 ++ instr |= norm << bit_res; 2.858 ++ 2.859 ++ for (j = 0; j < 6; j++) 2.860 ++ buf[i + j + byte_pos] 2.861 ++ = (uint8_t)(instr >> (8 * j)); 2.862 ++ } 2.863 ++ } 2.864 ++ } 2.865 ++ 2.866 ++ return i; 2.867 ++} 2.868 ++#endif 2.869 ++ 2.870 ++#ifdef XZ_DEC_ARM 2.871 ++static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.872 ++{ 2.873 ++ size_t i; 2.874 ++ uint32_t addr; 2.875 ++ 2.876 ++ for (i = 0; i + 4 <= size; i += 4) { 2.877 ++ if (buf[i + 3] == 0xEB) { 2.878 ++ addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8) 2.879 ++ | ((uint32_t)buf[i + 2] << 16); 2.880 ++ addr <<= 2; 2.881 ++ addr -= s->pos + (uint32_t)i + 8; 2.882 ++ addr >>= 2; 2.883 ++ buf[i] = (uint8_t)addr; 2.884 ++ buf[i + 1] = (uint8_t)(addr >> 8); 2.885 ++ buf[i + 2] = (uint8_t)(addr >> 16); 2.886 ++ } 2.887 ++ } 2.888 ++ 2.889 ++ return i; 2.890 ++} 2.891 ++#endif 2.892 ++ 2.893 ++#ifdef XZ_DEC_ARMTHUMB 2.894 ++static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.895 ++{ 2.896 ++ size_t i; 2.897 ++ uint32_t addr; 2.898 ++ 2.899 ++ for (i = 0; i + 4 <= size; i += 2) { 2.900 ++ if ((buf[i + 1] & 0xF8) == 0xF0 2.901 ++ && (buf[i + 3] & 0xF8) == 0xF8) { 2.902 ++ addr = (((uint32_t)buf[i + 1] & 0x07) << 19) 2.903 ++ | ((uint32_t)buf[i] << 11) 2.904 ++ | (((uint32_t)buf[i + 3] & 0x07) << 8) 2.905 ++ | (uint32_t)buf[i + 2]; 2.906 ++ addr <<= 1; 2.907 ++ addr -= s->pos + (uint32_t)i + 4; 2.908 ++ addr >>= 1; 2.909 ++ buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07)); 2.910 ++ buf[i] = (uint8_t)(addr >> 11); 2.911 ++ buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07)); 2.912 ++ buf[i + 2] = (uint8_t)addr; 2.913 ++ i += 2; 2.914 ++ } 2.915 ++ } 2.916 ++ 2.917 ++ return i; 2.918 ++} 2.919 ++#endif 2.920 ++ 2.921 ++#ifdef 
XZ_DEC_SPARC 2.922 ++static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size) 2.923 ++{ 2.924 ++ size_t i; 2.925 ++ uint32_t instr; 2.926 ++ 2.927 ++ for (i = 0; i + 4 <= size; i += 4) { 2.928 ++ instr = get_unaligned_be32(buf + i); 2.929 ++ if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) { 2.930 ++ instr <<= 2; 2.931 ++ instr -= s->pos + (uint32_t)i; 2.932 ++ instr >>= 2; 2.933 ++ instr = ((uint32_t)0x40000000 - (instr & 0x400000)) 2.934 ++ | 0x40000000 | (instr & 0x3FFFFF); 2.935 ++ put_unaligned_be32(instr, buf + i); 2.936 ++ } 2.937 ++ } 2.938 ++ 2.939 ++ return i; 2.940 ++} 2.941 ++#endif 2.942 ++ 2.943 ++/* 2.944 ++ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount 2.945 ++ * of data that got filtered. 2.946 ++ * 2.947 ++ * NOTE: This is implemented as a switch statement to avoid using function 2.948 ++ * pointers, which could be problematic in the kernel boot code, which must 2.949 ++ * avoid pointers to static data (at least on x86). 2.950 ++ */ 2.951 ++static void bcj_apply(struct xz_dec_bcj *s, 2.952 ++ uint8_t *buf, size_t *pos, size_t size) 2.953 ++{ 2.954 ++ size_t filtered; 2.955 ++ 2.956 ++ buf += *pos; 2.957 ++ size -= *pos; 2.958 ++ 2.959 ++ switch (s->type) { 2.960 ++#ifdef XZ_DEC_X86 2.961 ++ case BCJ_X86: 2.962 ++ filtered = bcj_x86(s, buf, size); 2.963 ++ break; 2.964 ++#endif 2.965 ++#ifdef XZ_DEC_POWERPC 2.966 ++ case BCJ_POWERPC: 2.967 ++ filtered = bcj_powerpc(s, buf, size); 2.968 ++ break; 2.969 ++#endif 2.970 ++#ifdef XZ_DEC_IA64 2.971 ++ case BCJ_IA64: 2.972 ++ filtered = bcj_ia64(s, buf, size); 2.973 ++ break; 2.974 ++#endif 2.975 ++#ifdef XZ_DEC_ARM 2.976 ++ case BCJ_ARM: 2.977 ++ filtered = bcj_arm(s, buf, size); 2.978 ++ break; 2.979 ++#endif 2.980 ++#ifdef XZ_DEC_ARMTHUMB 2.981 ++ case BCJ_ARMTHUMB: 2.982 ++ filtered = bcj_armthumb(s, buf, size); 2.983 ++ break; 2.984 ++#endif 2.985 ++#ifdef XZ_DEC_SPARC 2.986 ++ case BCJ_SPARC: 2.987 ++ filtered = bcj_sparc(s, buf, size); 2.988 ++ break; 2.989 ++#endif 2.990 ++ default: 2.991 ++ /* Never reached but silence compiler warnings. */ 2.992 ++ filtered = 0; 2.993 ++ break; 2.994 ++ } 2.995 ++ 2.996 ++ *pos += filtered; 2.997 ++ s->pos += filtered; 2.998 ++} 2.999 ++ 2.1000 ++/* 2.1001 ++ * Flush pending filtered data from temp to the output buffer. 2.1002 ++ * Move the remaining mixture of possibly filtered and unfiltered 2.1003 ++ * data to the beginning of temp. 2.1004 ++ */ 2.1005 ++static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b) 2.1006 ++{ 2.1007 ++ size_t copy_size; 2.1008 ++ 2.1009 ++ copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos); 2.1010 ++ memcpy(b->out + b->out_pos, s->temp.buf, copy_size); 2.1011 ++ b->out_pos += copy_size; 2.1012 ++ 2.1013 ++ s->temp.filtered -= copy_size; 2.1014 ++ s->temp.size -= copy_size; 2.1015 ++ memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size); 2.1016 ++} 2.1017 ++ 2.1018 ++/* 2.1019 ++ * The BCJ filter functions are primitive in sense that they process the 2.1020 ++ * data in chunks of 1-16 bytes. To hide this issue, this function does 2.1021 ++ * some buffering. 2.1022 ++ */ 2.1023 ++XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, 2.1024 ++ struct xz_dec_lzma2 *lzma2, 2.1025 ++ struct xz_buf *b) 2.1026 ++{ 2.1027 ++ size_t out_start; 2.1028 ++ 2.1029 ++ /* 2.1030 ++ * Flush pending already filtered data to the output buffer. Return 2.1031 ++ * immediatelly if we couldn't flush everything, or if the next 2.1032 ++ * filter in the chain had already returned XZ_STREAM_END. 
2.1033 ++ */ 2.1034 ++ if (s->temp.filtered > 0) { 2.1035 ++ bcj_flush(s, b); 2.1036 ++ if (s->temp.filtered > 0) 2.1037 ++ return XZ_OK; 2.1038 ++ 2.1039 ++ if (s->ret == XZ_STREAM_END) 2.1040 ++ return XZ_STREAM_END; 2.1041 ++ } 2.1042 ++ 2.1043 ++ /* 2.1044 ++ * If we have more output space than what is currently pending in 2.1045 ++ * temp, copy the unfiltered data from temp to the output buffer 2.1046 ++ * and try to fill the output buffer by decoding more data from the 2.1047 ++ * next filter in the chain. Apply the BCJ filter on the new data 2.1048 ++ * in the output buffer. If everything cannot be filtered, copy it 2.1049 ++ * to temp and rewind the output buffer position accordingly. 2.1050 ++ */ 2.1051 ++ if (s->temp.size < b->out_size - b->out_pos) { 2.1052 ++ out_start = b->out_pos; 2.1053 ++ memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size); 2.1054 ++ b->out_pos += s->temp.size; 2.1055 ++ 2.1056 ++ s->ret = xz_dec_lzma2_run(lzma2, b); 2.1057 ++ if (s->ret != XZ_STREAM_END 2.1058 ++ && (s->ret != XZ_OK || s->single_call)) 2.1059 ++ return s->ret; 2.1060 ++ 2.1061 ++ bcj_apply(s, b->out, &out_start, b->out_pos); 2.1062 ++ 2.1063 ++ /* 2.1064 ++ * As an exception, if the next filter returned XZ_STREAM_END, 2.1065 ++ * we can do that too, since the last few bytes that remain 2.1066 ++ * unfiltered are meant to remain unfiltered. 2.1067 ++ */ 2.1068 ++ if (s->ret == XZ_STREAM_END) 2.1069 ++ return XZ_STREAM_END; 2.1070 ++ 2.1071 ++ s->temp.size = b->out_pos - out_start; 2.1072 ++ b->out_pos -= s->temp.size; 2.1073 ++ memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size); 2.1074 ++ } 2.1075 ++ 2.1076 ++ /* 2.1077 ++ * If we have unfiltered data in temp, try to fill by decoding more 2.1078 ++ * data from the next filter. Apply the BCJ filter on temp. Then we 2.1079 ++ * hopefully can fill the actual output buffer by copying filtered 2.1080 ++ * data from temp. A mix of filtered and unfiltered data may be left 2.1081 ++ * in temp; it will be taken care on the next call to this function. 2.1082 ++ */ 2.1083 ++ if (s->temp.size > 0) { 2.1084 ++ /* Make b->out{,_pos,_size} temporarily point to s->temp. */ 2.1085 ++ s->out = b->out; 2.1086 ++ s->out_pos = b->out_pos; 2.1087 ++ s->out_size = b->out_size; 2.1088 ++ b->out = s->temp.buf; 2.1089 ++ b->out_pos = s->temp.size; 2.1090 ++ b->out_size = sizeof(s->temp.buf); 2.1091 ++ 2.1092 ++ s->ret = xz_dec_lzma2_run(lzma2, b); 2.1093 ++ 2.1094 ++ s->temp.size = b->out_pos; 2.1095 ++ b->out = s->out; 2.1096 ++ b->out_pos = s->out_pos; 2.1097 ++ b->out_size = s->out_size; 2.1098 ++ 2.1099 ++ if (s->ret != XZ_OK && s->ret != XZ_STREAM_END) 2.1100 ++ return s->ret; 2.1101 ++ 2.1102 ++ bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size); 2.1103 ++ 2.1104 ++ /* 2.1105 ++ * If the next filter returned XZ_STREAM_END, we mark that 2.1106 ++ * everything is filtered, since the last unfiltered bytes 2.1107 ++ * of the stream are meant to be left as is. 
2.1108 ++ */ 2.1109 ++ if (s->ret == XZ_STREAM_END) 2.1110 ++ s->temp.filtered = s->temp.size; 2.1111 ++ 2.1112 ++ bcj_flush(s, b); 2.1113 ++ if (s->temp.filtered > 0) 2.1114 ++ return XZ_OK; 2.1115 ++ } 2.1116 ++ 2.1117 ++ return s->ret; 2.1118 ++} 2.1119 ++ 2.1120 ++XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call) 2.1121 ++{ 2.1122 ++ struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL); 2.1123 ++ if (s != NULL) 2.1124 ++ s->single_call = single_call; 2.1125 ++ 2.1126 ++ return s; 2.1127 ++} 2.1128 ++ 2.1129 ++XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id) 2.1130 ++{ 2.1131 ++ switch (id) { 2.1132 ++#ifdef XZ_DEC_X86 2.1133 ++ case BCJ_X86: 2.1134 ++#endif 2.1135 ++#ifdef XZ_DEC_POWERPC 2.1136 ++ case BCJ_POWERPC: 2.1137 ++#endif 2.1138 ++#ifdef XZ_DEC_IA64 2.1139 ++ case BCJ_IA64: 2.1140 ++#endif 2.1141 ++#ifdef XZ_DEC_ARM 2.1142 ++ case BCJ_ARM: 2.1143 ++#endif 2.1144 ++#ifdef XZ_DEC_ARMTHUMB 2.1145 ++ case BCJ_ARMTHUMB: 2.1146 ++#endif 2.1147 ++#ifdef XZ_DEC_SPARC 2.1148 ++ case BCJ_SPARC: 2.1149 ++#endif 2.1150 ++ break; 2.1151 ++ 2.1152 ++ default: 2.1153 ++ /* Unsupported Filter ID */ 2.1154 ++ return XZ_OPTIONS_ERROR; 2.1155 ++ } 2.1156 ++ 2.1157 ++ s->type = id; 2.1158 ++ s->ret = XZ_OK; 2.1159 ++ s->pos = 0; 2.1160 ++ s->x86_prev_mask = 0; 2.1161 ++ s->temp.filtered = 0; 2.1162 ++ s->temp.size = 0; 2.1163 ++ 2.1164 ++ return XZ_OK; 2.1165 ++} 2.1166 ++ 2.1167 ++#endif 2.1168 +diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c 2.1169 +new file mode 100644 2.1170 +index 0000000..ea5fa4f 2.1171 +--- /dev/null 2.1172 ++++ b/lib/xz/xz_dec_lzma2.c 2.1173 +@@ -0,0 +1,1171 @@ 2.1174 ++/* 2.1175 ++ * LZMA2 decoder 2.1176 ++ * 2.1177 ++ * Authors: Lasse Collin <lasse.collin@tukaani.org> 2.1178 ++ * Igor Pavlov <http://7-zip.org/> 2.1179 ++ * 2.1180 ++ * This file has been put into the public domain. 2.1181 ++ * You can do whatever you want with this file. 2.1182 ++ */ 2.1183 ++ 2.1184 ++#include "xz_private.h" 2.1185 ++#include "xz_lzma2.h" 2.1186 ++ 2.1187 ++/* 2.1188 ++ * Range decoder initialization eats the first five bytes of each LZMA chunk. 2.1189 ++ */ 2.1190 ++#define RC_INIT_BYTES 5 2.1191 ++ 2.1192 ++/* 2.1193 ++ * Minimum number of usable input buffer to safely decode one LZMA symbol. 2.1194 ++ * The worst case is that we decode 22 bits using probabilities and 26 2.1195 ++ * direct bits. This may decode at maximum of 20 bytes of input. However, 2.1196 ++ * lzma_main() does an extra normalization before returning, thus we 2.1197 ++ * need to put 21 here. 2.1198 ++ */ 2.1199 ++#define LZMA_IN_REQUIRED 21 2.1200 ++ 2.1201 ++/* 2.1202 ++ * Dictionary (history buffer) 2.1203 ++ * 2.1204 ++ * These are always true: 2.1205 ++ * start <= pos <= full <= end 2.1206 ++ * pos <= limit <= end 2.1207 ++ * 2.1208 ++ * In multi-call mode, also these are true: 2.1209 ++ * end == size 2.1210 ++ * size <= size_max 2.1211 ++ * allocated <= size 2.1212 ++ * 2.1213 ++ * Most of these variables are size_t to support single-call mode, 2.1214 ++ * in which the dictionary variables address the actual output 2.1215 ++ * buffer directly. 2.1216 ++ */ 2.1217 ++struct dictionary { 2.1218 ++ /* Beginning of the history buffer */ 2.1219 ++ uint8_t *buf; 2.1220 ++ 2.1221 ++ /* Old position in buf (before decoding more data) */ 2.1222 ++ size_t start; 2.1223 ++ 2.1224 ++ /* Position in buf */ 2.1225 ++ size_t pos; 2.1226 ++ 2.1227 ++ /* 2.1228 ++ * How full dictionary is. 
This is used to detect corrupt input that 2.1229 ++ * would read beyond the beginning of the uncompressed stream. 2.1230 ++ */ 2.1231 ++ size_t full; 2.1232 ++ 2.1233 ++ /* Write limit; we don't write to buf[limit] or later bytes. */ 2.1234 ++ size_t limit; 2.1235 ++ 2.1236 ++ /* 2.1237 ++ * End of the dictionary buffer. In multi-call mode, this is 2.1238 ++ * the same as the dictionary size. In single-call mode, this 2.1239 ++ * indicates the size of the output buffer. 2.1240 ++ */ 2.1241 ++ size_t end; 2.1242 ++ 2.1243 ++ /* 2.1244 ++ * Size of the dictionary as specified in Block Header. This is used 2.1245 ++ * together with "full" to detect corrupt input that would make us 2.1246 ++ * read beyond the beginning of the uncompressed stream. 2.1247 ++ */ 2.1248 ++ uint32_t size; 2.1249 ++ 2.1250 ++ /* 2.1251 ++ * Maximum allowed dictionary size in multi-call mode. 2.1252 ++ * This is ignored in single-call mode. 2.1253 ++ */ 2.1254 ++ uint32_t size_max; 2.1255 ++ 2.1256 ++ /* 2.1257 ++ * Amount of memory currently allocated for the dictionary. 2.1258 ++ * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC, 2.1259 ++ * size_max is always the same as the allocated size.) 2.1260 ++ */ 2.1261 ++ uint32_t allocated; 2.1262 ++ 2.1263 ++ /* Operation mode */ 2.1264 ++ enum xz_mode mode; 2.1265 ++}; 2.1266 ++ 2.1267 ++/* Range decoder */ 2.1268 ++struct rc_dec { 2.1269 ++ uint32_t range; 2.1270 ++ uint32_t code; 2.1271 ++ 2.1272 ++ /* 2.1273 ++ * Number of initializing bytes remaining to be read 2.1274 ++ * by rc_read_init(). 2.1275 ++ */ 2.1276 ++ uint32_t init_bytes_left; 2.1277 ++ 2.1278 ++ /* 2.1279 ++ * Buffer from which we read our input. It can be either 2.1280 ++ * temp.buf or the caller-provided input buffer. 2.1281 ++ */ 2.1282 ++ const uint8_t *in; 2.1283 ++ size_t in_pos; 2.1284 ++ size_t in_limit; 2.1285 ++}; 2.1286 ++ 2.1287 ++/* Probabilities for a length decoder. */ 2.1288 ++struct lzma_len_dec { 2.1289 ++ /* Probability of match length being at least 10 */ 2.1290 ++ uint16_t choice; 2.1291 ++ 2.1292 ++ /* Probability of match length being at least 18 */ 2.1293 ++ uint16_t choice2; 2.1294 ++ 2.1295 ++ /* Probabilities for match lengths 2-9 */ 2.1296 ++ uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS]; 2.1297 ++ 2.1298 ++ /* Probabilities for match lengths 10-17 */ 2.1299 ++ uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS]; 2.1300 ++ 2.1301 ++ /* Probabilities for match lengths 18-273 */ 2.1302 ++ uint16_t high[LEN_HIGH_SYMBOLS]; 2.1303 ++}; 2.1304 ++ 2.1305 ++struct lzma_dec { 2.1306 ++ /* Distances of latest four matches */ 2.1307 ++ uint32_t rep0; 2.1308 ++ uint32_t rep1; 2.1309 ++ uint32_t rep2; 2.1310 ++ uint32_t rep3; 2.1311 ++ 2.1312 ++ /* Types of the most recently seen LZMA symbols */ 2.1313 ++ enum lzma_state state; 2.1314 ++ 2.1315 ++ /* 2.1316 ++ * Length of a match. This is updated so that dict_repeat can 2.1317 ++ * be called again to finish repeating the whole match. 2.1318 ++ */ 2.1319 ++ uint32_t len; 2.1320 ++ 2.1321 ++ /* 2.1322 ++ * LZMA properties or related bit masks (number of literal 2.1323 ++ * context bits, a mask dervied from the number of literal 2.1324 ++ * position bits, and a mask dervied from the number 2.1325 ++ * position bits) 2.1326 ++ */ 2.1327 ++ uint32_t lc; 2.1328 ++ uint32_t literal_pos_mask; /* (1 << lp) - 1 */ 2.1329 ++ uint32_t pos_mask; /* (1 << pb) - 1 */ 2.1330 ++ 2.1331 ++ /* If 1, it's a match. Otherwise it's a single 8-bit literal. 
*/ 2.1332 ++ uint16_t is_match[STATES][POS_STATES_MAX]; 2.1333 ++ 2.1334 ++ /* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */ 2.1335 ++ uint16_t is_rep[STATES]; 2.1336 ++ 2.1337 ++ /* 2.1338 ++ * If 0, distance of a repeated match is rep0. 2.1339 ++ * Otherwise check is_rep1. 2.1340 ++ */ 2.1341 ++ uint16_t is_rep0[STATES]; 2.1342 ++ 2.1343 ++ /* 2.1344 ++ * If 0, distance of a repeated match is rep1. 2.1345 ++ * Otherwise check is_rep2. 2.1346 ++ */ 2.1347 ++ uint16_t is_rep1[STATES]; 2.1348 ++ 2.1349 ++ /* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */ 2.1350 ++ uint16_t is_rep2[STATES]; 2.1351 ++ 2.1352 ++ /* 2.1353 ++ * If 1, the repeated match has length of one byte. Otherwise 2.1354 ++ * the length is decoded from rep_len_decoder. 2.1355 ++ */ 2.1356 ++ uint16_t is_rep0_long[STATES][POS_STATES_MAX]; 2.1357 ++ 2.1358 ++ /* 2.1359 ++ * Probability tree for the highest two bits of the match 2.1360 ++ * distance. There is a separate probability tree for match 2.1361 ++ * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273]. 2.1362 ++ */ 2.1363 ++ uint16_t dist_slot[DIST_STATES][DIST_SLOTS]; 2.1364 ++ 2.1365 ++ /* 2.1366 ++ * Probility trees for additional bits for match distance 2.1367 ++ * when the distance is in the range [4, 127]. 2.1368 ++ */ 2.1369 ++ uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END]; 2.1370 ++ 2.1371 ++ /* 2.1372 ++ * Probability tree for the lowest four bits of a match 2.1373 ++ * distance that is equal to or greater than 128. 2.1374 ++ */ 2.1375 ++ uint16_t dist_align[ALIGN_SIZE]; 2.1376 ++ 2.1377 ++ /* Length of a normal match */ 2.1378 ++ struct lzma_len_dec match_len_dec; 2.1379 ++ 2.1380 ++ /* Length of a repeated match */ 2.1381 ++ struct lzma_len_dec rep_len_dec; 2.1382 ++ 2.1383 ++ /* Probabilities of literals */ 2.1384 ++ uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE]; 2.1385 ++}; 2.1386 ++ 2.1387 ++struct lzma2_dec { 2.1388 ++ /* Position in xz_dec_lzma2_run(). */ 2.1389 ++ enum lzma2_seq { 2.1390 ++ SEQ_CONTROL, 2.1391 ++ SEQ_UNCOMPRESSED_1, 2.1392 ++ SEQ_UNCOMPRESSED_2, 2.1393 ++ SEQ_COMPRESSED_0, 2.1394 ++ SEQ_COMPRESSED_1, 2.1395 ++ SEQ_PROPERTIES, 2.1396 ++ SEQ_LZMA_PREPARE, 2.1397 ++ SEQ_LZMA_RUN, 2.1398 ++ SEQ_COPY 2.1399 ++ } sequence; 2.1400 ++ 2.1401 ++ /* Next position after decoding the compressed size of the chunk. */ 2.1402 ++ enum lzma2_seq next_sequence; 2.1403 ++ 2.1404 ++ /* Uncompressed size of LZMA chunk (2 MiB at maximum) */ 2.1405 ++ uint32_t uncompressed; 2.1406 ++ 2.1407 ++ /* 2.1408 ++ * Compressed size of LZMA chunk or compressed/uncompressed 2.1409 ++ * size of uncompressed chunk (64 KiB at maximum) 2.1410 ++ */ 2.1411 ++ uint32_t compressed; 2.1412 ++ 2.1413 ++ /* 2.1414 ++ * True if dictionary reset is needed. This is false before 2.1415 ++ * the first chunk (LZMA or uncompressed). 2.1416 ++ */ 2.1417 ++ bool need_dict_reset; 2.1418 ++ 2.1419 ++ /* 2.1420 ++ * True if new LZMA properties are needed. This is false 2.1421 ++ * before the first LZMA chunk. 2.1422 ++ */ 2.1423 ++ bool need_props; 2.1424 ++}; 2.1425 ++ 2.1426 ++struct xz_dec_lzma2 { 2.1427 ++ /* 2.1428 ++ * The order below is important on x86 to reduce code size and 2.1429 ++ * it shouldn't hurt on other platforms. Everything up to and 2.1430 ++ * including lzma.pos_mask are in the first 128 bytes on x86-32, 2.1431 ++ * which allows using smaller instructions to access those 2.1432 ++ * variables. 
On x86-64, fewer variables fit into the first 128 2.1433 ++ * bytes, but this is still the best order without sacrificing 2.1434 ++ * the readability by splitting the structures. 2.1435 ++ */ 2.1436 ++ struct rc_dec rc; 2.1437 ++ struct dictionary dict; 2.1438 ++ struct lzma2_dec lzma2; 2.1439 ++ struct lzma_dec lzma; 2.1440 ++ 2.1441 ++ /* 2.1442 ++ * Temporary buffer which holds small number of input bytes between 2.1443 ++ * decoder calls. See lzma2_lzma() for details. 2.1444 ++ */ 2.1445 ++ struct { 2.1446 ++ uint32_t size; 2.1447 ++ uint8_t buf[3 * LZMA_IN_REQUIRED]; 2.1448 ++ } temp; 2.1449 ++}; 2.1450 ++ 2.1451 ++/************** 2.1452 ++ * Dictionary * 2.1453 ++ **************/ 2.1454 ++ 2.1455 ++/* 2.1456 ++ * Reset the dictionary state. When in single-call mode, set up the beginning 2.1457 ++ * of the dictionary to point to the actual output buffer. 2.1458 ++ */ 2.1459 ++static void dict_reset(struct dictionary *dict, struct xz_buf *b) 2.1460 ++{ 2.1461 ++ if (DEC_IS_SINGLE(dict->mode)) { 2.1462 ++ dict->buf = b->out + b->out_pos; 2.1463 ++ dict->end = b->out_size - b->out_pos; 2.1464 ++ } 2.1465 ++ 2.1466 ++ dict->start = 0; 2.1467 ++ dict->pos = 0; 2.1468 ++ dict->limit = 0; 2.1469 ++ dict->full = 0; 2.1470 ++} 2.1471 ++ 2.1472 ++/* Set dictionary write limit */ 2.1473 ++static void dict_limit(struct dictionary *dict, size_t out_max) 2.1474 ++{ 2.1475 ++ if (dict->end - dict->pos <= out_max) 2.1476 ++ dict->limit = dict->end; 2.1477 ++ else 2.1478 ++ dict->limit = dict->pos + out_max; 2.1479 ++} 2.1480 ++ 2.1481 ++/* Return true if at least one byte can be written into the dictionary. */ 2.1482 ++static inline bool dict_has_space(const struct dictionary *dict) 2.1483 ++{ 2.1484 ++ return dict->pos < dict->limit; 2.1485 ++} 2.1486 ++ 2.1487 ++/* 2.1488 ++ * Get a byte from the dictionary at the given distance. The distance is 2.1489 ++ * assumed to valid, or as a special case, zero when the dictionary is 2.1490 ++ * still empty. This special case is needed for single-call decoding to 2.1491 ++ * avoid writing a '\0' to the end of the destination buffer. 2.1492 ++ */ 2.1493 ++static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist) 2.1494 ++{ 2.1495 ++ size_t offset = dict->pos - dist - 1; 2.1496 ++ 2.1497 ++ if (dist >= dict->pos) 2.1498 ++ offset += dict->end; 2.1499 ++ 2.1500 ++ return dict->full > 0 ? dict->buf[offset] : 0; 2.1501 ++} 2.1502 ++ 2.1503 ++/* 2.1504 ++ * Put one byte into the dictionary. It is assumed that there is space for it. 2.1505 ++ */ 2.1506 ++static inline void dict_put(struct dictionary *dict, uint8_t byte) 2.1507 ++{ 2.1508 ++ dict->buf[dict->pos++] = byte; 2.1509 ++ 2.1510 ++ if (dict->full < dict->pos) 2.1511 ++ dict->full = dict->pos; 2.1512 ++} 2.1513 ++ 2.1514 ++/* 2.1515 ++ * Repeat given number of bytes from the given distance. If the distance is 2.1516 ++ * invalid, false is returned. On success, true is returned and *len is 2.1517 ++ * updated to indicate how many bytes were left to be repeated. 
2.1518 ++ */ 2.1519 ++static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist) 2.1520 ++{ 2.1521 ++ size_t back; 2.1522 ++ uint32_t left; 2.1523 ++ 2.1524 ++ if (dist >= dict->full || dist >= dict->size) 2.1525 ++ return false; 2.1526 ++ 2.1527 ++ left = min_t(size_t, dict->limit - dict->pos, *len); 2.1528 ++ *len -= left; 2.1529 ++ 2.1530 ++ back = dict->pos - dist - 1; 2.1531 ++ if (dist >= dict->pos) 2.1532 ++ back += dict->end; 2.1533 ++ 2.1534 ++ do { 2.1535 ++ dict->buf[dict->pos++] = dict->buf[back++]; 2.1536 ++ if (back == dict->end) 2.1537 ++ back = 0; 2.1538 ++ } while (--left > 0); 2.1539 ++ 2.1540 ++ if (dict->full < dict->pos) 2.1541 ++ dict->full = dict->pos; 2.1542 ++ 2.1543 ++ return true; 2.1544 ++} 2.1545 ++ 2.1546 ++/* Copy uncompressed data as is from input to dictionary and output buffers. */ 2.1547 ++static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b, 2.1548 ++ uint32_t *left) 2.1549 ++{ 2.1550 ++ size_t copy_size; 2.1551 ++ 2.1552 ++ while (*left > 0 && b->in_pos < b->in_size 2.1553 ++ && b->out_pos < b->out_size) { 2.1554 ++ copy_size = min(b->in_size - b->in_pos, 2.1555 ++ b->out_size - b->out_pos); 2.1556 ++ if (copy_size > dict->end - dict->pos) 2.1557 ++ copy_size = dict->end - dict->pos; 2.1558 ++ if (copy_size > *left) 2.1559 ++ copy_size = *left; 2.1560 ++ 2.1561 ++ *left -= copy_size; 2.1562 ++ 2.1563 ++ memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size); 2.1564 ++ dict->pos += copy_size; 2.1565 ++ 2.1566 ++ if (dict->full < dict->pos) 2.1567 ++ dict->full = dict->pos; 2.1568 ++ 2.1569 ++ if (DEC_IS_MULTI(dict->mode)) { 2.1570 ++ if (dict->pos == dict->end) 2.1571 ++ dict->pos = 0; 2.1572 ++ 2.1573 ++ memcpy(b->out + b->out_pos, b->in + b->in_pos, 2.1574 ++ copy_size); 2.1575 ++ } 2.1576 ++ 2.1577 ++ dict->start = dict->pos; 2.1578 ++ 2.1579 ++ b->out_pos += copy_size; 2.1580 ++ b->in_pos += copy_size; 2.1581 ++ } 2.1582 ++} 2.1583 ++ 2.1584 ++/* 2.1585 ++ * Flush pending data from dictionary to b->out. It is assumed that there is 2.1586 ++ * enough space in b->out. This is guaranteed because caller uses dict_limit() 2.1587 ++ * before decoding data into the dictionary. 2.1588 ++ */ 2.1589 ++static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b) 2.1590 ++{ 2.1591 ++ size_t copy_size = dict->pos - dict->start; 2.1592 ++ 2.1593 ++ if (DEC_IS_MULTI(dict->mode)) { 2.1594 ++ if (dict->pos == dict->end) 2.1595 ++ dict->pos = 0; 2.1596 ++ 2.1597 ++ memcpy(b->out + b->out_pos, dict->buf + dict->start, 2.1598 ++ copy_size); 2.1599 ++ } 2.1600 ++ 2.1601 ++ dict->start = dict->pos; 2.1602 ++ b->out_pos += copy_size; 2.1603 ++ return copy_size; 2.1604 ++} 2.1605 ++ 2.1606 ++/***************** 2.1607 ++ * Range decoder * 2.1608 ++ *****************/ 2.1609 ++ 2.1610 ++/* Reset the range decoder. */ 2.1611 ++static void rc_reset(struct rc_dec *rc) 2.1612 ++{ 2.1613 ++ rc->range = (uint32_t)-1; 2.1614 ++ rc->code = 0; 2.1615 ++ rc->init_bytes_left = RC_INIT_BYTES; 2.1616 ++} 2.1617 ++ 2.1618 ++/* 2.1619 ++ * Read the first five initial bytes into rc->code if they haven't been 2.1620 ++ * read already. (Yes, the first byte gets completely ignored.) 
2.1621 ++ */ 2.1622 ++static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b) 2.1623 ++{ 2.1624 ++ while (rc->init_bytes_left > 0) { 2.1625 ++ if (b->in_pos == b->in_size) 2.1626 ++ return false; 2.1627 ++ 2.1628 ++ rc->code = (rc->code << 8) + b->in[b->in_pos++]; 2.1629 ++ --rc->init_bytes_left; 2.1630 ++ } 2.1631 ++ 2.1632 ++ return true; 2.1633 ++} 2.1634 ++ 2.1635 ++/* Return true if there may not be enough input for the next decoding loop. */ 2.1636 ++static inline bool rc_limit_exceeded(const struct rc_dec *rc) 2.1637 ++{ 2.1638 ++ return rc->in_pos > rc->in_limit; 2.1639 ++} 2.1640 ++ 2.1641 ++/* 2.1642 ++ * Return true if it is possible (from point of view of range decoder) that 2.1643 ++ * we have reached the end of the LZMA chunk. 2.1644 ++ */ 2.1645 ++static inline bool rc_is_finished(const struct rc_dec *rc) 2.1646 ++{ 2.1647 ++ return rc->code == 0; 2.1648 ++} 2.1649 ++ 2.1650 ++/* Read the next input byte if needed. */ 2.1651 ++static __always_inline void rc_normalize(struct rc_dec *rc) 2.1652 ++{ 2.1653 ++ if (rc->range < RC_TOP_VALUE) { 2.1654 ++ rc->range <<= RC_SHIFT_BITS; 2.1655 ++ rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++]; 2.1656 ++ } 2.1657 ++} 2.1658 ++ 2.1659 ++/* 2.1660 ++ * Decode one bit. In some versions, this function has been splitted in three 2.1661 ++ * functions so that the compiler is supposed to be able to more easily avoid 2.1662 ++ * an extra branch. In this particular version of the LZMA decoder, this 2.1663 ++ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3 2.1664 ++ * on x86). Using a non-splitted version results in nicer looking code too. 2.1665 ++ * 2.1666 ++ * NOTE: This must return an int. Do not make it return a bool or the speed 2.1667 ++ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care, 2.1668 ++ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.) 2.1669 ++ */ 2.1670 ++static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob) 2.1671 ++{ 2.1672 ++ uint32_t bound; 2.1673 ++ int bit; 2.1674 ++ 2.1675 ++ rc_normalize(rc); 2.1676 ++ bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob; 2.1677 ++ if (rc->code < bound) { 2.1678 ++ rc->range = bound; 2.1679 ++ *prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS; 2.1680 ++ bit = 0; 2.1681 ++ } else { 2.1682 ++ rc->range -= bound; 2.1683 ++ rc->code -= bound; 2.1684 ++ *prob -= *prob >> RC_MOVE_BITS; 2.1685 ++ bit = 1; 2.1686 ++ } 2.1687 ++ 2.1688 ++ return bit; 2.1689 ++} 2.1690 ++ 2.1691 ++/* Decode a bittree starting from the most significant bit. */ 2.1692 ++static __always_inline uint32_t rc_bittree(struct rc_dec *rc, 2.1693 ++ uint16_t *probs, uint32_t limit) 2.1694 ++{ 2.1695 ++ uint32_t symbol = 1; 2.1696 ++ 2.1697 ++ do { 2.1698 ++ if (rc_bit(rc, &probs[symbol])) 2.1699 ++ symbol = (symbol << 1) + 1; 2.1700 ++ else 2.1701 ++ symbol <<= 1; 2.1702 ++ } while (symbol < limit); 2.1703 ++ 2.1704 ++ return symbol; 2.1705 ++} 2.1706 ++ 2.1707 ++/* Decode a bittree starting from the least significant bit. 
*/ 2.1708 ++static __always_inline void rc_bittree_reverse(struct rc_dec *rc, 2.1709 ++ uint16_t *probs, 2.1710 ++ uint32_t *dest, uint32_t limit) 2.1711 ++{ 2.1712 ++ uint32_t symbol = 1; 2.1713 ++ uint32_t i = 0; 2.1714 ++ 2.1715 ++ do { 2.1716 ++ if (rc_bit(rc, &probs[symbol])) { 2.1717 ++ symbol = (symbol << 1) + 1; 2.1718 ++ *dest += 1 << i; 2.1719 ++ } else { 2.1720 ++ symbol <<= 1; 2.1721 ++ } 2.1722 ++ } while (++i < limit); 2.1723 ++} 2.1724 ++ 2.1725 ++/* Decode direct bits (fixed fifty-fifty probability) */ 2.1726 ++static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit) 2.1727 ++{ 2.1728 ++ uint32_t mask; 2.1729 ++ 2.1730 ++ do { 2.1731 ++ rc_normalize(rc); 2.1732 ++ rc->range >>= 1; 2.1733 ++ rc->code -= rc->range; 2.1734 ++ mask = (uint32_t)0 - (rc->code >> 31); 2.1735 ++ rc->code += rc->range & mask; 2.1736 ++ *dest = (*dest << 1) + (mask + 1); 2.1737 ++ } while (--limit > 0); 2.1738 ++} 2.1739 ++ 2.1740 ++/******** 2.1741 ++ * LZMA * 2.1742 ++ ********/ 2.1743 ++ 2.1744 ++/* Get pointer to literal coder probability array. */ 2.1745 ++static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s) 2.1746 ++{ 2.1747 ++ uint32_t prev_byte = dict_get(&s->dict, 0); 2.1748 ++ uint32_t low = prev_byte >> (8 - s->lzma.lc); 2.1749 ++ uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc; 2.1750 ++ return s->lzma.literal[low + high]; 2.1751 ++} 2.1752 ++ 2.1753 ++/* Decode a literal (one 8-bit byte) */ 2.1754 ++static void lzma_literal(struct xz_dec_lzma2 *s) 2.1755 ++{ 2.1756 ++ uint16_t *probs; 2.1757 ++ uint32_t symbol; 2.1758 ++ uint32_t match_byte; 2.1759 ++ uint32_t match_bit; 2.1760 ++ uint32_t offset; 2.1761 ++ uint32_t i; 2.1762 ++ 2.1763 ++ probs = lzma_literal_probs(s); 2.1764 ++ 2.1765 ++ if (lzma_state_is_literal(s->lzma.state)) { 2.1766 ++ symbol = rc_bittree(&s->rc, probs, 0x100); 2.1767 ++ } else { 2.1768 ++ symbol = 1; 2.1769 ++ match_byte = dict_get(&s->dict, s->lzma.rep0) << 1; 2.1770 ++ offset = 0x100; 2.1771 ++ 2.1772 ++ do { 2.1773 ++ match_bit = match_byte & offset; 2.1774 ++ match_byte <<= 1; 2.1775 ++ i = offset + match_bit + symbol; 2.1776 ++ 2.1777 ++ if (rc_bit(&s->rc, &probs[i])) { 2.1778 ++ symbol = (symbol << 1) + 1; 2.1779 ++ offset &= match_bit; 2.1780 ++ } else { 2.1781 ++ symbol <<= 1; 2.1782 ++ offset &= ~match_bit; 2.1783 ++ } 2.1784 ++ } while (symbol < 0x100); 2.1785 ++ } 2.1786 ++ 2.1787 ++ dict_put(&s->dict, (uint8_t)symbol); 2.1788 ++ lzma_state_literal(&s->lzma.state); 2.1789 ++} 2.1790 ++ 2.1791 ++/* Decode the length of the match into s->lzma.len. */ 2.1792 ++static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l, 2.1793 ++ uint32_t pos_state) 2.1794 ++{ 2.1795 ++ uint16_t *probs; 2.1796 ++ uint32_t limit; 2.1797 ++ 2.1798 ++ if (!rc_bit(&s->rc, &l->choice)) { 2.1799 ++ probs = l->low[pos_state]; 2.1800 ++ limit = LEN_LOW_SYMBOLS; 2.1801 ++ s->lzma.len = MATCH_LEN_MIN; 2.1802 ++ } else { 2.1803 ++ if (!rc_bit(&s->rc, &l->choice2)) { 2.1804 ++ probs = l->mid[pos_state]; 2.1805 ++ limit = LEN_MID_SYMBOLS; 2.1806 ++ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS; 2.1807 ++ } else { 2.1808 ++ probs = l->high; 2.1809 ++ limit = LEN_HIGH_SYMBOLS; 2.1810 ++ s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS 2.1811 ++ + LEN_MID_SYMBOLS; 2.1812 ++ } 2.1813 ++ } 2.1814 ++ 2.1815 ++ s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit; 2.1816 ++} 2.1817 ++ 2.1818 ++/* Decode a match. The distance will be stored in s->lzma.rep0. 
*/ 2.1819 ++static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state) 2.1820 ++{ 2.1821 ++ uint16_t *probs; 2.1822 ++ uint32_t dist_slot; 2.1823 ++ uint32_t limit; 2.1824 ++ 2.1825 ++ lzma_state_match(&s->lzma.state); 2.1826 ++ 2.1827 ++ s->lzma.rep3 = s->lzma.rep2; 2.1828 ++ s->lzma.rep2 = s->lzma.rep1; 2.1829 ++ s->lzma.rep1 = s->lzma.rep0; 2.1830 ++ 2.1831 ++ lzma_len(s, &s->lzma.match_len_dec, pos_state); 2.1832 ++ 2.1833 ++ probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)]; 2.1834 ++ dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS; 2.1835 ++ 2.1836 ++ if (dist_slot < DIST_MODEL_START) { 2.1837 ++ s->lzma.rep0 = dist_slot; 2.1838 ++ } else { 2.1839 ++ limit = (dist_slot >> 1) - 1; 2.1840 ++ s->lzma.rep0 = 2 + (dist_slot & 1); 2.1841 ++ 2.1842 ++ if (dist_slot < DIST_MODEL_END) { 2.1843 ++ s->lzma.rep0 <<= limit; 2.1844 ++ probs = s->lzma.dist_special + s->lzma.rep0 2.1845 ++ - dist_slot - 1; 2.1846 ++ rc_bittree_reverse(&s->rc, probs, 2.1847 ++ &s->lzma.rep0, limit); 2.1848 ++ } else { 2.1849 ++ rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS); 2.1850 ++ s->lzma.rep0 <<= ALIGN_BITS; 2.1851 ++ rc_bittree_reverse(&s->rc, s->lzma.dist_align, 2.1852 ++ &s->lzma.rep0, ALIGN_BITS); 2.1853 ++ } 2.1854 ++ } 2.1855 ++} 2.1856 ++ 2.1857 ++/* 2.1858 ++ * Decode a repeated match. The distance is one of the four most recently 2.1859 ++ * seen matches. The distance will be stored in s->lzma.rep0. 2.1860 ++ */ 2.1861 ++static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state) 2.1862 ++{ 2.1863 ++ uint32_t tmp; 2.1864 ++ 2.1865 ++ if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) { 2.1866 ++ if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[ 2.1867 ++ s->lzma.state][pos_state])) { 2.1868 ++ lzma_state_short_rep(&s->lzma.state); 2.1869 ++ s->lzma.len = 1; 2.1870 ++ return; 2.1871 ++ } 2.1872 ++ } else { 2.1873 ++ if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) { 2.1874 ++ tmp = s->lzma.rep1; 2.1875 ++ } else { 2.1876 ++ if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) { 2.1877 ++ tmp = s->lzma.rep2; 2.1878 ++ } else { 2.1879 ++ tmp = s->lzma.rep3; 2.1880 ++ s->lzma.rep3 = s->lzma.rep2; 2.1881 ++ } 2.1882 ++ 2.1883 ++ s->lzma.rep2 = s->lzma.rep1; 2.1884 ++ } 2.1885 ++ 2.1886 ++ s->lzma.rep1 = s->lzma.rep0; 2.1887 ++ s->lzma.rep0 = tmp; 2.1888 ++ } 2.1889 ++ 2.1890 ++ lzma_state_long_rep(&s->lzma.state); 2.1891 ++ lzma_len(s, &s->lzma.rep_len_dec, pos_state); 2.1892 ++} 2.1893 ++ 2.1894 ++/* LZMA decoder core */ 2.1895 ++static bool lzma_main(struct xz_dec_lzma2 *s) 2.1896 ++{ 2.1897 ++ uint32_t pos_state; 2.1898 ++ 2.1899 ++ /* 2.1900 ++ * If the dictionary was reached during the previous call, try to 2.1901 ++ * finish the possibly pending repeat in the dictionary. 2.1902 ++ */ 2.1903 ++ if (dict_has_space(&s->dict) && s->lzma.len > 0) 2.1904 ++ dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0); 2.1905 ++ 2.1906 ++ /* 2.1907 ++ * Decode more LZMA symbols. One iteration may consume up to 2.1908 ++ * LZMA_IN_REQUIRED - 1 bytes. 
2.1909 ++ */ 2.1910 ++ while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) { 2.1911 ++ pos_state = s->dict.pos & s->lzma.pos_mask; 2.1912 ++ 2.1913 ++ if (!rc_bit(&s->rc, &s->lzma.is_match[ 2.1914 ++ s->lzma.state][pos_state])) { 2.1915 ++ lzma_literal(s); 2.1916 ++ } else { 2.1917 ++ if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state])) 2.1918 ++ lzma_rep_match(s, pos_state); 2.1919 ++ else 2.1920 ++ lzma_match(s, pos_state); 2.1921 ++ 2.1922 ++ if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0)) 2.1923 ++ return false; 2.1924 ++ } 2.1925 ++ } 2.1926 ++ 2.1927 ++ /* 2.1928 ++ * Having the range decoder always normalized when we are outside 2.1929 ++ * this function makes it easier to correctly handle end of the chunk. 2.1930 ++ */ 2.1931 ++ rc_normalize(&s->rc); 2.1932 ++ 2.1933 ++ return true; 2.1934 ++} 2.1935 ++ 2.1936 ++/* 2.1937 ++ * Reset the LZMA decoder and range decoder state. Dictionary is nore reset 2.1938 ++ * here, because LZMA state may be reset without resetting the dictionary. 2.1939 ++ */ 2.1940 ++static void lzma_reset(struct xz_dec_lzma2 *s) 2.1941 ++{ 2.1942 ++ uint16_t *probs; 2.1943 ++ size_t i; 2.1944 ++ 2.1945 ++ s->lzma.state = STATE_LIT_LIT; 2.1946 ++ s->lzma.rep0 = 0; 2.1947 ++ s->lzma.rep1 = 0; 2.1948 ++ s->lzma.rep2 = 0; 2.1949 ++ s->lzma.rep3 = 0; 2.1950 ++ 2.1951 ++ /* 2.1952 ++ * All probabilities are initialized to the same value. This hack 2.1953 ++ * makes the code smaller by avoiding a separate loop for each 2.1954 ++ * probability array. 2.1955 ++ * 2.1956 ++ * This could be optimized so that only that part of literal 2.1957 ++ * probabilities that are actually required. In the common case 2.1958 ++ * we would write 12 KiB less. 2.1959 ++ */ 2.1960 ++ probs = s->lzma.is_match[0]; 2.1961 ++ for (i = 0; i < PROBS_TOTAL; ++i) 2.1962 ++ probs[i] = RC_BIT_MODEL_TOTAL / 2; 2.1963 ++ 2.1964 ++ rc_reset(&s->rc); 2.1965 ++} 2.1966 ++ 2.1967 ++/* 2.1968 ++ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks 2.1969 ++ * from the decoded lp and pb values. On success, the LZMA decoder state is 2.1970 ++ * reset and true is returned. 2.1971 ++ */ 2.1972 ++static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props) 2.1973 ++{ 2.1974 ++ if (props > (4 * 5 + 4) * 9 + 8) 2.1975 ++ return false; 2.1976 ++ 2.1977 ++ s->lzma.pos_mask = 0; 2.1978 ++ while (props >= 9 * 5) { 2.1979 ++ props -= 9 * 5; 2.1980 ++ ++s->lzma.pos_mask; 2.1981 ++ } 2.1982 ++ 2.1983 ++ s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1; 2.1984 ++ 2.1985 ++ s->lzma.literal_pos_mask = 0; 2.1986 ++ while (props >= 9) { 2.1987 ++ props -= 9; 2.1988 ++ ++s->lzma.literal_pos_mask; 2.1989 ++ } 2.1990 ++ 2.1991 ++ s->lzma.lc = props; 2.1992 ++ 2.1993 ++ if (s->lzma.lc + s->lzma.literal_pos_mask > 4) 2.1994 ++ return false; 2.1995 ++ 2.1996 ++ s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1; 2.1997 ++ 2.1998 ++ lzma_reset(s); 2.1999 ++ 2.2000 ++ return true; 2.2001 ++} 2.2002 ++ 2.2003 ++/********* 2.2004 ++ * LZMA2 * 2.2005 ++ *********/ 2.2006 ++ 2.2007 ++/* 2.2008 ++ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't 2.2009 ++ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This 2.2010 ++ * wrapper function takes care of making the LZMA decoder's assumption safe. 2.2011 ++ * 2.2012 ++ * As long as there is plenty of input left to be decoded in the current LZMA 2.2013 ++ * chunk, we decode directly from the caller-supplied input buffer until 2.2014 ++ * there's LZMA_IN_REQUIRED bytes left. 
Those remaining bytes are copied into 2.2015 ++ * s->temp.buf, which (hopefully) gets filled on the next call to this 2.2016 ++ * function. We decode a few bytes from the temporary buffer so that we can 2.2017 ++ * continue decoding from the caller-supplied input buffer again. 2.2018 ++ */ 2.2019 ++static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b) 2.2020 ++{ 2.2021 ++ size_t in_avail; 2.2022 ++ uint32_t tmp; 2.2023 ++ 2.2024 ++ in_avail = b->in_size - b->in_pos; 2.2025 ++ if (s->temp.size > 0 || s->lzma2.compressed == 0) { 2.2026 ++ tmp = 2 * LZMA_IN_REQUIRED - s->temp.size; 2.2027 ++ if (tmp > s->lzma2.compressed - s->temp.size) 2.2028 ++ tmp = s->lzma2.compressed - s->temp.size; 2.2029 ++ if (tmp > in_avail) 2.2030 ++ tmp = in_avail; 2.2031 ++ 2.2032 ++ memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp); 2.2033 ++ 2.2034 ++ if (s->temp.size + tmp == s->lzma2.compressed) { 2.2035 ++ memzero(s->temp.buf + s->temp.size + tmp, 2.2036 ++ sizeof(s->temp.buf) 2.2037 ++ - s->temp.size - tmp); 2.2038 ++ s->rc.in_limit = s->temp.size + tmp; 2.2039 ++ } else if (s->temp.size + tmp < LZMA_IN_REQUIRED) { 2.2040 ++ s->temp.size += tmp; 2.2041 ++ b->in_pos += tmp; 2.2042 ++ return true; 2.2043 ++ } else { 2.2044 ++ s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED; 2.2045 ++ } 2.2046 ++ 2.2047 ++ s->rc.in = s->temp.buf; 2.2048 ++ s->rc.in_pos = 0; 2.2049 ++ 2.2050 ++ if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp) 2.2051 ++ return false; 2.2052 ++ 2.2053 ++ s->lzma2.compressed -= s->rc.in_pos; 2.2054 ++ 2.2055 ++ if (s->rc.in_pos < s->temp.size) { 2.2056 ++ s->temp.size -= s->rc.in_pos; 2.2057 ++ memmove(s->temp.buf, s->temp.buf + s->rc.in_pos, 2.2058 ++ s->temp.size); 2.2059 ++ return true; 2.2060 ++ } 2.2061 ++ 2.2062 ++ b->in_pos += s->rc.in_pos - s->temp.size; 2.2063 ++ s->temp.size = 0; 2.2064 ++ } 2.2065 ++ 2.2066 ++ in_avail = b->in_size - b->in_pos; 2.2067 ++ if (in_avail >= LZMA_IN_REQUIRED) { 2.2068 ++ s->rc.in = b->in; 2.2069 ++ s->rc.in_pos = b->in_pos; 2.2070 ++ 2.2071 ++ if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED) 2.2072 ++ s->rc.in_limit = b->in_pos + s->lzma2.compressed; 2.2073 ++ else 2.2074 ++ s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED; 2.2075 ++ 2.2076 ++ if (!lzma_main(s)) 2.2077 ++ return false; 2.2078 ++ 2.2079 ++ in_avail = s->rc.in_pos - b->in_pos; 2.2080 ++ if (in_avail > s->lzma2.compressed) 2.2081 ++ return false; 2.2082 ++ 2.2083 ++ s->lzma2.compressed -= in_avail; 2.2084 ++ b->in_pos = s->rc.in_pos; 2.2085 ++ } 2.2086 ++ 2.2087 ++ in_avail = b->in_size - b->in_pos; 2.2088 ++ if (in_avail < LZMA_IN_REQUIRED) { 2.2089 ++ if (in_avail > s->lzma2.compressed) 2.2090 ++ in_avail = s->lzma2.compressed; 2.2091 ++ 2.2092 ++ memcpy(s->temp.buf, b->in + b->in_pos, in_avail); 2.2093 ++ s->temp.size = in_avail; 2.2094 ++ b->in_pos += in_avail; 2.2095 ++ } 2.2096 ++ 2.2097 ++ return true; 2.2098 ++} 2.2099 ++ 2.2100 ++/* 2.2101 ++ * Take care of the LZMA2 control layer, and forward the job of actual LZMA 2.2102 ++ * decoding or copying of uncompressed chunks to other functions. 
2.2103 ++ */ 2.2104 ++XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, 2.2105 ++ struct xz_buf *b) 2.2106 ++{ 2.2107 ++ uint32_t tmp; 2.2108 ++ 2.2109 ++ while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) { 2.2110 ++ switch (s->lzma2.sequence) { 2.2111 ++ case SEQ_CONTROL: 2.2112 ++ /* 2.2113 ++ * LZMA2 control byte 2.2114 ++ * 2.2115 ++ * Exact values: 2.2116 ++ * 0x00 End marker 2.2117 ++ * 0x01 Dictionary reset followed by 2.2118 ++ * an uncompressed chunk 2.2119 ++ * 0x02 Uncompressed chunk (no dictionary reset) 2.2120 ++ * 2.2121 ++ * Highest three bits (s->control & 0xE0): 2.2122 ++ * 0xE0 Dictionary reset, new properties and state 2.2123 ++ * reset, followed by LZMA compressed chunk 2.2124 ++ * 0xC0 New properties and state reset, followed 2.2125 ++ * by LZMA compressed chunk (no dictionary 2.2126 ++ * reset) 2.2127 ++ * 0xA0 State reset using old properties, 2.2128 ++ * followed by LZMA compressed chunk (no 2.2129 ++ * dictionary reset) 2.2130 ++ * 0x80 LZMA chunk (no dictionary or state reset) 2.2131 ++ * 2.2132 ++ * For LZMA compressed chunks, the lowest five bits 2.2133 ++ * (s->control & 1F) are the highest bits of the 2.2134 ++ * uncompressed size (bits 16-20). 2.2135 ++ * 2.2136 ++ * A new LZMA2 stream must begin with a dictionary 2.2137 ++ * reset. The first LZMA chunk must set new 2.2138 ++ * properties and reset the LZMA state. 2.2139 ++ * 2.2140 ++ * Values that don't match anything described above 2.2141 ++ * are invalid and we return XZ_DATA_ERROR. 2.2142 ++ */ 2.2143 ++ tmp = b->in[b->in_pos++]; 2.2144 ++ 2.2145 ++ if (tmp >= 0xE0 || tmp == 0x01) { 2.2146 ++ s->lzma2.need_props = true; 2.2147 ++ s->lzma2.need_dict_reset = false; 2.2148 ++ dict_reset(&s->dict, b); 2.2149 ++ } else if (s->lzma2.need_dict_reset) { 2.2150 ++ return XZ_DATA_ERROR; 2.2151 ++ } 2.2152 ++ 2.2153 ++ if (tmp >= 0x80) { 2.2154 ++ s->lzma2.uncompressed = (tmp & 0x1F) << 16; 2.2155 ++ s->lzma2.sequence = SEQ_UNCOMPRESSED_1; 2.2156 ++ 2.2157 ++ if (tmp >= 0xC0) { 2.2158 ++ /* 2.2159 ++ * When there are new properties, 2.2160 ++ * state reset is done at 2.2161 ++ * SEQ_PROPERTIES. 
2.2162 ++ */ 2.2163 ++ s->lzma2.need_props = false; 2.2164 ++ s->lzma2.next_sequence 2.2165 ++ = SEQ_PROPERTIES; 2.2166 ++ 2.2167 ++ } else if (s->lzma2.need_props) { 2.2168 ++ return XZ_DATA_ERROR; 2.2169 ++ 2.2170 ++ } else { 2.2171 ++ s->lzma2.next_sequence 2.2172 ++ = SEQ_LZMA_PREPARE; 2.2173 ++ if (tmp >= 0xA0) 2.2174 ++ lzma_reset(s); 2.2175 ++ } 2.2176 ++ } else { 2.2177 ++ if (tmp == 0x00) 2.2178 ++ return XZ_STREAM_END; 2.2179 ++ 2.2180 ++ if (tmp > 0x02) 2.2181 ++ return XZ_DATA_ERROR; 2.2182 ++ 2.2183 ++ s->lzma2.sequence = SEQ_COMPRESSED_0; 2.2184 ++ s->lzma2.next_sequence = SEQ_COPY; 2.2185 ++ } 2.2186 ++ 2.2187 ++ break; 2.2188 ++ 2.2189 ++ case SEQ_UNCOMPRESSED_1: 2.2190 ++ s->lzma2.uncompressed 2.2191 ++ += (uint32_t)b->in[b->in_pos++] << 8; 2.2192 ++ s->lzma2.sequence = SEQ_UNCOMPRESSED_2; 2.2193 ++ break; 2.2194 ++ 2.2195 ++ case SEQ_UNCOMPRESSED_2: 2.2196 ++ s->lzma2.uncompressed 2.2197 ++ += (uint32_t)b->in[b->in_pos++] + 1; 2.2198 ++ s->lzma2.sequence = SEQ_COMPRESSED_0; 2.2199 ++ break; 2.2200 ++ 2.2201 ++ case SEQ_COMPRESSED_0: 2.2202 ++ s->lzma2.compressed 2.2203 ++ = (uint32_t)b->in[b->in_pos++] << 8; 2.2204 ++ s->lzma2.sequence = SEQ_COMPRESSED_1; 2.2205 ++ break; 2.2206 ++ 2.2207 ++ case SEQ_COMPRESSED_1: 2.2208 ++ s->lzma2.compressed 2.2209 ++ += (uint32_t)b->in[b->in_pos++] + 1; 2.2210 ++ s->lzma2.sequence = s->lzma2.next_sequence; 2.2211 ++ break; 2.2212 ++ 2.2213 ++ case SEQ_PROPERTIES: 2.2214 ++ if (!lzma_props(s, b->in[b->in_pos++])) 2.2215 ++ return XZ_DATA_ERROR; 2.2216 ++ 2.2217 ++ s->lzma2.sequence = SEQ_LZMA_PREPARE; 2.2218 ++ 2.2219 ++ case SEQ_LZMA_PREPARE: 2.2220 ++ if (s->lzma2.compressed < RC_INIT_BYTES) 2.2221 ++ return XZ_DATA_ERROR; 2.2222 ++ 2.2223 ++ if (!rc_read_init(&s->rc, b)) 2.2224 ++ return XZ_OK; 2.2225 ++ 2.2226 ++ s->lzma2.compressed -= RC_INIT_BYTES; 2.2227 ++ s->lzma2.sequence = SEQ_LZMA_RUN; 2.2228 ++ 2.2229 ++ case SEQ_LZMA_RUN: 2.2230 ++ /* 2.2231 ++ * Set dictionary limit to indicate how much we want 2.2232 ++ * to be encoded at maximum. Decode new data into the 2.2233 ++ * dictionary. Flush the new data from dictionary to 2.2234 ++ * b->out. Check if we finished decoding this chunk. 2.2235 ++ * In case the dictionary got full but we didn't fill 2.2236 ++ * the output buffer yet, we may run this loop 2.2237 ++ * multiple times without changing s->lzma2.sequence. 
2.2238 ++ */ 2.2239 ++ dict_limit(&s->dict, min_t(size_t, 2.2240 ++ b->out_size - b->out_pos, 2.2241 ++ s->lzma2.uncompressed)); 2.2242 ++ if (!lzma2_lzma(s, b)) 2.2243 ++ return XZ_DATA_ERROR; 2.2244 ++ 2.2245 ++ s->lzma2.uncompressed -= dict_flush(&s->dict, b); 2.2246 ++ 2.2247 ++ if (s->lzma2.uncompressed == 0) { 2.2248 ++ if (s->lzma2.compressed > 0 || s->lzma.len > 0 2.2249 ++ || !rc_is_finished(&s->rc)) 2.2250 ++ return XZ_DATA_ERROR; 2.2251 ++ 2.2252 ++ rc_reset(&s->rc); 2.2253 ++ s->lzma2.sequence = SEQ_CONTROL; 2.2254 ++ 2.2255 ++ } else if (b->out_pos == b->out_size 2.2256 ++ || (b->in_pos == b->in_size 2.2257 ++ && s->temp.size 2.2258 ++ < s->lzma2.compressed)) { 2.2259 ++ return XZ_OK; 2.2260 ++ } 2.2261 ++ 2.2262 ++ break; 2.2263 ++ 2.2264 ++ case SEQ_COPY: 2.2265 ++ dict_uncompressed(&s->dict, b, &s->lzma2.compressed); 2.2266 ++ if (s->lzma2.compressed > 0) 2.2267 ++ return XZ_OK; 2.2268 ++ 2.2269 ++ s->lzma2.sequence = SEQ_CONTROL; 2.2270 ++ break; 2.2271 ++ } 2.2272 ++ } 2.2273 ++ 2.2274 ++ return XZ_OK; 2.2275 ++} 2.2276 ++ 2.2277 ++XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, 2.2278 ++ uint32_t dict_max) 2.2279 ++{ 2.2280 ++ struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL); 2.2281 ++ if (s == NULL) 2.2282 ++ return NULL; 2.2283 ++ 2.2284 ++ s->dict.mode = mode; 2.2285 ++ s->dict.size_max = dict_max; 2.2286 ++ 2.2287 ++ if (DEC_IS_PREALLOC(mode)) { 2.2288 ++ s->dict.buf = vmalloc(dict_max); 2.2289 ++ if (s->dict.buf == NULL) { 2.2290 ++ kfree(s); 2.2291 ++ return NULL; 2.2292 ++ } 2.2293 ++ } else if (DEC_IS_DYNALLOC(mode)) { 2.2294 ++ s->dict.buf = NULL; 2.2295 ++ s->dict.allocated = 0; 2.2296 ++ } 2.2297 ++ 2.2298 ++ return s; 2.2299 ++} 2.2300 ++ 2.2301 ++XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props) 2.2302 ++{ 2.2303 ++ /* This limits dictionary size to 3 GiB to keep parsing simpler. */ 2.2304 ++ if (props > 39) 2.2305 ++ return XZ_OPTIONS_ERROR; 2.2306 ++ 2.2307 ++ s->dict.size = 2 + (props & 1); 2.2308 ++ s->dict.size <<= (props >> 1) + 11; 2.2309 ++ 2.2310 ++ if (DEC_IS_MULTI(s->dict.mode)) { 2.2311 ++ if (s->dict.size > s->dict.size_max) 2.2312 ++ return XZ_MEMLIMIT_ERROR; 2.2313 ++ 2.2314 ++ s->dict.end = s->dict.size; 2.2315 ++ 2.2316 ++ if (DEC_IS_DYNALLOC(s->dict.mode)) { 2.2317 ++ if (s->dict.allocated < s->dict.size) { 2.2318 ++ vfree(s->dict.buf); 2.2319 ++ s->dict.buf = vmalloc(s->dict.size); 2.2320 ++ if (s->dict.buf == NULL) { 2.2321 ++ s->dict.allocated = 0; 2.2322 ++ return XZ_MEM_ERROR; 2.2323 ++ } 2.2324 ++ } 2.2325 ++ } 2.2326 ++ } 2.2327 ++ 2.2328 ++ s->lzma.len = 0; 2.2329 ++ 2.2330 ++ s->lzma2.sequence = SEQ_CONTROL; 2.2331 ++ s->lzma2.need_dict_reset = true; 2.2332 ++ 2.2333 ++ s->temp.size = 0; 2.2334 ++ 2.2335 ++ return XZ_OK; 2.2336 ++} 2.2337 ++ 2.2338 ++XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s) 2.2339 ++{ 2.2340 ++ if (DEC_IS_MULTI(s->dict.mode)) 2.2341 ++ vfree(s->dict.buf); 2.2342 ++ 2.2343 ++ kfree(s); 2.2344 ++} 2.2345 +diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c 2.2346 +new file mode 100644 2.2347 +index 0000000..ac809b1 2.2348 +--- /dev/null 2.2349 ++++ b/lib/xz/xz_dec_stream.c 2.2350 +@@ -0,0 +1,821 @@ 2.2351 ++/* 2.2352 ++ * .xz Stream decoder 2.2353 ++ * 2.2354 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 2.2355 ++ * 2.2356 ++ * This file has been put into the public domain. 2.2357 ++ * You can do whatever you want with this file. 
2.2358 ++ */ 2.2359 ++ 2.2360 ++#include "xz_private.h" 2.2361 ++#include "xz_stream.h" 2.2362 ++ 2.2363 ++/* Hash used to validate the Index field */ 2.2364 ++struct xz_dec_hash { 2.2365 ++ vli_type unpadded; 2.2366 ++ vli_type uncompressed; 2.2367 ++ uint32_t crc32; 2.2368 ++}; 2.2369 ++ 2.2370 ++struct xz_dec { 2.2371 ++ /* Position in dec_main() */ 2.2372 ++ enum { 2.2373 ++ SEQ_STREAM_HEADER, 2.2374 ++ SEQ_BLOCK_START, 2.2375 ++ SEQ_BLOCK_HEADER, 2.2376 ++ SEQ_BLOCK_UNCOMPRESS, 2.2377 ++ SEQ_BLOCK_PADDING, 2.2378 ++ SEQ_BLOCK_CHECK, 2.2379 ++ SEQ_INDEX, 2.2380 ++ SEQ_INDEX_PADDING, 2.2381 ++ SEQ_INDEX_CRC32, 2.2382 ++ SEQ_STREAM_FOOTER 2.2383 ++ } sequence; 2.2384 ++ 2.2385 ++ /* Position in variable-length integers and Check fields */ 2.2386 ++ uint32_t pos; 2.2387 ++ 2.2388 ++ /* Variable-length integer decoded by dec_vli() */ 2.2389 ++ vli_type vli; 2.2390 ++ 2.2391 ++ /* Saved in_pos and out_pos */ 2.2392 ++ size_t in_start; 2.2393 ++ size_t out_start; 2.2394 ++ 2.2395 ++ /* CRC32 value in Block or Index */ 2.2396 ++ uint32_t crc32; 2.2397 ++ 2.2398 ++ /* Type of the integrity check calculated from uncompressed data */ 2.2399 ++ enum xz_check check_type; 2.2400 ++ 2.2401 ++ /* Operation mode */ 2.2402 ++ enum xz_mode mode; 2.2403 ++ 2.2404 ++ /* 2.2405 ++ * True if the next call to xz_dec_run() is allowed to return 2.2406 ++ * XZ_BUF_ERROR. 2.2407 ++ */ 2.2408 ++ bool allow_buf_error; 2.2409 ++ 2.2410 ++ /* Information stored in Block Header */ 2.2411 ++ struct { 2.2412 ++ /* 2.2413 ++ * Value stored in the Compressed Size field, or 2.2414 ++ * VLI_UNKNOWN if Compressed Size is not present. 2.2415 ++ */ 2.2416 ++ vli_type compressed; 2.2417 ++ 2.2418 ++ /* 2.2419 ++ * Value stored in the Uncompressed Size field, or 2.2420 ++ * VLI_UNKNOWN if Uncompressed Size is not present. 2.2421 ++ */ 2.2422 ++ vli_type uncompressed; 2.2423 ++ 2.2424 ++ /* Size of the Block Header field */ 2.2425 ++ uint32_t size; 2.2426 ++ } block_header; 2.2427 ++ 2.2428 ++ /* Information collected when decoding Blocks */ 2.2429 ++ struct { 2.2430 ++ /* Observed compressed size of the current Block */ 2.2431 ++ vli_type compressed; 2.2432 ++ 2.2433 ++ /* Observed uncompressed size of the current Block */ 2.2434 ++ vli_type uncompressed; 2.2435 ++ 2.2436 ++ /* Number of Blocks decoded so far */ 2.2437 ++ vli_type count; 2.2438 ++ 2.2439 ++ /* 2.2440 ++ * Hash calculated from the Block sizes. This is used to 2.2441 ++ * validate the Index field. 2.2442 ++ */ 2.2443 ++ struct xz_dec_hash hash; 2.2444 ++ } block; 2.2445 ++ 2.2446 ++ /* Variables needed when verifying the Index field */ 2.2447 ++ struct { 2.2448 ++ /* Position in dec_index() */ 2.2449 ++ enum { 2.2450 ++ SEQ_INDEX_COUNT, 2.2451 ++ SEQ_INDEX_UNPADDED, 2.2452 ++ SEQ_INDEX_UNCOMPRESSED 2.2453 ++ } sequence; 2.2454 ++ 2.2455 ++ /* Size of the Index in bytes */ 2.2456 ++ vli_type size; 2.2457 ++ 2.2458 ++ /* Number of Records (matches block.count in valid files) */ 2.2459 ++ vli_type count; 2.2460 ++ 2.2461 ++ /* 2.2462 ++ * Hash calculated from the Records (matches block.hash in 2.2463 ++ * valid files). 2.2464 ++ */ 2.2465 ++ struct xz_dec_hash hash; 2.2466 ++ } index; 2.2467 ++ 2.2468 ++ /* 2.2469 ++ * Temporary buffer needed to hold Stream Header, Block Header, 2.2470 ++ * and Stream Footer. The Block Header is the biggest (1 KiB) 2.2471 ++ * so we reserve space according to that. buf[] has to be aligned 2.2472 ++ * to a multiple of four bytes; the size_t variables before it 2.2473 ++ * should guarantee this. 
2.2474 ++ */ 2.2475 ++ struct { 2.2476 ++ size_t pos; 2.2477 ++ size_t size; 2.2478 ++ uint8_t buf[1024]; 2.2479 ++ } temp; 2.2480 ++ 2.2481 ++ struct xz_dec_lzma2 *lzma2; 2.2482 ++ 2.2483 ++#ifdef XZ_DEC_BCJ 2.2484 ++ struct xz_dec_bcj *bcj; 2.2485 ++ bool bcj_active; 2.2486 ++#endif 2.2487 ++}; 2.2488 ++ 2.2489 ++#ifdef XZ_DEC_ANY_CHECK 2.2490 ++/* Sizes of the Check field with different Check IDs */ 2.2491 ++static const uint8_t check_sizes[16] = { 2.2492 ++ 0, 2.2493 ++ 4, 4, 4, 2.2494 ++ 8, 8, 8, 2.2495 ++ 16, 16, 16, 2.2496 ++ 32, 32, 32, 2.2497 ++ 64, 64, 64 2.2498 ++}; 2.2499 ++#endif 2.2500 ++ 2.2501 ++/* 2.2502 ++ * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller 2.2503 ++ * must have set s->temp.pos to indicate how much data we are supposed 2.2504 ++ * to copy into s->temp.buf. Return true once s->temp.pos has reached 2.2505 ++ * s->temp.size. 2.2506 ++ */ 2.2507 ++static bool fill_temp(struct xz_dec *s, struct xz_buf *b) 2.2508 ++{ 2.2509 ++ size_t copy_size = min_t(size_t, 2.2510 ++ b->in_size - b->in_pos, s->temp.size - s->temp.pos); 2.2511 ++ 2.2512 ++ memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size); 2.2513 ++ b->in_pos += copy_size; 2.2514 ++ s->temp.pos += copy_size; 2.2515 ++ 2.2516 ++ if (s->temp.pos == s->temp.size) { 2.2517 ++ s->temp.pos = 0; 2.2518 ++ return true; 2.2519 ++ } 2.2520 ++ 2.2521 ++ return false; 2.2522 ++} 2.2523 ++ 2.2524 ++/* Decode a variable-length integer (little-endian base-128 encoding) */ 2.2525 ++static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in, 2.2526 ++ size_t *in_pos, size_t in_size) 2.2527 ++{ 2.2528 ++ uint8_t byte; 2.2529 ++ 2.2530 ++ if (s->pos == 0) 2.2531 ++ s->vli = 0; 2.2532 ++ 2.2533 ++ while (*in_pos < in_size) { 2.2534 ++ byte = in[*in_pos]; 2.2535 ++ ++*in_pos; 2.2536 ++ 2.2537 ++ s->vli |= (vli_type)(byte & 0x7F) << s->pos; 2.2538 ++ 2.2539 ++ if ((byte & 0x80) == 0) { 2.2540 ++ /* Don't allow non-minimal encodings. */ 2.2541 ++ if (byte == 0 && s->pos != 0) 2.2542 ++ return XZ_DATA_ERROR; 2.2543 ++ 2.2544 ++ s->pos = 0; 2.2545 ++ return XZ_STREAM_END; 2.2546 ++ } 2.2547 ++ 2.2548 ++ s->pos += 7; 2.2549 ++ if (s->pos == 7 * VLI_BYTES_MAX) 2.2550 ++ return XZ_DATA_ERROR; 2.2551 ++ } 2.2552 ++ 2.2553 ++ return XZ_OK; 2.2554 ++} 2.2555 ++ 2.2556 ++/* 2.2557 ++ * Decode the Compressed Data field from a Block. Update and validate 2.2558 ++ * the observed compressed and uncompressed sizes of the Block so that 2.2559 ++ * they don't exceed the values possibly stored in the Block Header 2.2560 ++ * (validation assumes that no integer overflow occurs, since vli_type 2.2561 ++ * is normally uint64_t). Update the CRC32 if presence of the CRC32 2.2562 ++ * field was indicated in Stream Header. 2.2563 ++ * 2.2564 ++ * Once the decoding is finished, validate that the observed sizes match 2.2565 ++ * the sizes possibly stored in the Block Header. Update the hash and 2.2566 ++ * Block count, which are later used to validate the Index field. 
2.2567 ++ */ 2.2568 ++static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b) 2.2569 ++{ 2.2570 ++ enum xz_ret ret; 2.2571 ++ 2.2572 ++ s->in_start = b->in_pos; 2.2573 ++ s->out_start = b->out_pos; 2.2574 ++ 2.2575 ++#ifdef XZ_DEC_BCJ 2.2576 ++ if (s->bcj_active) 2.2577 ++ ret = xz_dec_bcj_run(s->bcj, s->lzma2, b); 2.2578 ++ else 2.2579 ++#endif 2.2580 ++ ret = xz_dec_lzma2_run(s->lzma2, b); 2.2581 ++ 2.2582 ++ s->block.compressed += b->in_pos - s->in_start; 2.2583 ++ s->block.uncompressed += b->out_pos - s->out_start; 2.2584 ++ 2.2585 ++ /* 2.2586 ++ * There is no need to separately check for VLI_UNKNOWN, since 2.2587 ++ * the observed sizes are always smaller than VLI_UNKNOWN. 2.2588 ++ */ 2.2589 ++ if (s->block.compressed > s->block_header.compressed 2.2590 ++ || s->block.uncompressed 2.2591 ++ > s->block_header.uncompressed) 2.2592 ++ return XZ_DATA_ERROR; 2.2593 ++ 2.2594 ++ if (s->check_type == XZ_CHECK_CRC32) 2.2595 ++ s->crc32 = xz_crc32(b->out + s->out_start, 2.2596 ++ b->out_pos - s->out_start, s->crc32); 2.2597 ++ 2.2598 ++ if (ret == XZ_STREAM_END) { 2.2599 ++ if (s->block_header.compressed != VLI_UNKNOWN 2.2600 ++ && s->block_header.compressed 2.2601 ++ != s->block.compressed) 2.2602 ++ return XZ_DATA_ERROR; 2.2603 ++ 2.2604 ++ if (s->block_header.uncompressed != VLI_UNKNOWN 2.2605 ++ && s->block_header.uncompressed 2.2606 ++ != s->block.uncompressed) 2.2607 ++ return XZ_DATA_ERROR; 2.2608 ++ 2.2609 ++ s->block.hash.unpadded += s->block_header.size 2.2610 ++ + s->block.compressed; 2.2611 ++ 2.2612 ++#ifdef XZ_DEC_ANY_CHECK 2.2613 ++ s->block.hash.unpadded += check_sizes[s->check_type]; 2.2614 ++#else 2.2615 ++ if (s->check_type == XZ_CHECK_CRC32) 2.2616 ++ s->block.hash.unpadded += 4; 2.2617 ++#endif 2.2618 ++ 2.2619 ++ s->block.hash.uncompressed += s->block.uncompressed; 2.2620 ++ s->block.hash.crc32 = xz_crc32( 2.2621 ++ (const uint8_t *)&s->block.hash, 2.2622 ++ sizeof(s->block.hash), s->block.hash.crc32); 2.2623 ++ 2.2624 ++ ++s->block.count; 2.2625 ++ } 2.2626 ++ 2.2627 ++ return ret; 2.2628 ++} 2.2629 ++ 2.2630 ++/* Update the Index size and the CRC32 value. */ 2.2631 ++static void index_update(struct xz_dec *s, const struct xz_buf *b) 2.2632 ++{ 2.2633 ++ size_t in_used = b->in_pos - s->in_start; 2.2634 ++ s->index.size += in_used; 2.2635 ++ s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32); 2.2636 ++} 2.2637 ++ 2.2638 ++/* 2.2639 ++ * Decode the Number of Records, Unpadded Size, and Uncompressed Size 2.2640 ++ * fields from the Index field. That is, Index Padding and CRC32 are not 2.2641 ++ * decoded by this function. 2.2642 ++ * 2.2643 ++ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything 2.2644 ++ * successfully decoded), or XZ_DATA_ERROR (input is corrupt). 2.2645 ++ */ 2.2646 ++static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b) 2.2647 ++{ 2.2648 ++ enum xz_ret ret; 2.2649 ++ 2.2650 ++ do { 2.2651 ++ ret = dec_vli(s, b->in, &b->in_pos, b->in_size); 2.2652 ++ if (ret != XZ_STREAM_END) { 2.2653 ++ index_update(s, b); 2.2654 ++ return ret; 2.2655 ++ } 2.2656 ++ 2.2657 ++ switch (s->index.sequence) { 2.2658 ++ case SEQ_INDEX_COUNT: 2.2659 ++ s->index.count = s->vli; 2.2660 ++ 2.2661 ++ /* 2.2662 ++ * Validate that the Number of Records field 2.2663 ++ * indicates the same number of Records as 2.2664 ++ * there were Blocks in the Stream. 
2.2665 ++ */ 2.2666 ++ if (s->index.count != s->block.count) 2.2667 ++ return XZ_DATA_ERROR; 2.2668 ++ 2.2669 ++ s->index.sequence = SEQ_INDEX_UNPADDED; 2.2670 ++ break; 2.2671 ++ 2.2672 ++ case SEQ_INDEX_UNPADDED: 2.2673 ++ s->index.hash.unpadded += s->vli; 2.2674 ++ s->index.sequence = SEQ_INDEX_UNCOMPRESSED; 2.2675 ++ break; 2.2676 ++ 2.2677 ++ case SEQ_INDEX_UNCOMPRESSED: 2.2678 ++ s->index.hash.uncompressed += s->vli; 2.2679 ++ s->index.hash.crc32 = xz_crc32( 2.2680 ++ (const uint8_t *)&s->index.hash, 2.2681 ++ sizeof(s->index.hash), 2.2682 ++ s->index.hash.crc32); 2.2683 ++ --s->index.count; 2.2684 ++ s->index.sequence = SEQ_INDEX_UNPADDED; 2.2685 ++ break; 2.2686 ++ } 2.2687 ++ } while (s->index.count > 0); 2.2688 ++ 2.2689 ++ return XZ_STREAM_END; 2.2690 ++} 2.2691 ++ 2.2692 ++/* 2.2693 ++ * Validate that the next four input bytes match the value of s->crc32. 2.2694 ++ * s->pos must be zero when starting to validate the first byte. 2.2695 ++ */ 2.2696 ++static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b) 2.2697 ++{ 2.2698 ++ do { 2.2699 ++ if (b->in_pos == b->in_size) 2.2700 ++ return XZ_OK; 2.2701 ++ 2.2702 ++ if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++]) 2.2703 ++ return XZ_DATA_ERROR; 2.2704 ++ 2.2705 ++ s->pos += 8; 2.2706 ++ 2.2707 ++ } while (s->pos < 32); 2.2708 ++ 2.2709 ++ s->crc32 = 0; 2.2710 ++ s->pos = 0; 2.2711 ++ 2.2712 ++ return XZ_STREAM_END; 2.2713 ++} 2.2714 ++ 2.2715 ++#ifdef XZ_DEC_ANY_CHECK 2.2716 ++/* 2.2717 ++ * Skip over the Check field when the Check ID is not supported. 2.2718 ++ * Returns true once the whole Check field has been skipped over. 2.2719 ++ */ 2.2720 ++static bool check_skip(struct xz_dec *s, struct xz_buf *b) 2.2721 ++{ 2.2722 ++ while (s->pos < check_sizes[s->check_type]) { 2.2723 ++ if (b->in_pos == b->in_size) 2.2724 ++ return false; 2.2725 ++ 2.2726 ++ ++b->in_pos; 2.2727 ++ ++s->pos; 2.2728 ++ } 2.2729 ++ 2.2730 ++ s->pos = 0; 2.2731 ++ 2.2732 ++ return true; 2.2733 ++} 2.2734 ++#endif 2.2735 ++ 2.2736 ++/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */ 2.2737 ++static enum xz_ret dec_stream_header(struct xz_dec *s) 2.2738 ++{ 2.2739 ++ if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE)) 2.2740 ++ return XZ_FORMAT_ERROR; 2.2741 ++ 2.2742 ++ if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0) 2.2743 ++ != get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2)) 2.2744 ++ return XZ_DATA_ERROR; 2.2745 ++ 2.2746 ++ if (s->temp.buf[HEADER_MAGIC_SIZE] != 0) 2.2747 ++ return XZ_OPTIONS_ERROR; 2.2748 ++ 2.2749 ++ /* 2.2750 ++ * Of integrity checks, we support only none (Check ID = 0) and 2.2751 ++ * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined, 2.2752 ++ * we will accept other check types too, but then the check won't 2.2753 ++ * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given. 
2.2754 ++ */ 2.2755 ++ s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1]; 2.2756 ++ 2.2757 ++#ifdef XZ_DEC_ANY_CHECK 2.2758 ++ if (s->check_type > XZ_CHECK_MAX) 2.2759 ++ return XZ_OPTIONS_ERROR; 2.2760 ++ 2.2761 ++ if (s->check_type > XZ_CHECK_CRC32) 2.2762 ++ return XZ_UNSUPPORTED_CHECK; 2.2763 ++#else 2.2764 ++ if (s->check_type > XZ_CHECK_CRC32) 2.2765 ++ return XZ_OPTIONS_ERROR; 2.2766 ++#endif 2.2767 ++ 2.2768 ++ return XZ_OK; 2.2769 ++} 2.2770 ++ 2.2771 ++/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */ 2.2772 ++static enum xz_ret dec_stream_footer(struct xz_dec *s) 2.2773 ++{ 2.2774 ++ if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE)) 2.2775 ++ return XZ_DATA_ERROR; 2.2776 ++ 2.2777 ++ if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf)) 2.2778 ++ return XZ_DATA_ERROR; 2.2779 ++ 2.2780 ++ /* 2.2781 ++ * Validate Backward Size. Note that we never added the size of the 2.2782 ++ * Index CRC32 field to s->index.size, thus we use s->index.size / 4 2.2783 ++ * instead of s->index.size / 4 - 1. 2.2784 ++ */ 2.2785 ++ if ((s->index.size >> 2) != get_le32(s->temp.buf + 4)) 2.2786 ++ return XZ_DATA_ERROR; 2.2787 ++ 2.2788 ++ if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type) 2.2789 ++ return XZ_DATA_ERROR; 2.2790 ++ 2.2791 ++ /* 2.2792 ++ * Use XZ_STREAM_END instead of XZ_OK to be more convenient 2.2793 ++ * for the caller. 2.2794 ++ */ 2.2795 ++ return XZ_STREAM_END; 2.2796 ++} 2.2797 ++ 2.2798 ++/* Decode the Block Header and initialize the filter chain. */ 2.2799 ++static enum xz_ret dec_block_header(struct xz_dec *s) 2.2800 ++{ 2.2801 ++ enum xz_ret ret; 2.2802 ++ 2.2803 ++ /* 2.2804 ++ * Validate the CRC32. We know that the temp buffer is at least 2.2805 ++ * eight bytes so this is safe. 2.2806 ++ */ 2.2807 ++ s->temp.size -= 4; 2.2808 ++ if (xz_crc32(s->temp.buf, s->temp.size, 0) 2.2809 ++ != get_le32(s->temp.buf + s->temp.size)) 2.2810 ++ return XZ_DATA_ERROR; 2.2811 ++ 2.2812 ++ s->temp.pos = 2; 2.2813 ++ 2.2814 ++ /* 2.2815 ++ * Catch unsupported Block Flags. We support only one or two filters 2.2816 ++ * in the chain, so we catch that with the same test. 2.2817 ++ */ 2.2818 ++#ifdef XZ_DEC_BCJ 2.2819 ++ if (s->temp.buf[1] & 0x3E) 2.2820 ++#else 2.2821 ++ if (s->temp.buf[1] & 0x3F) 2.2822 ++#endif 2.2823 ++ return XZ_OPTIONS_ERROR; 2.2824 ++ 2.2825 ++ /* Compressed Size */ 2.2826 ++ if (s->temp.buf[1] & 0x40) { 2.2827 ++ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) 2.2828 ++ != XZ_STREAM_END) 2.2829 ++ return XZ_DATA_ERROR; 2.2830 ++ 2.2831 ++ s->block_header.compressed = s->vli; 2.2832 ++ } else { 2.2833 ++ s->block_header.compressed = VLI_UNKNOWN; 2.2834 ++ } 2.2835 ++ 2.2836 ++ /* Uncompressed Size */ 2.2837 ++ if (s->temp.buf[1] & 0x80) { 2.2838 ++ if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size) 2.2839 ++ != XZ_STREAM_END) 2.2840 ++ return XZ_DATA_ERROR; 2.2841 ++ 2.2842 ++ s->block_header.uncompressed = s->vli; 2.2843 ++ } else { 2.2844 ++ s->block_header.uncompressed = VLI_UNKNOWN; 2.2845 ++ } 2.2846 ++ 2.2847 ++#ifdef XZ_DEC_BCJ 2.2848 ++ /* If there are two filters, the first one must be a BCJ filter. 
*/ 2.2849 ++ s->bcj_active = s->temp.buf[1] & 0x01; 2.2850 ++ if (s->bcj_active) { 2.2851 ++ if (s->temp.size - s->temp.pos < 2) 2.2852 ++ return XZ_OPTIONS_ERROR; 2.2853 ++ 2.2854 ++ ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]); 2.2855 ++ if (ret != XZ_OK) 2.2856 ++ return ret; 2.2857 ++ 2.2858 ++ /* 2.2859 ++ * We don't support custom start offset, 2.2860 ++ * so Size of Properties must be zero. 2.2861 ++ */ 2.2862 ++ if (s->temp.buf[s->temp.pos++] != 0x00) 2.2863 ++ return XZ_OPTIONS_ERROR; 2.2864 ++ } 2.2865 ++#endif 2.2866 ++ 2.2867 ++ /* Valid Filter Flags always take at least two bytes. */ 2.2868 ++ if (s->temp.size - s->temp.pos < 2) 2.2869 ++ return XZ_DATA_ERROR; 2.2870 ++ 2.2871 ++ /* Filter ID = LZMA2 */ 2.2872 ++ if (s->temp.buf[s->temp.pos++] != 0x21) 2.2873 ++ return XZ_OPTIONS_ERROR; 2.2874 ++ 2.2875 ++ /* Size of Properties = 1-byte Filter Properties */ 2.2876 ++ if (s->temp.buf[s->temp.pos++] != 0x01) 2.2877 ++ return XZ_OPTIONS_ERROR; 2.2878 ++ 2.2879 ++ /* Filter Properties contains LZMA2 dictionary size. */ 2.2880 ++ if (s->temp.size - s->temp.pos < 1) 2.2881 ++ return XZ_DATA_ERROR; 2.2882 ++ 2.2883 ++ ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]); 2.2884 ++ if (ret != XZ_OK) 2.2885 ++ return ret; 2.2886 ++ 2.2887 ++ /* The rest must be Header Padding. */ 2.2888 ++ while (s->temp.pos < s->temp.size) 2.2889 ++ if (s->temp.buf[s->temp.pos++] != 0x00) 2.2890 ++ return XZ_OPTIONS_ERROR; 2.2891 ++ 2.2892 ++ s->temp.pos = 0; 2.2893 ++ s->block.compressed = 0; 2.2894 ++ s->block.uncompressed = 0; 2.2895 ++ 2.2896 ++ return XZ_OK; 2.2897 ++} 2.2898 ++ 2.2899 ++static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) 2.2900 ++{ 2.2901 ++ enum xz_ret ret; 2.2902 ++ 2.2903 ++ /* 2.2904 ++ * Store the start position for the case when we are in the middle 2.2905 ++ * of the Index field. 2.2906 ++ */ 2.2907 ++ s->in_start = b->in_pos; 2.2908 ++ 2.2909 ++ while (true) { 2.2910 ++ switch (s->sequence) { 2.2911 ++ case SEQ_STREAM_HEADER: 2.2912 ++ /* 2.2913 ++ * Stream Header is copied to s->temp, and then 2.2914 ++ * decoded from there. This way if the caller 2.2915 ++ * gives us only little input at a time, we can 2.2916 ++ * still keep the Stream Header decoding code 2.2917 ++ * simple. Similar approach is used in many places 2.2918 ++ * in this file. 2.2919 ++ */ 2.2920 ++ if (!fill_temp(s, b)) 2.2921 ++ return XZ_OK; 2.2922 ++ 2.2923 ++ /* 2.2924 ++ * If dec_stream_header() returns 2.2925 ++ * XZ_UNSUPPORTED_CHECK, it is still possible 2.2926 ++ * to continue decoding if working in multi-call 2.2927 ++ * mode. Thus, update s->sequence before calling 2.2928 ++ * dec_stream_header(). 2.2929 ++ */ 2.2930 ++ s->sequence = SEQ_BLOCK_START; 2.2931 ++ 2.2932 ++ ret = dec_stream_header(s); 2.2933 ++ if (ret != XZ_OK) 2.2934 ++ return ret; 2.2935 ++ 2.2936 ++ case SEQ_BLOCK_START: 2.2937 ++ /* We need one byte of input to continue. */ 2.2938 ++ if (b->in_pos == b->in_size) 2.2939 ++ return XZ_OK; 2.2940 ++ 2.2941 ++ /* See if this is the beginning of the Index field. */ 2.2942 ++ if (b->in[b->in_pos] == 0) { 2.2943 ++ s->in_start = b->in_pos++; 2.2944 ++ s->sequence = SEQ_INDEX; 2.2945 ++ break; 2.2946 ++ } 2.2947 ++ 2.2948 ++ /* 2.2949 ++ * Calculate the size of the Block Header and 2.2950 ++ * prepare to decode it. 
2.2951 ++ */ 2.2952 ++ s->block_header.size 2.2953 ++ = ((uint32_t)b->in[b->in_pos] + 1) * 4; 2.2954 ++ 2.2955 ++ s->temp.size = s->block_header.size; 2.2956 ++ s->temp.pos = 0; 2.2957 ++ s->sequence = SEQ_BLOCK_HEADER; 2.2958 ++ 2.2959 ++ case SEQ_BLOCK_HEADER: 2.2960 ++ if (!fill_temp(s, b)) 2.2961 ++ return XZ_OK; 2.2962 ++ 2.2963 ++ ret = dec_block_header(s); 2.2964 ++ if (ret != XZ_OK) 2.2965 ++ return ret; 2.2966 ++ 2.2967 ++ s->sequence = SEQ_BLOCK_UNCOMPRESS; 2.2968 ++ 2.2969 ++ case SEQ_BLOCK_UNCOMPRESS: 2.2970 ++ ret = dec_block(s, b); 2.2971 ++ if (ret != XZ_STREAM_END) 2.2972 ++ return ret; 2.2973 ++ 2.2974 ++ s->sequence = SEQ_BLOCK_PADDING; 2.2975 ++ 2.2976 ++ case SEQ_BLOCK_PADDING: 2.2977 ++ /* 2.2978 ++ * Size of Compressed Data + Block Padding 2.2979 ++ * must be a multiple of four. We don't need 2.2980 ++ * s->block.compressed for anything else 2.2981 ++ * anymore, so we use it here to test the size 2.2982 ++ * of the Block Padding field. 2.2983 ++ */ 2.2984 ++ while (s->block.compressed & 3) { 2.2985 ++ if (b->in_pos == b->in_size) 2.2986 ++ return XZ_OK; 2.2987 ++ 2.2988 ++ if (b->in[b->in_pos++] != 0) 2.2989 ++ return XZ_DATA_ERROR; 2.2990 ++ 2.2991 ++ ++s->block.compressed; 2.2992 ++ } 2.2993 ++ 2.2994 ++ s->sequence = SEQ_BLOCK_CHECK; 2.2995 ++ 2.2996 ++ case SEQ_BLOCK_CHECK: 2.2997 ++ if (s->check_type == XZ_CHECK_CRC32) { 2.2998 ++ ret = crc32_validate(s, b); 2.2999 ++ if (ret != XZ_STREAM_END) 2.3000 ++ return ret; 2.3001 ++ } 2.3002 ++#ifdef XZ_DEC_ANY_CHECK 2.3003 ++ else if (!check_skip(s, b)) { 2.3004 ++ return XZ_OK; 2.3005 ++ } 2.3006 ++#endif 2.3007 ++ 2.3008 ++ s->sequence = SEQ_BLOCK_START; 2.3009 ++ break; 2.3010 ++ 2.3011 ++ case SEQ_INDEX: 2.3012 ++ ret = dec_index(s, b); 2.3013 ++ if (ret != XZ_STREAM_END) 2.3014 ++ return ret; 2.3015 ++ 2.3016 ++ s->sequence = SEQ_INDEX_PADDING; 2.3017 ++ 2.3018 ++ case SEQ_INDEX_PADDING: 2.3019 ++ while ((s->index.size + (b->in_pos - s->in_start)) 2.3020 ++ & 3) { 2.3021 ++ if (b->in_pos == b->in_size) { 2.3022 ++ index_update(s, b); 2.3023 ++ return XZ_OK; 2.3024 ++ } 2.3025 ++ 2.3026 ++ if (b->in[b->in_pos++] != 0) 2.3027 ++ return XZ_DATA_ERROR; 2.3028 ++ } 2.3029 ++ 2.3030 ++ /* Finish the CRC32 value and Index size. */ 2.3031 ++ index_update(s, b); 2.3032 ++ 2.3033 ++ /* Compare the hashes to validate the Index field. */ 2.3034 ++ if (!memeq(&s->block.hash, &s->index.hash, 2.3035 ++ sizeof(s->block.hash))) 2.3036 ++ return XZ_DATA_ERROR; 2.3037 ++ 2.3038 ++ s->sequence = SEQ_INDEX_CRC32; 2.3039 ++ 2.3040 ++ case SEQ_INDEX_CRC32: 2.3041 ++ ret = crc32_validate(s, b); 2.3042 ++ if (ret != XZ_STREAM_END) 2.3043 ++ return ret; 2.3044 ++ 2.3045 ++ s->temp.size = STREAM_HEADER_SIZE; 2.3046 ++ s->sequence = SEQ_STREAM_FOOTER; 2.3047 ++ 2.3048 ++ case SEQ_STREAM_FOOTER: 2.3049 ++ if (!fill_temp(s, b)) 2.3050 ++ return XZ_OK; 2.3051 ++ 2.3052 ++ return dec_stream_footer(s); 2.3053 ++ } 2.3054 ++ } 2.3055 ++ 2.3056 ++ /* Never reached */ 2.3057 ++} 2.3058 ++ 2.3059 ++/* 2.3060 ++ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in 2.3061 ++ * multi-call and single-call decoding. 2.3062 ++ * 2.3063 ++ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we 2.3064 ++ * are not going to make any progress anymore. This is to prevent the caller 2.3065 ++ * from calling us infinitely when the input file is truncated or otherwise 2.3066 ++ * corrupt. 
Since zlib-style API allows that the caller fills the input buffer 2.3067 ++ * only when the decoder doesn't produce any new output, we have to be careful 2.3068 ++ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only 2.3069 ++ * after the second consecutive call to xz_dec_run() that makes no progress. 2.3070 ++ * 2.3071 ++ * In single-call mode, if we couldn't decode everything and no error 2.3072 ++ * occurred, either the input is truncated or the output buffer is too small. 2.3073 ++ * Since we know that the last input byte never produces any output, we know 2.3074 ++ * that if all the input was consumed and decoding wasn't finished, the file 2.3075 ++ * must be corrupt. Otherwise the output buffer has to be too small or the 2.3076 ++ * file is corrupt in a way that decoding it produces too big output. 2.3077 ++ * 2.3078 ++ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to 2.3079 ++ * their original values. This is because with some filter chains there won't 2.3080 ++ * be any valid uncompressed data in the output buffer unless the decoding 2.3081 ++ * actually succeeds (that's the price to pay of using the output buffer as 2.3082 ++ * the workspace). 2.3083 ++ */ 2.3084 ++XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b) 2.3085 ++{ 2.3086 ++ size_t in_start; 2.3087 ++ size_t out_start; 2.3088 ++ enum xz_ret ret; 2.3089 ++ 2.3090 ++ if (DEC_IS_SINGLE(s->mode)) 2.3091 ++ xz_dec_reset(s); 2.3092 ++ 2.3093 ++ in_start = b->in_pos; 2.3094 ++ out_start = b->out_pos; 2.3095 ++ ret = dec_main(s, b); 2.3096 ++ 2.3097 ++ if (DEC_IS_SINGLE(s->mode)) { 2.3098 ++ if (ret == XZ_OK) 2.3099 ++ ret = b->in_pos == b->in_size 2.3100 ++ ? XZ_DATA_ERROR : XZ_BUF_ERROR; 2.3101 ++ 2.3102 ++ if (ret != XZ_STREAM_END) { 2.3103 ++ b->in_pos = in_start; 2.3104 ++ b->out_pos = out_start; 2.3105 ++ } 2.3106 ++ 2.3107 ++ } else if (ret == XZ_OK && in_start == b->in_pos 2.3108 ++ && out_start == b->out_pos) { 2.3109 ++ if (s->allow_buf_error) 2.3110 ++ ret = XZ_BUF_ERROR; 2.3111 ++ 2.3112 ++ s->allow_buf_error = true; 2.3113 ++ } else { 2.3114 ++ s->allow_buf_error = false; 2.3115 ++ } 2.3116 ++ 2.3117 ++ return ret; 2.3118 ++} 2.3119 ++ 2.3120 ++XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max) 2.3121 ++{ 2.3122 ++ struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL); 2.3123 ++ if (s == NULL) 2.3124 ++ return NULL; 2.3125 ++ 2.3126 ++ s->mode = mode; 2.3127 ++ 2.3128 ++#ifdef XZ_DEC_BCJ 2.3129 ++ s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode)); 2.3130 ++ if (s->bcj == NULL) 2.3131 ++ goto error_bcj; 2.3132 ++#endif 2.3133 ++ 2.3134 ++ s->lzma2 = xz_dec_lzma2_create(mode, dict_max); 2.3135 ++ if (s->lzma2 == NULL) 2.3136 ++ goto error_lzma2; 2.3137 ++ 2.3138 ++ xz_dec_reset(s); 2.3139 ++ return s; 2.3140 ++ 2.3141 ++error_lzma2: 2.3142 ++#ifdef XZ_DEC_BCJ 2.3143 ++ xz_dec_bcj_end(s->bcj); 2.3144 ++error_bcj: 2.3145 ++#endif 2.3146 ++ kfree(s); 2.3147 ++ return NULL; 2.3148 ++} 2.3149 ++ 2.3150 ++XZ_EXTERN void xz_dec_reset(struct xz_dec *s) 2.3151 ++{ 2.3152 ++ s->sequence = SEQ_STREAM_HEADER; 2.3153 ++ s->allow_buf_error = false; 2.3154 ++ s->pos = 0; 2.3155 ++ s->crc32 = 0; 2.3156 ++ memzero(&s->block, sizeof(s->block)); 2.3157 ++ memzero(&s->index, sizeof(s->index)); 2.3158 ++ s->temp.pos = 0; 2.3159 ++ s->temp.size = STREAM_HEADER_SIZE; 2.3160 ++} 2.3161 ++ 2.3162 ++XZ_EXTERN void xz_dec_end(struct xz_dec *s) 2.3163 ++{ 2.3164 ++ if (s != NULL) { 2.3165 ++ xz_dec_lzma2_end(s->lzma2); 2.3166 ++#ifdef XZ_DEC_BCJ 2.3167 ++ 
xz_dec_bcj_end(s->bcj); 2.3168 ++#endif 2.3169 ++ kfree(s); 2.3170 ++ } 2.3171 ++} 2.3172 +diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c 2.3173 +new file mode 100644 2.3174 +index 0000000..32eb3c0 2.3175 +--- /dev/null 2.3176 ++++ b/lib/xz/xz_dec_syms.c 2.3177 +@@ -0,0 +1,26 @@ 2.3178 ++/* 2.3179 ++ * XZ decoder module information 2.3180 ++ * 2.3181 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 2.3182 ++ * 2.3183 ++ * This file has been put into the public domain. 2.3184 ++ * You can do whatever you want with this file. 2.3185 ++ */ 2.3186 ++ 2.3187 ++#include <linux/module.h> 2.3188 ++#include <linux/xz.h> 2.3189 ++ 2.3190 ++EXPORT_SYMBOL(xz_dec_init); 2.3191 ++EXPORT_SYMBOL(xz_dec_reset); 2.3192 ++EXPORT_SYMBOL(xz_dec_run); 2.3193 ++EXPORT_SYMBOL(xz_dec_end); 2.3194 ++ 2.3195 ++MODULE_DESCRIPTION("XZ decompressor"); 2.3196 ++MODULE_VERSION("1.0"); 2.3197 ++MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov"); 2.3198 ++ 2.3199 ++/* 2.3200 ++ * This code is in the public domain, but in Linux it's simplest to just 2.3201 ++ * say it's GPL and consider the authors as the copyright holders. 2.3202 ++ */ 2.3203 ++MODULE_LICENSE("GPL"); 2.3204 +diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c 2.3205 +new file mode 100644 2.3206 +index 0000000..da28a19 2.3207 +--- /dev/null 2.3208 ++++ b/lib/xz/xz_dec_test.c 2.3209 +@@ -0,0 +1,220 @@ 2.3210 ++/* 2.3211 ++ * XZ decoder tester 2.3212 ++ * 2.3213 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 2.3214 ++ * 2.3215 ++ * This file has been put into the public domain. 2.3216 ++ * You can do whatever you want with this file. 2.3217 ++ */ 2.3218 ++ 2.3219 ++#include <linux/kernel.h> 2.3220 ++#include <linux/module.h> 2.3221 ++#include <linux/fs.h> 2.3222 ++#include <linux/uaccess.h> 2.3223 ++#include <linux/crc32.h> 2.3224 ++#include <linux/xz.h> 2.3225 ++ 2.3226 ++/* Maximum supported dictionary size */ 2.3227 ++#define DICT_MAX (1 << 20) 2.3228 ++ 2.3229 ++/* Device name to pass to register_chrdev(). */ 2.3230 ++#define DEVICE_NAME "xz_dec_test" 2.3231 ++ 2.3232 ++/* Dynamically allocated device major number */ 2.3233 ++static int device_major; 2.3234 ++ 2.3235 ++/* 2.3236 ++ * We reuse the same decoder state, and thus can decode only one 2.3237 ++ * file at a time. 2.3238 ++ */ 2.3239 ++static bool device_is_open; 2.3240 ++ 2.3241 ++/* XZ decoder state */ 2.3242 ++static struct xz_dec *state; 2.3243 ++ 2.3244 ++/* 2.3245 ++ * Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after 2.3246 ++ * it has returned XZ_STREAM_END, so we make this static. 2.3247 ++ */ 2.3248 ++static enum xz_ret ret; 2.3249 ++ 2.3250 ++/* 2.3251 ++ * Input and output buffers. The input buffer is used as a temporary safe 2.3252 ++ * place for the data coming from the userspace. 2.3253 ++ */ 2.3254 ++static uint8_t buffer_in[1024]; 2.3255 ++static uint8_t buffer_out[1024]; 2.3256 ++ 2.3257 ++/* 2.3258 ++ * Structure to pass the input and output buffers to the XZ decoder. 2.3259 ++ * A few of the fields are never modified so we initialize them here. 2.3260 ++ */ 2.3261 ++static struct xz_buf buffers = { 2.3262 ++ .in = buffer_in, 2.3263 ++ .out = buffer_out, 2.3264 ++ .out_size = sizeof(buffer_out) 2.3265 ++}; 2.3266 ++ 2.3267 ++/* 2.3268 ++ * CRC32 of uncompressed data. This is used to give the user a simple way 2.3269 ++ * to check that the decoder produces correct output. 
2.3270 ++ */ 2.3271 ++static uint32_t crc; 2.3272 ++ 2.3273 ++static int xz_dec_test_open(struct inode *i, struct file *f) 2.3274 ++{ 2.3275 ++ if (device_is_open) 2.3276 ++ return -EBUSY; 2.3277 ++ 2.3278 ++ device_is_open = true; 2.3279 ++ 2.3280 ++ xz_dec_reset(state); 2.3281 ++ ret = XZ_OK; 2.3282 ++ crc = 0xFFFFFFFF; 2.3283 ++ 2.3284 ++ buffers.in_pos = 0; 2.3285 ++ buffers.in_size = 0; 2.3286 ++ buffers.out_pos = 0; 2.3287 ++ 2.3288 ++ printk(KERN_INFO DEVICE_NAME ": opened\n"); 2.3289 ++ return 0; 2.3290 ++} 2.3291 ++ 2.3292 ++static int xz_dec_test_release(struct inode *i, struct file *f) 2.3293 ++{ 2.3294 ++ device_is_open = false; 2.3295 ++ 2.3296 ++ if (ret == XZ_OK) 2.3297 ++ printk(KERN_INFO DEVICE_NAME ": input was truncated\n"); 2.3298 ++ 2.3299 ++ printk(KERN_INFO DEVICE_NAME ": closed\n"); 2.3300 ++ return 0; 2.3301 ++} 2.3302 ++ 2.3303 ++/* 2.3304 ++ * Decode the data given to us from the userspace. CRC32 of the uncompressed 2.3305 ++ * data is calculated and is printed at the end of successful decoding. The 2.3306 ++ * uncompressed data isn't stored anywhere for further use. 2.3307 ++ * 2.3308 ++ * The .xz file must have exactly one Stream and no Stream Padding. The data 2.3309 ++ * after the first Stream is considered to be garbage. 2.3310 ++ */ 2.3311 ++static ssize_t xz_dec_test_write(struct file *file, const char __user *buf, 2.3312 ++ size_t size, loff_t *pos) 2.3313 ++{ 2.3314 ++ size_t remaining; 2.3315 ++ 2.3316 ++ if (ret != XZ_OK) { 2.3317 ++ if (size > 0) 2.3318 ++ printk(KERN_INFO DEVICE_NAME ": %zu bytes of " 2.3319 ++ "garbage at the end of the file\n", 2.3320 ++ size); 2.3321 ++ 2.3322 ++ return -ENOSPC; 2.3323 ++ } 2.3324 ++ 2.3325 ++ printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n", 2.3326 ++ size); 2.3327 ++ 2.3328 ++ remaining = size; 2.3329 ++ while ((remaining > 0 || buffers.out_pos == buffers.out_size) 2.3330 ++ && ret == XZ_OK) { 2.3331 ++ if (buffers.in_pos == buffers.in_size) { 2.3332 ++ buffers.in_pos = 0; 2.3333 ++ buffers.in_size = min(remaining, sizeof(buffer_in)); 2.3334 ++ if (copy_from_user(buffer_in, buf, buffers.in_size)) 2.3335 ++ return -EFAULT; 2.3336 ++ 2.3337 ++ buf += buffers.in_size; 2.3338 ++ remaining -= buffers.in_size; 2.3339 ++ } 2.3340 ++ 2.3341 ++ buffers.out_pos = 0; 2.3342 ++ ret = xz_dec_run(state, &buffers); 2.3343 ++ crc = crc32(crc, buffer_out, buffers.out_pos); 2.3344 ++ } 2.3345 ++ 2.3346 ++ switch (ret) { 2.3347 ++ case XZ_OK: 2.3348 ++ printk(KERN_INFO DEVICE_NAME ": XZ_OK\n"); 2.3349 ++ return size; 2.3350 ++ 2.3351 ++ case XZ_STREAM_END: 2.3352 ++ printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, " 2.3353 ++ "CRC32 = 0x%08X\n", ~crc); 2.3354 ++ return size - remaining - (buffers.in_size - buffers.in_pos); 2.3355 ++ 2.3356 ++ case XZ_MEMLIMIT_ERROR: 2.3357 ++ printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n"); 2.3358 ++ break; 2.3359 ++ 2.3360 ++ case XZ_FORMAT_ERROR: 2.3361 ++ printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n"); 2.3362 ++ break; 2.3363 ++ 2.3364 ++ case XZ_OPTIONS_ERROR: 2.3365 ++ printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n"); 2.3366 ++ break; 2.3367 ++ 2.3368 ++ case XZ_DATA_ERROR: 2.3369 ++ printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n"); 2.3370 ++ break; 2.3371 ++ 2.3372 ++ case XZ_BUF_ERROR: 2.3373 ++ printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n"); 2.3374 ++ break; 2.3375 ++ 2.3376 ++ default: 2.3377 ++ printk(KERN_INFO DEVICE_NAME ": Bug detected!\n"); 2.3378 ++ break; 2.3379 ++ } 2.3380 ++ 2.3381 ++ return -EIO; 2.3382 ++} 2.3383 ++ 2.3384 ++/* Allocate the XZ 
decoder state and register the character device. */ 2.3385 ++static int __init xz_dec_test_init(void) 2.3386 ++{ 2.3387 ++ static const struct file_operations fileops = { 2.3388 ++ .owner = THIS_MODULE, 2.3389 ++ .open = &xz_dec_test_open, 2.3390 ++ .release = &xz_dec_test_release, 2.3391 ++ .write = &xz_dec_test_write 2.3392 ++ }; 2.3393 ++ 2.3394 ++ state = xz_dec_init(XZ_PREALLOC, DICT_MAX); 2.3395 ++ if (state == NULL) 2.3396 ++ return -ENOMEM; 2.3397 ++ 2.3398 ++ device_major = register_chrdev(0, DEVICE_NAME, &fileops); 2.3399 ++ if (device_major < 0) { 2.3400 ++ xz_dec_end(state); 2.3401 ++ return device_major; 2.3402 ++ } 2.3403 ++ 2.3404 ++ printk(KERN_INFO DEVICE_NAME ": module loaded\n"); 2.3405 ++ printk(KERN_INFO DEVICE_NAME ": Create a device node with " 2.3406 ++ "'mknod " DEVICE_NAME " c %d 0' and write .xz files " 2.3407 ++ "to it.\n", device_major); 2.3408 ++ return 0; 2.3409 ++} 2.3410 ++ 2.3411 ++static void __exit xz_dec_test_exit(void) 2.3412 ++{ 2.3413 ++ unregister_chrdev(device_major, DEVICE_NAME); 2.3414 ++ xz_dec_end(state); 2.3415 ++ printk(KERN_INFO DEVICE_NAME ": module unloaded\n"); 2.3416 ++} 2.3417 ++ 2.3418 ++module_init(xz_dec_test_init); 2.3419 ++module_exit(xz_dec_test_exit); 2.3420 ++ 2.3421 ++MODULE_DESCRIPTION("XZ decompressor tester"); 2.3422 ++MODULE_VERSION("1.0"); 2.3423 ++MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>"); 2.3424 ++ 2.3425 ++/* 2.3426 ++ * This code is in the public domain, but in Linux it's simplest to just 2.3427 ++ * say it's GPL and consider the authors as the copyright holders. 2.3428 ++ */ 2.3429 ++MODULE_LICENSE("GPL"); 2.3430 +diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h 2.3431 +new file mode 100644 2.3432 +index 0000000..071d67b 2.3433 +--- /dev/null 2.3434 ++++ b/lib/xz/xz_lzma2.h 2.3435 +@@ -0,0 +1,204 @@ 2.3436 ++/* 2.3437 ++ * LZMA2 definitions 2.3438 ++ * 2.3439 ++ * Authors: Lasse Collin <lasse.collin@tukaani.org> 2.3440 ++ * Igor Pavlov <http://7-zip.org/> 2.3441 ++ * 2.3442 ++ * This file has been put into the public domain. 2.3443 ++ * You can do whatever you want with this file. 2.3444 ++ */ 2.3445 ++ 2.3446 ++#ifndef XZ_LZMA2_H 2.3447 ++#define XZ_LZMA2_H 2.3448 ++ 2.3449 ++/* Range coder constants */ 2.3450 ++#define RC_SHIFT_BITS 8 2.3451 ++#define RC_TOP_BITS 24 2.3452 ++#define RC_TOP_VALUE (1 << RC_TOP_BITS) 2.3453 ++#define RC_BIT_MODEL_TOTAL_BITS 11 2.3454 ++#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS) 2.3455 ++#define RC_MOVE_BITS 5 2.3456 ++ 2.3457 ++/* 2.3458 ++ * Maximum number of position states. A position state is the lowest pb 2.3459 ++ * number of bits of the current uncompressed offset. In some places there 2.3460 ++ * are different sets of probabilities for different position states. 2.3461 ++ */ 2.3462 ++#define POS_STATES_MAX (1 << 4) 2.3463 ++ 2.3464 ++/* 2.3465 ++ * This enum is used to track which LZMA symbols have occurred most recently 2.3466 ++ * and in which order. This information is used to predict the next symbol. 2.3467 ++ * 2.3468 ++ * Symbols: 2.3469 ++ * - Literal: One 8-bit byte 2.3470 ++ * - Match: Repeat a chunk of data at some distance 2.3471 ++ * - Long repeat: Multi-byte match at a recently seen distance 2.3472 ++ * - Short repeat: One-byte repeat at a recently seen distance 2.3473 ++ * 2.3474 ++ * The symbol names are in from STATE_oldest_older_previous. REP means 2.3475 ++ * either short or long repeated match, and NONLIT means any non-literal. 
2.3476 ++ */ 2.3477 ++enum lzma_state { 2.3478 ++ STATE_LIT_LIT, 2.3479 ++ STATE_MATCH_LIT_LIT, 2.3480 ++ STATE_REP_LIT_LIT, 2.3481 ++ STATE_SHORTREP_LIT_LIT, 2.3482 ++ STATE_MATCH_LIT, 2.3483 ++ STATE_REP_LIT, 2.3484 ++ STATE_SHORTREP_LIT, 2.3485 ++ STATE_LIT_MATCH, 2.3486 ++ STATE_LIT_LONGREP, 2.3487 ++ STATE_LIT_SHORTREP, 2.3488 ++ STATE_NONLIT_MATCH, 2.3489 ++ STATE_NONLIT_REP 2.3490 ++}; 2.3491 ++ 2.3492 ++/* Total number of states */ 2.3493 ++#define STATES 12 2.3494 ++ 2.3495 ++/* The lowest 7 states indicate that the previous state was a literal. */ 2.3496 ++#define LIT_STATES 7 2.3497 ++ 2.3498 ++/* Indicate that the latest symbol was a literal. */ 2.3499 ++static inline void lzma_state_literal(enum lzma_state *state) 2.3500 ++{ 2.3501 ++ if (*state <= STATE_SHORTREP_LIT_LIT) 2.3502 ++ *state = STATE_LIT_LIT; 2.3503 ++ else if (*state <= STATE_LIT_SHORTREP) 2.3504 ++ *state -= 3; 2.3505 ++ else 2.3506 ++ *state -= 6; 2.3507 ++} 2.3508 ++ 2.3509 ++/* Indicate that the latest symbol was a match. */ 2.3510 ++static inline void lzma_state_match(enum lzma_state *state) 2.3511 ++{ 2.3512 ++ *state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH; 2.3513 ++} 2.3514 ++ 2.3515 ++/* Indicate that the latest state was a long repeated match. */ 2.3516 ++static inline void lzma_state_long_rep(enum lzma_state *state) 2.3517 ++{ 2.3518 ++ *state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP; 2.3519 ++} 2.3520 ++ 2.3521 ++/* Indicate that the latest symbol was a short match. */ 2.3522 ++static inline void lzma_state_short_rep(enum lzma_state *state) 2.3523 ++{ 2.3524 ++ *state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP; 2.3525 ++} 2.3526 ++ 2.3527 ++/* Test if the previous symbol was a literal. */ 2.3528 ++static inline bool lzma_state_is_literal(enum lzma_state state) 2.3529 ++{ 2.3530 ++ return state < LIT_STATES; 2.3531 ++} 2.3532 ++ 2.3533 ++/* Each literal coder is divided in three sections: 2.3534 ++ * - 0x001-0x0FF: Without match byte 2.3535 ++ * - 0x101-0x1FF: With match byte; match bit is 0 2.3536 ++ * - 0x201-0x2FF: With match byte; match bit is 1 2.3537 ++ * 2.3538 ++ * Match byte is used when the previous LZMA symbol was something else than 2.3539 ++ * a literal (that is, it was some kind of match). 2.3540 ++ */ 2.3541 ++#define LITERAL_CODER_SIZE 0x300 2.3542 ++ 2.3543 ++/* Maximum number of literal coders */ 2.3544 ++#define LITERAL_CODERS_MAX (1 << 4) 2.3545 ++ 2.3546 ++/* Minimum length of a match is two bytes. */ 2.3547 ++#define MATCH_LEN_MIN 2 2.3548 ++ 2.3549 ++/* Match length is encoded with 4, 5, or 10 bits. 2.3550 ++ * 2.3551 ++ * Length Bits 2.3552 ++ * 2-9 4 = Choice=0 + 3 bits 2.3553 ++ * 10-17 5 = Choice=1 + Choice2=0 + 3 bits 2.3554 ++ * 18-273 10 = Choice=1 + Choice2=1 + 8 bits 2.3555 ++ */ 2.3556 ++#define LEN_LOW_BITS 3 2.3557 ++#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS) 2.3558 ++#define LEN_MID_BITS 3 2.3559 ++#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS) 2.3560 ++#define LEN_HIGH_BITS 8 2.3561 ++#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS) 2.3562 ++#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS) 2.3563 ++ 2.3564 ++/* 2.3565 ++ * Maximum length of a match is 273 which is a result of the encoding 2.3566 ++ * described above. 
2.3567 ++ */ 2.3568 ++#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1) 2.3569 ++ 2.3570 ++/* 2.3571 ++ * Different sets of probabilities are used for match distances that have 2.3572 ++ * very short match length: Lengths of 2, 3, and 4 bytes have a separate 2.3573 ++ * set of probabilities for each length. The matches with longer length 2.3574 ++ * use a shared set of probabilities. 2.3575 ++ */ 2.3576 ++#define DIST_STATES 4 2.3577 ++ 2.3578 ++/* 2.3579 ++ * Get the index of the appropriate probability array for decoding 2.3580 ++ * the distance slot. 2.3581 ++ */ 2.3582 ++static inline uint32_t lzma_get_dist_state(uint32_t len) 2.3583 ++{ 2.3584 ++ return len < DIST_STATES + MATCH_LEN_MIN 2.3585 ++ ? len - MATCH_LEN_MIN : DIST_STATES - 1; 2.3586 ++} 2.3587 ++ 2.3588 ++/* 2.3589 ++ * The highest two bits of a 32-bit match distance are encoded using six bits. 2.3590 ++ * This six-bit value is called a distance slot. This way encoding a 32-bit 2.3591 ++ * value takes 6-36 bits, larger values taking more bits. 2.3592 ++ */ 2.3593 ++#define DIST_SLOT_BITS 6 2.3594 ++#define DIST_SLOTS (1 << DIST_SLOT_BITS) 2.3595 ++ 2.3596 ++/* Match distances up to 127 are fully encoded using probabilities. Since 2.3597 ++ * the highest two bits (distance slot) are always encoded using six bits, 2.3598 ++ * the distances 0-3 don't need any additional bits to encode, since the 2.3599 ++ * distance slot itself is the same as the actual distance. DIST_MODEL_START 2.3600 ++ * indicates the first distance slot where at least one additional bit is 2.3601 ++ * needed. 2.3602 ++ */ 2.3603 ++#define DIST_MODEL_START 4 2.3604 ++ 2.3605 ++/* 2.3606 ++ * Match distances greater than 127 are encoded in three pieces: 2.3607 ++ * - distance slot: the highest two bits 2.3608 ++ * - direct bits: 2-26 bits below the highest two bits 2.3609 ++ * - alignment bits: four lowest bits 2.3610 ++ * 2.3611 ++ * Direct bits don't use any probabilities. 2.3612 ++ * 2.3613 ++ * The distance slot value of 14 is for distances 128-191. 2.3614 ++ */ 2.3615 ++#define DIST_MODEL_END 14 2.3616 ++ 2.3617 ++/* Distance slots that indicate a distance <= 127. */ 2.3618 ++#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2) 2.3619 ++#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS) 2.3620 ++ 2.3621 ++/* 2.3622 ++ * For match distances greater than 127, only the highest two bits and the 2.3623 ++ * lowest four bits (alignment) is encoded using probabilities. 2.3624 ++ */ 2.3625 ++#define ALIGN_BITS 4 2.3626 ++#define ALIGN_SIZE (1 << ALIGN_BITS) 2.3627 ++#define ALIGN_MASK (ALIGN_SIZE - 1) 2.3628 ++ 2.3629 ++/* Total number of all probability variables */ 2.3630 ++#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE) 2.3631 ++ 2.3632 ++/* 2.3633 ++ * LZMA remembers the four most recent match distances. Reusing these 2.3634 ++ * distances tends to take less space than re-encoding the actual 2.3635 ++ * distance value. 2.3636 ++ */ 2.3637 ++#define REPS 4 2.3638 ++ 2.3639 ++#endif 2.3640 +diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h 2.3641 +new file mode 100644 2.3642 +index 0000000..a65633e 2.3643 +--- /dev/null 2.3644 ++++ b/lib/xz/xz_private.h 2.3645 +@@ -0,0 +1,156 @@ 2.3646 ++/* 2.3647 ++ * Private includes and definitions 2.3648 ++ * 2.3649 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 2.3650 ++ * 2.3651 ++ * This file has been put into the public domain. 2.3652 ++ * You can do whatever you want with this file. 
2.3653 ++ */ 2.3654 ++ 2.3655 ++#ifndef XZ_PRIVATE_H 2.3656 ++#define XZ_PRIVATE_H 2.3657 ++ 2.3658 ++#ifdef __KERNEL__ 2.3659 ++# include <linux/xz.h> 2.3660 ++# include <asm/byteorder.h> 2.3661 ++# include <asm/unaligned.h> 2.3662 ++ /* XZ_PREBOOT may be defined only via decompress_unxz.c. */ 2.3663 ++# ifndef XZ_PREBOOT 2.3664 ++# include <linux/slab.h> 2.3665 ++# include <linux/vmalloc.h> 2.3666 ++# include <linux/string.h> 2.3667 ++# ifdef CONFIG_XZ_DEC_X86 2.3668 ++# define XZ_DEC_X86 2.3669 ++# endif 2.3670 ++# ifdef CONFIG_XZ_DEC_POWERPC 2.3671 ++# define XZ_DEC_POWERPC 2.3672 ++# endif 2.3673 ++# ifdef CONFIG_XZ_DEC_IA64 2.3674 ++# define XZ_DEC_IA64 2.3675 ++# endif 2.3676 ++# ifdef CONFIG_XZ_DEC_ARM 2.3677 ++# define XZ_DEC_ARM 2.3678 ++# endif 2.3679 ++# ifdef CONFIG_XZ_DEC_ARMTHUMB 2.3680 ++# define XZ_DEC_ARMTHUMB 2.3681 ++# endif 2.3682 ++# ifdef CONFIG_XZ_DEC_SPARC 2.3683 ++# define XZ_DEC_SPARC 2.3684 ++# endif 2.3685 ++# define memeq(a, b, size) (memcmp(a, b, size) == 0) 2.3686 ++# define memzero(buf, size) memset(buf, 0, size) 2.3687 ++# endif 2.3688 ++# define get_le32(p) le32_to_cpup((const uint32_t *)(p)) 2.3689 ++#else 2.3690 ++ /* 2.3691 ++ * For userspace builds, use a separate header to define the required 2.3692 ++ * macros and functions. This makes it easier to adapt the code into 2.3693 ++ * different environments and avoids clutter in the Linux kernel tree. 2.3694 ++ */ 2.3695 ++# include "xz_config.h" 2.3696 ++#endif 2.3697 ++ 2.3698 ++/* If no specific decoding mode is requested, enable support for all modes. */ 2.3699 ++#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \ 2.3700 ++ && !defined(XZ_DEC_DYNALLOC) 2.3701 ++# define XZ_DEC_SINGLE 2.3702 ++# define XZ_DEC_PREALLOC 2.3703 ++# define XZ_DEC_DYNALLOC 2.3704 ++#endif 2.3705 ++ 2.3706 ++/* 2.3707 ++ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some 2.3708 ++ * of the supported modes are enabled, these macros will evaluate to true or 2.3709 ++ * false at compile time and thus allow the compiler to omit unneeded code. 2.3710 ++ */ 2.3711 ++#ifdef XZ_DEC_SINGLE 2.3712 ++# define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE) 2.3713 ++#else 2.3714 ++# define DEC_IS_SINGLE(mode) (false) 2.3715 ++#endif 2.3716 ++ 2.3717 ++#ifdef XZ_DEC_PREALLOC 2.3718 ++# define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC) 2.3719 ++#else 2.3720 ++# define DEC_IS_PREALLOC(mode) (false) 2.3721 ++#endif 2.3722 ++ 2.3723 ++#ifdef XZ_DEC_DYNALLOC 2.3724 ++# define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC) 2.3725 ++#else 2.3726 ++# define DEC_IS_DYNALLOC(mode) (false) 2.3727 ++#endif 2.3728 ++ 2.3729 ++#if !defined(XZ_DEC_SINGLE) 2.3730 ++# define DEC_IS_MULTI(mode) (true) 2.3731 ++#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC) 2.3732 ++# define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE) 2.3733 ++#else 2.3734 ++# define DEC_IS_MULTI(mode) (false) 2.3735 ++#endif 2.3736 ++ 2.3737 ++/* 2.3738 ++ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ. 2.3739 ++ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders. 2.3740 ++ */ 2.3741 ++#ifndef XZ_DEC_BCJ 2.3742 ++# if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \ 2.3743 ++ || defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \ 2.3744 ++ || defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \ 2.3745 ++ || defined(XZ_DEC_SPARC) 2.3746 ++# define XZ_DEC_BCJ 2.3747 ++# endif 2.3748 ++#endif 2.3749 ++ 2.3750 ++/* 2.3751 ++ * Allocate memory for LZMA2 decoder. 
xz_dec_lzma2_reset() must be used 2.3752 ++ * before calling xz_dec_lzma2_run(). 2.3753 ++ */ 2.3754 ++XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode, 2.3755 ++ uint32_t dict_max); 2.3756 ++ 2.3757 ++/* 2.3758 ++ * Decode the LZMA2 properties (one byte) and reset the decoder. Return 2.3759 ++ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not 2.3760 ++ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this 2.3761 ++ * decoder doesn't support. 2.3762 ++ */ 2.3763 ++XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, 2.3764 ++ uint8_t props); 2.3765 ++ 2.3766 ++/* Decode raw LZMA2 stream from b->in to b->out. */ 2.3767 ++XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s, 2.3768 ++ struct xz_buf *b); 2.3769 ++ 2.3770 ++/* Free the memory allocated for the LZMA2 decoder. */ 2.3771 ++XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s); 2.3772 ++ 2.3773 ++#ifdef XZ_DEC_BCJ 2.3774 ++/* 2.3775 ++ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before 2.3776 ++ * calling xz_dec_bcj_run(). 2.3777 ++ */ 2.3778 ++XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call); 2.3779 ++ 2.3780 ++/* 2.3781 ++ * Decode the Filter ID of a BCJ filter. This implementation doesn't 2.3782 ++ * support custom start offsets, so no decoding of Filter Properties 2.3783 ++ * is needed. Returns XZ_OK if the given Filter ID is supported. 2.3784 ++ * Otherwise XZ_OPTIONS_ERROR is returned. 2.3785 ++ */ 2.3786 ++XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id); 2.3787 ++ 2.3788 ++/* 2.3789 ++ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is 2.3790 ++ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run() 2.3791 ++ * must be called directly. 2.3792 ++ */ 2.3793 ++XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s, 2.3794 ++ struct xz_dec_lzma2 *lzma2, 2.3795 ++ struct xz_buf *b); 2.3796 ++ 2.3797 ++/* Free the memory allocated for the BCJ filters. */ 2.3798 ++#define xz_dec_bcj_end(s) kfree(s) 2.3799 ++#endif 2.3800 ++ 2.3801 ++#endif 2.3802 +diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h 2.3803 +new file mode 100644 2.3804 +index 0000000..66cb5a7 2.3805 +--- /dev/null 2.3806 ++++ b/lib/xz/xz_stream.h 2.3807 +@@ -0,0 +1,62 @@ 2.3808 ++/* 2.3809 ++ * Definitions for handling the .xz file format 2.3810 ++ * 2.3811 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 2.3812 ++ * 2.3813 ++ * This file has been put into the public domain. 2.3814 ++ * You can do whatever you want with this file. 2.3815 ++ */ 2.3816 ++ 2.3817 ++#ifndef XZ_STREAM_H 2.3818 ++#define XZ_STREAM_H 2.3819 ++ 2.3820 ++#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32 2.3821 ++# include <linux/crc32.h> 2.3822 ++# undef crc32 2.3823 ++# define xz_crc32(buf, size, crc) \ 2.3824 ++ (~crc32_le(~(uint32_t)(crc), buf, size)) 2.3825 ++#endif 2.3826 ++ 2.3827 ++/* 2.3828 ++ * See the .xz file format specification at 2.3829 ++ * http://tukaani.org/xz/xz-file-format.txt 2.3830 ++ * to understand the container format. 2.3831 ++ */ 2.3832 ++ 2.3833 ++#define STREAM_HEADER_SIZE 12 2.3834 ++ 2.3835 ++#define HEADER_MAGIC "\3757zXZ" 2.3836 ++#define HEADER_MAGIC_SIZE 6 2.3837 ++ 2.3838 ++#define FOOTER_MAGIC "YZ" 2.3839 ++#define FOOTER_MAGIC_SIZE 2 2.3840 ++ 2.3841 ++/* 2.3842 ++ * Variable-length integer can hold a 63-bit unsigned integer or a special 2.3843 ++ * value indicating that the value is unknown. 
2.3844 ++ * 2.3845 ++ * Experimental: vli_type can be defined to uint32_t to save a few bytes 2.3846 ++ * in code size (no effect on speed). Doing so limits the uncompressed and 2.3847 ++ * compressed size of the file to less than 256 MiB and may also weaken 2.3848 ++ * error detection slightly. 2.3849 ++ */ 2.3850 ++typedef uint64_t vli_type; 2.3851 ++ 2.3852 ++#define VLI_MAX ((vli_type)-1 / 2) 2.3853 ++#define VLI_UNKNOWN ((vli_type)-1) 2.3854 ++ 2.3855 ++/* Maximum encoded size of a VLI */ 2.3856 ++#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7) 2.3857 ++ 2.3858 ++/* Integrity Check types */ 2.3859 ++enum xz_check { 2.3860 ++ XZ_CHECK_NONE = 0, 2.3861 ++ XZ_CHECK_CRC32 = 1, 2.3862 ++ XZ_CHECK_CRC64 = 4, 2.3863 ++ XZ_CHECK_SHA256 = 10 2.3864 ++}; 2.3865 ++ 2.3866 ++/* Maximum possible Check ID */ 2.3867 ++#define XZ_CHECK_MAX 15 2.3868 ++ 2.3869 ++#endif 2.3870 +diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib 2.3871 +index 54fd1b7..b862007 100644 2.3872 +--- a/scripts/Makefile.lib 2.3873 ++++ b/scripts/Makefile.lib 2.3874 +@@ -246,6 +246,34 @@ cmd_lzo = (cat $(filter-out FORCE,$^) | \ 2.3875 + lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ 2.3876 + (rm -f $@ ; false) 2.3877 + 2.3878 ++# XZ 2.3879 ++# --------------------------------------------------------------------------- 2.3880 ++# Use xzkern to compress the kernel image and xzmisc to compress other things. 2.3881 ++# 2.3882 ++# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage 2.3883 ++# of the kernel decompressor. A BCJ filter is used if it is available for 2.3884 ++# the target architecture. xzkern also appends uncompressed size of the data 2.3885 ++# using size_append. The .xz format has the size information available at 2.3886 ++# the end of the file too, but it's in more complex format and it's good to 2.3887 ++# avoid changing the part of the boot code that reads the uncompressed size. 2.3888 ++# Note that the bytes added by size_append will make the xz tool think that 2.3889 ++# the file is corrupt. This is expected. 2.3890 ++# 2.3891 ++# xzmisc doesn't use size_append, so it can be used to create normal .xz 2.3892 ++# files. xzmisc uses smaller LZMA2 dictionary than xzkern, because a very 2.3893 ++# big dictionary would increase the memory usage too much in the multi-call 2.3894 ++# decompression mode. A BCJ filter isn't used either. 2.3895 ++quiet_cmd_xzkern = XZKERN $@ 2.3896 ++cmd_xzkern = (cat $(filter-out FORCE,$^) | \ 2.3897 ++ sh $(srctree)/scripts/xz_wrap.sh && \ 2.3898 ++ $(call size_append, $(filter-out FORCE,$^))) > $@ || \ 2.3899 ++ (rm -f $@ ; false) 2.3900 ++ 2.3901 ++quiet_cmd_xzmisc = XZMISC $@ 2.3902 ++cmd_xzmisc = (cat $(filter-out FORCE,$^) | \ 2.3903 ++ xz --check=crc32 --lzma2=dict=1MiB) > $@ || \ 2.3904 ++ (rm -f $@ ; false) 2.3905 ++ 2.3906 + # misc stuff 2.3907 + # --------------------------------------------------------------------------- 2.3908 + quote:=" 2.3909 +diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh 2.3910 +new file mode 100644 2.3911 +index 0000000..17a5798 2.3912 +--- /dev/null 2.3913 ++++ b/scripts/xz_wrap.sh 2.3914 +@@ -0,0 +1,23 @@ 2.3915 ++#!/bin/sh 2.3916 ++# 2.3917 ++# This is a wrapper for xz to compress the kernel image using appropriate 2.3918 ++# compression options depending on the architecture. 2.3919 ++# 2.3920 ++# Author: Lasse Collin <lasse.collin@tukaani.org> 2.3921 ++# 2.3922 ++# This file has been put into the public domain. 2.3923 ++# You can do whatever you want with this file. 
2.3924 ++# 2.3925 ++ 2.3926 ++BCJ= 2.3927 ++LZMA2OPTS= 2.3928 ++ 2.3929 ++case $ARCH in 2.3930 ++ x86|x86_64) BCJ=--x86 ;; 2.3931 ++ powerpc) BCJ=--powerpc ;; 2.3932 ++ ia64) BCJ=--ia64; LZMA2OPTS=pb=4 ;; 2.3933 ++ arm) BCJ=--arm ;; 2.3934 ++ sparc) BCJ=--sparc ;; 2.3935 ++esac 2.3936 ++ 2.3937 ++exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/linux/stuff/002-squashfs-decompressors-add-boot-time-xz-support.patch Tue Dec 14 21:45:09 2010 +0000 3.3 @@ -0,0 +1,638 @@ 3.4 +From: Lasse Collin <lasse.collin@tukaani.org> 3.5 +Date: Thu, 2 Dec 2010 19:14:37 +0000 (+0200) 3.6 +Subject: Decompressors: Add boot-time XZ support 3.7 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fpkl%2Fsquashfs-xz.git;a=commitdiff_plain;h=c64bc9a229b46db75d7761601dd8ca25385a7780 3.8 + 3.9 +Decompressors: Add boot-time XZ support 3.10 + 3.11 +This implements the API defined in <linux/decompress/generic.h> 3.12 +which is used for kernel, initramfs, and initrd decompression. 3.13 +This patch together with the first patch is enough for 3.14 +XZ-compressed initramfs and initrd; XZ-compressed kernel will 3.15 +need arch-specific changes. 3.16 + 3.17 +In contrast to other initramfs compression methods, support for 3.18 +XZ-compressed initramfs is not enabled by default in usr/Kconfig. 3.19 +This is primarily due to the Kconfig options of the xz_dec 3.20 +module. It can be good to require that xz_dec is enabled 3.21 +separately so the user can select only the BCJ filters he needs 3.22 +when EMBEDDED=y. 3.23 + 3.24 +The buffering requirements described in decompress_unxz.c are 3.25 +stricter than with gzip, so the relevant changes should be done 3.26 +to the arch-specific code when adding support for XZ-compressed 3.27 +kernel. Similarly, the heap size in arch-specific pre-boot code 3.28 +may need to be increased (30 KiB is enough). 3.29 + 3.30 +The XZ decompressor needs memmove(), memeq() (memcmp() == 0), 3.31 +and memzero() (memset(ptr, 0, size)), which aren't available in 3.32 +all arch-specific pre-boot environments. I'm including simple 3.33 +versions in decompress_unxz.c, but a cleaner solution would 3.34 +naturally be nicer. 3.35 + 3.36 +Signed-off-by: Lasse Collin <lasse.collin@tukaani.org> 3.37 +--- 3.38 + 3.39 +diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h 3.40 +new file mode 100644 3.41 +index 0000000..41728fc 3.42 +--- /dev/null 3.43 ++++ b/include/linux/decompress/unxz.h 3.44 +@@ -0,0 +1,19 @@ 3.45 ++/* 3.46 ++ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd 3.47 ++ * 3.48 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 3.49 ++ * 3.50 ++ * This file has been put into the public domain. 3.51 ++ * You can do whatever you want with this file. 3.52 ++ */ 3.53 ++ 3.54 ++#ifndef DECOMPRESS_UNXZ_H 3.55 ++#define DECOMPRESS_UNXZ_H 3.56 ++ 3.57 ++int unxz(unsigned char *in, int in_size, 3.58 ++ int (*fill)(void *dest, unsigned int size), 3.59 ++ int (*flush)(void *src, unsigned int size), 3.60 ++ unsigned char *out, int *in_used, 3.61 ++ void (*error)(char *x)); 3.62 ++ 3.63 ++#endif 3.64 +diff --git a/init/Kconfig b/init/Kconfig 3.65 +index 2de5b1c..d9fbb0f 100644 3.66 +--- a/init/Kconfig 3.67 ++++ b/init/Kconfig 3.68 +@@ -123,13 +123,16 @@ config HAVE_KERNEL_BZIP2 3.69 + config HAVE_KERNEL_LZMA 3.70 + bool 3.71 + 3.72 ++config HAVE_KERNEL_XZ 3.73 ++ bool 3.74 ++ 3.75 + config HAVE_KERNEL_LZO 3.76 + bool 3.77 + 3.78 + choice 3.79 + prompt "Kernel compression mode" 3.80 + default KERNEL_GZIP 3.81 +- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO 3.82 ++ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO 3.83 + help 3.84 + The linux kernel is a kind of self-extracting executable. 
3.85 + Several compression algorithms are available, which differ 3.86 +@@ -174,6 +177,21 @@ config KERNEL_LZMA 3.87 + two. Compression is slowest. The kernel size is about 33% 3.88 + smaller with LZMA in comparison to gzip. 3.89 + 3.90 ++config KERNEL_XZ 3.91 ++ bool "XZ" 3.92 ++ depends on HAVE_KERNEL_XZ 3.93 ++ help 3.94 ++ XZ uses the LZMA2 algorithm and instruction set specific 3.95 ++ BCJ filters which can improve compression ratio of executable 3.96 ++ code. The size of the kernel is about 30% smaller with XZ in 3.97 ++ comparison to gzip. On architectures for which there is a BCJ 3.98 ++ filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ 3.99 ++ will create a few percent smaller kernel than plain LZMA. 3.100 ++ 3.101 ++ The speed is about the same as with LZMA: The decompression 3.102 ++ speed of XZ is better than that of bzip2 but worse than gzip 3.103 ++ and LZO. Compression is slow. 3.104 ++ 3.105 + config KERNEL_LZO 3.106 + bool "LZO" 3.107 + depends on HAVE_KERNEL_LZO 3.108 +diff --git a/lib/Kconfig b/lib/Kconfig 3.109 +index 6090314..45960e1 100644 3.110 +--- a/lib/Kconfig 3.111 ++++ b/lib/Kconfig 3.112 +@@ -122,6 +122,9 @@ config DECOMPRESS_BZIP2 3.113 + config DECOMPRESS_LZMA 3.114 + tristate 3.115 + 3.116 ++config DECOMPRESS_XZ 3.117 ++ tristate 3.118 ++ 3.119 + config DECOMPRESS_LZO 3.120 + select LZO_DECOMPRESS 3.121 + tristate 3.122 +diff --git a/lib/Makefile b/lib/Makefile 3.123 +index f2f98dd..06e3d8a 100644 3.124 +--- a/lib/Makefile 3.125 ++++ b/lib/Makefile 3.126 +@@ -75,6 +75,7 @@ obj-$(CONFIG_RAID6_PQ) += raid6/ 3.127 + lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o 3.128 + lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o 3.129 + lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o 3.130 ++lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o 3.131 + lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o 3.132 + 3.133 + obj-$(CONFIG_TEXTSEARCH) += textsearch.o 3.134 +diff --git a/lib/decompress.c b/lib/decompress.c 3.135 +index a760681..3d766b7 100644 3.136 +--- a/lib/decompress.c 3.137 ++++ b/lib/decompress.c 3.138 +@@ -8,6 +8,7 @@ 3.139 + 3.140 + #include <linux/decompress/bunzip2.h> 3.141 + #include <linux/decompress/unlzma.h> 3.142 ++#include <linux/decompress/unxz.h> 3.143 + #include <linux/decompress/inflate.h> 3.144 + #include <linux/decompress/unlzo.h> 3.145 + 3.146 +@@ -23,6 +24,9 @@ 3.147 + #ifndef CONFIG_DECOMPRESS_LZMA 3.148 + # define unlzma NULL 3.149 + #endif 3.150 ++#ifndef CONFIG_DECOMPRESS_XZ 3.151 ++# define unxz NULL 3.152 ++#endif 3.153 + #ifndef CONFIG_DECOMPRESS_LZO 3.154 + # define unlzo NULL 3.155 + #endif 3.156 +@@ -36,6 +40,7 @@ static const struct compress_format { 3.157 + { {037, 0236}, "gzip", gunzip }, 3.158 + { {0x42, 0x5a}, "bzip2", bunzip2 }, 3.159 + { {0x5d, 0x00}, "lzma", unlzma }, 3.160 ++ { {0xfd, 0x37}, "xz", unxz }, 3.161 + { {0x89, 0x4c}, "lzo", unlzo }, 3.162 + { {0, 0}, NULL, NULL } 3.163 + }; 3.164 +diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c 3.165 +new file mode 100644 3.166 +index 0000000..cecd23d 3.167 +--- /dev/null 3.168 ++++ b/lib/decompress_unxz.c 3.169 +@@ -0,0 +1,397 @@ 3.170 ++/* 3.171 ++ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd 3.172 ++ * 3.173 ++ * Author: Lasse Collin <lasse.collin@tukaani.org> 3.174 ++ * 3.175 ++ * This file has been put into the public domain. 3.176 ++ * You can do whatever you want with this file. 
3.177 ++ */ 3.178 ++ 3.179 ++/* 3.180 ++ * Important notes about in-place decompression 3.181 ++ * 3.182 ++ * At least on x86, the kernel is decompressed in place: the compressed data 3.183 ++ * is placed to the end of the output buffer, and the decompressor overwrites 3.184 ++ * most of the compressed data. There must be enough safety margin to 3.185 ++ * guarantee that the write position is always behind the read position. 3.186 ++ * 3.187 ++ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below. 3.188 ++ * Note that the margin with XZ is bigger than with Deflate (gzip)! 3.189 ++ * 3.190 ++ * The worst case for in-place decompression is that the beginning of 3.191 ++ * the file is compressed extremely well, and the rest of the file is 3.192 ++ * uncompressible. Thus, we must look for worst-case expansion when the 3.193 ++ * compressor is encoding uncompressible data. 3.194 ++ * 3.195 ++ * The structure of the .xz file in case of a compresed kernel is as follows. 3.196 ++ * Sizes (as bytes) of the fields are in parenthesis. 3.197 ++ * 3.198 ++ * Stream Header (12) 3.199 ++ * Block Header: 3.200 ++ * Block Header (8-12) 3.201 ++ * Compressed Data (N) 3.202 ++ * Block Padding (0-3) 3.203 ++ * CRC32 (4) 3.204 ++ * Index (8-20) 3.205 ++ * Stream Footer (12) 3.206 ++ * 3.207 ++ * Normally there is exactly one Block, but let's assume that there are 3.208 ++ * 2-4 Blocks just in case. Because Stream Header and also Block Header 3.209 ++ * of the first Block don't make the decompressor produce any uncompressed 3.210 ++ * data, we can ignore them from our calculations. Block Headers of possible 3.211 ++ * additional Blocks have to be taken into account still. With these 3.212 ++ * assumptions, it is safe to assume that the total header overhead is 3.213 ++ * less than 128 bytes. 3.214 ++ * 3.215 ++ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ 3.216 ++ * doesn't change the size of the data, it is enough to calculate the 3.217 ++ * safety margin for LZMA2. 3.218 ++ * 3.219 ++ * LZMA2 stores the data in chunks. Each chunk has a header whose size is 3.220 ++ * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that 3.221 ++ * the maximum chunk header size is 8 bytes. After the chunk header, there 3.222 ++ * may be up to 64 KiB of actual payload in the chunk. Often the payload is 3.223 ++ * quite a bit smaller though; to be safe, let's assume that an average 3.224 ++ * chunk has only 32 KiB of payload. 3.225 ++ * 3.226 ++ * The maximum uncompressed size of the payload is 2 MiB. The minimum 3.227 ++ * uncompressed size of the payload is in practice never less than the 3.228 ++ * payload size itself. The LZMA2 format would allow uncompressed size 3.229 ++ * to be less than the payload size, but no sane compressor creates such 3.230 ++ * files. LZMA2 supports storing uncompressible data in uncompressed form, 3.231 ++ * so there's never a need to create payloads whose uncompressed size is 3.232 ++ * smaller than the compressed size. 3.233 ++ * 3.234 ++ * The assumption, that the uncompressed size of the payload is never 3.235 ++ * smaller than the payload itself, is valid only when talking about 3.236 ++ * the payload as a whole. It is possible that the payload has parts where 3.237 ++ * the decompressor consumes more input than it produces output. Calculating 3.238 ++ * the worst case for this would be tricky. 
Instead of trying to do that, 3.239 ++ * let's simply make sure that the decompressor never overwrites any bytes 3.240 ++ * of the payload which it is currently reading. 3.241 ++ * 3.242 ++ * Now we have enough information to calculate the safety margin. We need 3.243 ++ * - 128 bytes for the .xz file format headers; 3.244 ++ * - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header 3.245 ++ * per chunk, each chunk having average payload size of 32 KiB); and 3.246 ++ * - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that 3.247 ++ * the decompressor never overwrites anything from the LZMA2 chunk 3.248 ++ * payload it is currently reading. 3.249 ++ * 3.250 ++ * We get the following formula: 3.251 ++ * 3.252 ++ * safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536 3.253 ++ * = 128 + (uncompressed_size >> 12) + 65536 3.254 ++ * 3.255 ++ * For comparision, according to arch/x86/boot/compressed/misc.c, the 3.256 ++ * equivalent formula for Deflate is this: 3.257 ++ * 3.258 ++ * safety_margin = 18 + (uncompressed_size >> 12) + 32768 3.259 ++ * 3.260 ++ * Thus, when updating Deflate-only in-place kernel decompressor to 3.261 ++ * support XZ, the fixed overhead has to be increased from 18+32768 bytes 3.262 ++ * to 128+65536 bytes. 3.263 ++ */ 3.264 ++ 3.265 ++/* 3.266 ++ * STATIC is defined to "static" if we are being built for kernel 3.267 ++ * decompression (pre-boot code). <linux/decompress/mm.h> will define 3.268 ++ * STATIC to empty if it wasn't already defined. Since we will need to 3.269 ++ * know later if we are being used for kernel decompression, we define 3.270 ++ * XZ_PREBOOT here. 3.271 ++ */ 3.272 ++#ifdef STATIC 3.273 ++# define XZ_PREBOOT 3.274 ++#endif 3.275 ++#ifdef __KERNEL__ 3.276 ++# include <linux/decompress/mm.h> 3.277 ++#endif 3.278 ++#define XZ_EXTERN STATIC 3.279 ++ 3.280 ++#ifndef XZ_PREBOOT 3.281 ++# include <linux/slab.h> 3.282 ++# include <linux/xz.h> 3.283 ++#else 3.284 ++/* 3.285 ++ * Use the internal CRC32 code instead of kernel's CRC32 module, which 3.286 ++ * is not available in early phase of booting. 3.287 ++ */ 3.288 ++#define XZ_INTERNAL_CRC32 1 3.289 ++ 3.290 ++/* 3.291 ++ * For boot time use, we enable only the BCJ filter of the current 3.292 ++ * architecture or none if no BCJ filter is available for the architecture. 3.293 ++ */ 3.294 ++#ifdef CONFIG_X86 3.295 ++# define XZ_DEC_X86 3.296 ++#endif 3.297 ++#ifdef CONFIG_PPC 3.298 ++# define XZ_DEC_POWERPC 3.299 ++#endif 3.300 ++#ifdef CONFIG_ARM 3.301 ++# define XZ_DEC_ARM 3.302 ++#endif 3.303 ++#ifdef CONFIG_IA64 3.304 ++# define XZ_DEC_IA64 3.305 ++#endif 3.306 ++#ifdef CONFIG_SPARC 3.307 ++# define XZ_DEC_SPARC 3.308 ++#endif 3.309 ++ 3.310 ++/* 3.311 ++ * This will get the basic headers so that memeq() and others 3.312 ++ * can be defined. 3.313 ++ */ 3.314 ++#include "xz/xz_private.h" 3.315 ++ 3.316 ++/* 3.317 ++ * Replace the normal allocation functions with the versions from 3.318 ++ * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL) 3.319 ++ * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it. 3.320 ++ * Workaround it here because the other decompressors don't need it. 
3.321 ++ */ 3.322 ++#undef kmalloc 3.323 ++#undef kfree 3.324 ++#undef vmalloc 3.325 ++#undef vfree 3.326 ++#define kmalloc(size, flags) malloc(size) 3.327 ++#define kfree(ptr) free(ptr) 3.328 ++#define vmalloc(size) malloc(size) 3.329 ++#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0) 3.330 ++ 3.331 ++/* 3.332 ++ * FIXME: Not all basic memory functions are provided in architecture-specific 3.333 ++ * files (yet). We define our own versions here for now, but this should be 3.334 ++ * only a temporary solution. 3.335 ++ * 3.336 ++ * memeq and memzero are not used much and any remotely sane implementation 3.337 ++ * is fast enough. memcpy/memmove speed matters in multi-call mode, but 3.338 ++ * the kernel image is decompressed in single-call mode, in which only 3.339 ++ * memcpy speed can matter and only if there is a lot of uncompressible data 3.340 ++ * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the 3.341 ++ * functions below should just be kept small; it's probably not worth 3.342 ++ * optimizing for speed. 3.343 ++ */ 3.344 ++ 3.345 ++#ifndef memeq 3.346 ++static bool memeq(const void *a, const void *b, size_t size) 3.347 ++{ 3.348 ++ const uint8_t *x = a; 3.349 ++ const uint8_t *y = b; 3.350 ++ size_t i; 3.351 ++ 3.352 ++ for (i = 0; i < size; ++i) 3.353 ++ if (x[i] != y[i]) 3.354 ++ return false; 3.355 ++ 3.356 ++ return true; 3.357 ++} 3.358 ++#endif 3.359 ++ 3.360 ++#ifndef memzero 3.361 ++static void memzero(void *buf, size_t size) 3.362 ++{ 3.363 ++ uint8_t *b = buf; 3.364 ++ uint8_t *e = b + size; 3.365 ++ 3.366 ++ while (b != e) 3.367 ++ *b++ = '\0'; 3.368 ++} 3.369 ++#endif 3.370 ++ 3.371 ++#ifndef memmove 3.372 ++/* Not static to avoid a conflict with the prototype in the Linux headers. */ 3.373 ++void *memmove(void *dest, const void *src, size_t size) 3.374 ++{ 3.375 ++ uint8_t *d = dest; 3.376 ++ const uint8_t *s = src; 3.377 ++ size_t i; 3.378 ++ 3.379 ++ if (d < s) { 3.380 ++ for (i = 0; i < size; ++i) 3.381 ++ d[i] = s[i]; 3.382 ++ } else if (d > s) { 3.383 ++ i = size; 3.384 ++ while (i-- > 0) 3.385 ++ d[i] = s[i]; 3.386 ++ } 3.387 ++ 3.388 ++ return dest; 3.389 ++} 3.390 ++#endif 3.391 ++ 3.392 ++/* 3.393 ++ * Since we need memmove anyway, would use it as memcpy too. 3.394 ++ * Commented out for now to avoid breaking things. 3.395 ++ */ 3.396 ++/* 3.397 ++#ifndef memcpy 3.398 ++# define memcpy memmove 3.399 ++#endif 3.400 ++*/ 3.401 ++ 3.402 ++#include "xz/xz_crc32.c" 3.403 ++#include "xz/xz_dec_stream.c" 3.404 ++#include "xz/xz_dec_lzma2.c" 3.405 ++#include "xz/xz_dec_bcj.c" 3.406 ++ 3.407 ++#endif /* XZ_PREBOOT */ 3.408 ++ 3.409 ++/* Size of the input and output buffers in multi-call mode */ 3.410 ++#define XZ_IOBUF_SIZE 4096 3.411 ++ 3.412 ++/* 3.413 ++ * This function implements the API defined in <linux/decompress/generic.h>. 3.414 ++ * 3.415 ++ * This wrapper will automatically choose single-call or multi-call mode 3.416 ++ * of the native XZ decoder API. The single-call mode can be used only when 3.417 ++ * both input and output buffers are available as a single chunk, i.e. when 3.418 ++ * fill() and flush() won't be used. 
3.419 ++ */ 3.420 ++STATIC int INIT unxz(unsigned char *in, int in_size, 3.421 ++ int (*fill)(void *dest, unsigned int size), 3.422 ++ int (*flush)(void *src, unsigned int size), 3.423 ++ unsigned char *out, int *in_used, 3.424 ++ void (*error)(char *x)) 3.425 ++{ 3.426 ++ struct xz_buf b; 3.427 ++ struct xz_dec *s; 3.428 ++ enum xz_ret ret; 3.429 ++ bool must_free_in = false; 3.430 ++ 3.431 ++#if XZ_INTERNAL_CRC32 3.432 ++ xz_crc32_init(); 3.433 ++#endif 3.434 ++ 3.435 ++ if (in_used != NULL) 3.436 ++ *in_used = 0; 3.437 ++ 3.438 ++ if (fill == NULL && flush == NULL) 3.439 ++ s = xz_dec_init(XZ_SINGLE, 0); 3.440 ++ else 3.441 ++ s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1); 3.442 ++ 3.443 ++ if (s == NULL) 3.444 ++ goto error_alloc_state; 3.445 ++ 3.446 ++ if (flush == NULL) { 3.447 ++ b.out = out; 3.448 ++ b.out_size = (size_t)-1; 3.449 ++ } else { 3.450 ++ b.out_size = XZ_IOBUF_SIZE; 3.451 ++ b.out = malloc(XZ_IOBUF_SIZE); 3.452 ++ if (b.out == NULL) 3.453 ++ goto error_alloc_out; 3.454 ++ } 3.455 ++ 3.456 ++ if (in == NULL) { 3.457 ++ must_free_in = true; 3.458 ++ in = malloc(XZ_IOBUF_SIZE); 3.459 ++ if (in == NULL) 3.460 ++ goto error_alloc_in; 3.461 ++ } 3.462 ++ 3.463 ++ b.in = in; 3.464 ++ b.in_pos = 0; 3.465 ++ b.in_size = in_size; 3.466 ++ b.out_pos = 0; 3.467 ++ 3.468 ++ if (fill == NULL && flush == NULL) { 3.469 ++ ret = xz_dec_run(s, &b); 3.470 ++ } else { 3.471 ++ do { 3.472 ++ if (b.in_pos == b.in_size && fill != NULL) { 3.473 ++ if (in_used != NULL) 3.474 ++ *in_used += b.in_pos; 3.475 ++ 3.476 ++ b.in_pos = 0; 3.477 ++ 3.478 ++ in_size = fill(in, XZ_IOBUF_SIZE); 3.479 ++ if (in_size < 0) { 3.480 ++ /* 3.481 ++ * This isn't an optimal error code 3.482 ++ * but it probably isn't worth making 3.483 ++ * a new one either. 3.484 ++ */ 3.485 ++ ret = XZ_BUF_ERROR; 3.486 ++ break; 3.487 ++ } 3.488 ++ 3.489 ++ b.in_size = in_size; 3.490 ++ } 3.491 ++ 3.492 ++ ret = xz_dec_run(s, &b); 3.493 ++ 3.494 ++ if (flush != NULL && (b.out_pos == b.out_size 3.495 ++ || (ret != XZ_OK && b.out_pos > 0))) { 3.496 ++ /* 3.497 ++ * Setting ret here may hide an error 3.498 ++ * returned by xz_dec_run(), but probably 3.499 ++ * it's not too bad. 3.500 ++ */ 3.501 ++ if (flush(b.out, b.out_pos) != (int)b.out_pos) 3.502 ++ ret = XZ_BUF_ERROR; 3.503 ++ 3.504 ++ b.out_pos = 0; 3.505 ++ } 3.506 ++ } while (ret == XZ_OK); 3.507 ++ 3.508 ++ if (must_free_in) 3.509 ++ free(in); 3.510 ++ 3.511 ++ if (flush != NULL) 3.512 ++ free(b.out); 3.513 ++ } 3.514 ++ 3.515 ++ if (in_used != NULL) 3.516 ++ *in_used += b.in_pos; 3.517 ++ 3.518 ++ xz_dec_end(s); 3.519 ++ 3.520 ++ switch (ret) { 3.521 ++ case XZ_STREAM_END: 3.522 ++ return 0; 3.523 ++ 3.524 ++ case XZ_MEM_ERROR: 3.525 ++ /* This can occur only in multi-call mode. 
*/ 3.526 ++ error("XZ decompressor ran out of memory"); 3.527 ++ break; 3.528 ++ 3.529 ++ case XZ_FORMAT_ERROR: 3.530 ++ error("Input is not in the XZ format (wrong magic bytes)"); 3.531 ++ break; 3.532 ++ 3.533 ++ case XZ_OPTIONS_ERROR: 3.534 ++ error("Input was encoded with settings that are not " 3.535 ++ "supported by this XZ decoder"); 3.536 ++ break; 3.537 ++ 3.538 ++ case XZ_DATA_ERROR: 3.539 ++ case XZ_BUF_ERROR: 3.540 ++ error("XZ-compressed data is corrupt"); 3.541 ++ break; 3.542 ++ 3.543 ++ default: 3.544 ++ error("Bug in the XZ decompressor"); 3.545 ++ break; 3.546 ++ } 3.547 ++ 3.548 ++ return -1; 3.549 ++ 3.550 ++error_alloc_in: 3.551 ++ if (flush != NULL) 3.552 ++ free(b.out); 3.553 ++ 3.554 ++error_alloc_out: 3.555 ++ xz_dec_end(s); 3.556 ++ 3.557 ++error_alloc_state: 3.558 ++ error("XZ decompressor ran out of memory"); 3.559 ++ return -1; 3.560 ++} 3.561 ++ 3.562 ++/* 3.563 ++ * This macro is used by architecture-specific files to decompress 3.564 ++ * the kernel image. 3.565 ++ */ 3.566 ++#define decompress unxz 3.567 +diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh 3.568 +index 5958fff..55caecd 100644 3.569 +--- a/scripts/gen_initramfs_list.sh 3.570 ++++ b/scripts/gen_initramfs_list.sh 3.571 +@@ -243,6 +243,8 @@ case "$arg" in 3.572 + echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f" 3.573 + echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f" 3.574 + echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f" 3.575 ++ echo "$output_file" | grep -q "\.xz$" && \ 3.576 ++ compr="xz --check=crc32 --lzma2=dict=1MiB" 3.577 + echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f" 3.578 + echo "$output_file" | grep -q "\.cpio$" && compr="cat" 3.579 + shift 3.580 +diff --git a/usr/Kconfig b/usr/Kconfig 3.581 +index e2721f5..9f51a29 100644 3.582 +--- a/usr/Kconfig 3.583 ++++ b/usr/Kconfig 3.584 +@@ -72,6 +72,18 @@ config RD_LZMA 3.585 + Support loading of a LZMA encoded initial ramdisk or cpio buffer 3.586 + If unsure, say N. 3.587 + 3.588 ++config RD_XZ 3.589 ++ bool "Support initial ramdisks compressed using XZ" 3.590 ++ depends on BLK_DEV_INITRD && XZ_DEC=y 3.591 ++ select DECOMPRESS_XZ 3.592 ++ help 3.593 ++ Support loading of a XZ encoded initial ramdisk or cpio buffer. 3.594 ++ 3.595 ++ If this option is inactive, say Y to "XZ decompression support" 3.596 ++ under "Library routines" first. 3.597 ++ 3.598 ++ If unsure, say N. 3.599 ++ 3.600 + config RD_LZO 3.601 + bool "Support initial ramdisks compressed using LZO" if EMBEDDED 3.602 + default !EMBEDDED 3.603 +@@ -139,6 +151,15 @@ config INITRAMFS_COMPRESSION_LZMA 3.604 + three. Compression is slowest. The initramfs size is about 33% 3.605 + smaller with LZMA in comparison to gzip. 3.606 + 3.607 ++config INITRAMFS_COMPRESSION_XZ 3.608 ++ bool "XZ" 3.609 ++ depends on RD_XZ 3.610 ++ help 3.611 ++ XZ uses the LZMA2 algorithm. The initramfs size is about 30% 3.612 ++ smaller with XZ in comparison to gzip. Decompression speed 3.613 ++ is better than that of bzip2 but worse than gzip and LZO. 3.614 ++ Compression is slow. 
3.615 ++ 3.616 + config INITRAMFS_COMPRESSION_LZO 3.617 + bool "LZO" 3.618 + depends on RD_LZO 3.619 +diff --git a/usr/Makefile b/usr/Makefile 3.620 +index 6b4b6da..5845a13 100644 3.621 +--- a/usr/Makefile 3.622 ++++ b/usr/Makefile 3.623 +@@ -15,6 +15,9 @@ suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2) = .bz2 3.624 + # Lzma 3.625 + suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA) = .lzma 3.626 + 3.627 ++# XZ 3.628 ++suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ) = .xz 3.629 ++ 3.630 + # Lzo 3.631 + suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO) = .lzo 3.632 + 3.633 +@@ -48,7 +51,7 @@ endif 3.634 + quiet_cmd_initfs = GEN $@ 3.635 + cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input) 3.636 + 3.637 +-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio 3.638 ++targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio 3.639 + # do not try to update files included in initramfs 3.640 + $(deps_initramfs): ; 3.641 +
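The long comment at the top of lib/decompress_unxz.c above boils the in-place decompression analysis down to safety_margin = 128 + (uncompressed_size >> 12) + 65536, versus 18 + (uncompressed_size >> 12) + 32768 for Deflate. As a worked illustration (the kernel size is invented for the example, not taken from the patch): a hypothetical 5 MiB uncompressed kernel needs 128 + (5242880 >> 12) + 65536 = 128 + 1280 + 65536 = 66944 bytes of slack, roughly 65.4 KiB, against 34066 bytes with the Deflate-only formula. This larger fixed overhead is exactly what the x86 mkpiggy change in the next patch accounts for.

/* Illustrative helper only; it restates the formula derived in the
 * decompress_unxz.c comment above and is not part of the patch. */
static unsigned long xz_inplace_safety_margin(unsigned long uncompressed_size)
{
	/* 128 bytes of .xz headers, one 8-byte LZMA2 chunk header per
	 * assumed 32 KiB of payload, plus one worst-case 64 KiB chunk. */
	return 128 + (uncompressed_size >> 12) + 65536;
}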
4.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 4.2 +++ b/linux/stuff/003-squashfs-x86-support-xz-compressed-kernel.patch Tue Dec 14 21:45:09 2010 +0000 4.3 @@ -0,0 +1,128 @@ 4.4 +From: Lasse Collin <lasse.collin@tukaani.org> 4.5 +Date: Thu, 2 Dec 2010 19:14:57 +0000 (+0200) 4.6 +Subject: x86: Support XZ-compressed kernel 4.7 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fpkl%2Fsquashfs-xz.git;a=commitdiff_plain;h=d4ad78414e5312126127b1f88cdaf8584af3eef1 4.8 + 4.9 +x86: Support XZ-compressed kernel 4.10 + 4.11 +This integrates the XZ decompression code to the x86 4.12 +pre-boot code. 4.13 + 4.14 +mkpiggy.c is updated to reserve about 32 KiB more buffer safety 4.15 +margin for kernel decompression. It is done unconditionally for 4.16 +all decompressors to keep the code simpler. 4.17 + 4.18 +The XZ decompressor needs around 30 KiB of heap, so the heap size 4.19 +is increased to 32 KiB on both x86-32 and x86-64. 4.20 + 4.21 +Documentation/x86/boot.txt is updated to list the XZ magic number. 4.22 + 4.23 +With the x86 BCJ filter in XZ, XZ-compressed x86 kernel tends to be 4.24 +a few percent smaller than the equivalent LZMA-compressed kernel. 4.25 + 4.26 +Signed-off-by: Lasse Collin <lasse.collin@tukaani.org> 4.27 +--- 4.28 + 4.29 +diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt 4.30 +index 30b43e1..3988cde 100644 4.31 +--- a/Documentation/x86/boot.txt 4.32 ++++ b/Documentation/x86/boot.txt 4.33 +@@ -621,9 +621,9 @@ Protocol: 2.08+ 4.34 + The payload may be compressed. The format of both the compressed and 4.35 + uncompressed data should be determined using the standard magic 4.36 + numbers. The currently supported compression formats are gzip 4.37 +- (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA 4.38 +- (magic number 5D 00). The uncompressed payload is currently always ELF 4.39 +- (magic number 7F 45 4C 46). 4.40 ++ (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA 4.41 ++ (magic number 5D 00), and XZ (magic number FD 37). The uncompressed 4.42 ++ payload is currently always ELF (magic number 7F 45 4C 46). 
4.43 + 4.44 + Field name: payload_length 4.45 + Type: read 4.46 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig 4.47 +index cea0cd9..f3db0d7 100644 4.48 +--- a/arch/x86/Kconfig 4.49 ++++ b/arch/x86/Kconfig 4.50 +@@ -51,6 +51,7 @@ config X86 4.51 + select HAVE_KERNEL_GZIP 4.52 + select HAVE_KERNEL_BZIP2 4.53 + select HAVE_KERNEL_LZMA 4.54 ++ select HAVE_KERNEL_XZ 4.55 + select HAVE_KERNEL_LZO 4.56 + select HAVE_HW_BREAKPOINT 4.57 + select HAVE_MIXED_BREAKPOINTS_REGS 4.58 +diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile 4.59 +index 0c22955..09664ef 100644 4.60 +--- a/arch/x86/boot/compressed/Makefile 4.61 ++++ b/arch/x86/boot/compressed/Makefile 4.62 +@@ -4,7 +4,7 @@ 4.63 + # create a compressed vmlinux image from the original vmlinux 4.64 + # 4.65 + 4.66 +-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o 4.67 ++targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o 4.68 + 4.69 + KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 4.70 + KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 4.71 +@@ -49,12 +49,15 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE 4.72 + $(call if_changed,bzip2) 4.73 + $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE 4.74 + $(call if_changed,lzma) 4.75 ++$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE 4.76 ++ $(call if_changed,xzkern) 4.77 + $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE 4.78 + $(call if_changed,lzo) 4.79 + 4.80 + suffix-$(CONFIG_KERNEL_GZIP) := gz 4.81 + suffix-$(CONFIG_KERNEL_BZIP2) := bz2 4.82 + suffix-$(CONFIG_KERNEL_LZMA) := lzma 4.83 ++suffix-$(CONFIG_KERNEL_XZ) := xz 4.84 + suffix-$(CONFIG_KERNEL_LZO) := lzo 4.85 + 4.86 + quiet_cmd_mkpiggy = MKPIGGY $@ 4.87 +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c 4.88 +index 8f7bef8..6d4cab7 100644 4.89 +--- a/arch/x86/boot/compressed/misc.c 4.90 ++++ b/arch/x86/boot/compressed/misc.c 4.91 +@@ -139,6 +139,10 @@ static int lines, cols; 4.92 + #include "../../../../lib/decompress_unlzma.c" 4.93 + #endif 4.94 + 4.95 ++#ifdef CONFIG_KERNEL_XZ 4.96 ++#include "../../../../lib/decompress_unxz.c" 4.97 ++#endif 4.98 ++ 4.99 + #ifdef CONFIG_KERNEL_LZO 4.100 + #include "../../../../lib/decompress_unlzo.c" 4.101 + #endif 4.102 +diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c 4.103 +index 5c22812..646aa78 100644 4.104 +--- a/arch/x86/boot/compressed/mkpiggy.c 4.105 ++++ b/arch/x86/boot/compressed/mkpiggy.c 4.106 +@@ -74,7 +74,7 @@ int main(int argc, char *argv[]) 4.107 + 4.108 + offs = (olen > ilen) ? 
olen - ilen : 0; 4.109 + offs += olen >> 12; /* Add 8 bytes for each 32K block */ 4.110 +- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ 4.111 ++ offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ 4.112 + offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ 4.113 + 4.114 + printf(".section \".rodata..compressed\",\"a\",@progbits\n"); 4.115 +diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h 4.116 +index 3b62ab5..5e1a2ee 100644 4.117 +--- a/arch/x86/include/asm/boot.h 4.118 ++++ b/arch/x86/include/asm/boot.h 4.119 +@@ -32,11 +32,7 @@ 4.120 + #define BOOT_HEAP_SIZE 0x400000 4.121 + #else /* !CONFIG_KERNEL_BZIP2 */ 4.122 + 4.123 +-#ifdef CONFIG_X86_64 4.124 +-#define BOOT_HEAP_SIZE 0x7000 4.125 +-#else 4.126 +-#define BOOT_HEAP_SIZE 0x4000 4.127 +-#endif 4.128 ++#define BOOT_HEAP_SIZE 0x8000 4.129 + 4.130 + #endif /* !CONFIG_KERNEL_BZIP2 */ 4.131 +
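Documentation/x86/boot.txt above identifies each supported payload format by its leading magic bytes, and the table added to lib/decompress.c in the previous patch dispatches on the same two bytes. A small sketch of that check follows (illustration only; the function name is invented, and the kernel table also carries the decompress callbacks rather than format names):

/* Illustrative only: map the first two payload bytes to a format name, using
 * the magic numbers listed in boot.txt and in the lib/decompress.c table. */
static const char *example_identify_payload(const unsigned char *p)
{
	if (p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
		return "gzip";
	if (p[0] == 0x42 && p[1] == 0x5a)
		return "bzip2";
	if (p[0] == 0x5d && p[1] == 0x00)
		return "lzma";
	if (p[0] == 0xfd && p[1] == 0x37)
		return "xz";
	if (p[0] == 0x89 && p[1] == 0x4c)
		return "lzo";
	return "unknown";	/* uncompressed ELF starts with 7f 45 4c 46 */
}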
5.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 5.2 +++ b/linux/stuff/004-squashfs-add-xz-compression-support.patch Tue Dec 14 21:45:09 2010 +0000 5.3 @@ -0,0 +1,183 @@ 5.4 +From: Phillip Lougher <phillip@lougher.demon.co.uk> 5.5 +Date: Thu, 9 Dec 2010 02:02:29 +0000 (+0000) 5.6 +Subject: Squashfs: add XZ compression support 5.7 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fpkl%2Fsquashfs-xz.git;a=commitdiff_plain;h=d3e6969b9ff1f3a3c6bf3da71433c77046aa80e4 5.8 + 5.9 +Squashfs: add XZ compression support 5.10 + 5.11 +Add XZ decompressor wrapper code. 5.12 + 5.13 +Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk> 5.14 +--- 5.15 + 5.16 +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h 5.17 +index c5137fc..39533fe 100644 5.18 +--- a/fs/squashfs/squashfs_fs.h 5.19 ++++ b/fs/squashfs/squashfs_fs.h 5.20 +@@ -238,6 +238,7 @@ struct meta_index { 5.21 + #define ZLIB_COMPRESSION 1 5.22 + #define LZMA_COMPRESSION 2 5.23 + #define LZO_COMPRESSION 3 5.24 ++#define XZ_COMPRESSION 4 5.25 + 5.26 + struct squashfs_super_block { 5.27 + __le32 s_magic; 5.28 +diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c 5.29 +new file mode 100644 5.30 +index 0000000..053fe35 5.31 +--- /dev/null 5.32 ++++ b/fs/squashfs/xz_wrapper.c 5.33 +@@ -0,0 +1,153 @@ 5.34 ++/* 5.35 ++ * Squashfs - a compressed read only filesystem for Linux 5.36 ++ * 5.37 ++ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 5.38 ++ * Phillip Lougher <phillip@lougher.demon.co.uk> 5.39 ++ * 5.40 ++ * This program is free software; you can redistribute it and/or 5.41 ++ * modify it under the terms of the GNU General Public License 5.42 ++ * as published by the Free Software Foundation; either version 2, 5.43 ++ * or (at your option) any later version. 5.44 ++ * 5.45 ++ * This program is distributed in the hope that it will be useful, 5.46 ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of 5.47 ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 5.48 ++ * GNU General Public License for more details. 5.49 ++ * 5.50 ++ * You should have received a copy of the GNU General Public License 5.51 ++ * along with this program; if not, write to the Free Software 5.52 ++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
5.53 ++ * 5.54 ++ * xz_wrapper.c 5.55 ++ */ 5.56 ++ 5.57 ++ 5.58 ++#include <linux/mutex.h> 5.59 ++#include <linux/buffer_head.h> 5.60 ++#include <linux/slab.h> 5.61 ++#include <linux/xz.h> 5.62 ++ 5.63 ++#include "squashfs_fs.h" 5.64 ++#include "squashfs_fs_sb.h" 5.65 ++#include "squashfs_fs_i.h" 5.66 ++#include "squashfs.h" 5.67 ++#include "decompressor.h" 5.68 ++ 5.69 ++struct squashfs_xz { 5.70 ++ struct xz_dec *state; 5.71 ++ struct xz_buf buf; 5.72 ++}; 5.73 ++ 5.74 ++static void *squashfs_xz_init(struct squashfs_sb_info *msblk) 5.75 ++{ 5.76 ++ int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE); 5.77 ++ 5.78 ++ struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL); 5.79 ++ if (stream == NULL) 5.80 ++ goto failed; 5.81 ++ stream->state = xz_dec_init(XZ_PREALLOC, block_size); 5.82 ++ if (stream->state == NULL) 5.83 ++ goto failed; 5.84 ++ 5.85 ++ return stream; 5.86 ++ 5.87 ++failed: 5.88 ++ ERROR("Failed to allocate xz workspace\n"); 5.89 ++ kfree(stream); 5.90 ++ return NULL; 5.91 ++} 5.92 ++ 5.93 ++ 5.94 ++static void squashfs_xz_free(void *strm) 5.95 ++{ 5.96 ++ struct squashfs_xz *stream = strm; 5.97 ++ 5.98 ++ if (stream) { 5.99 ++ xz_dec_end(stream->state); 5.100 ++ kfree(stream); 5.101 ++ } 5.102 ++} 5.103 ++ 5.104 ++ 5.105 ++static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, 5.106 ++ struct buffer_head **bh, int b, int offset, int length, int srclength, 5.107 ++ int pages) 5.108 ++{ 5.109 ++ enum xz_ret xz_err; 5.110 ++ int avail, total = 0, k = 0, page = 0; 5.111 ++ struct squashfs_xz *stream = msblk->stream; 5.112 ++ 5.113 ++ mutex_lock(&msblk->read_data_mutex); 5.114 ++ 5.115 ++ xz_dec_reset(stream->state); 5.116 ++ stream->buf.in_pos = 0; 5.117 ++ stream->buf.in_size = 0; 5.118 ++ stream->buf.out_pos = 0; 5.119 ++ stream->buf.out_size = PAGE_CACHE_SIZE; 5.120 ++ stream->buf.out = buffer[page++]; 5.121 ++ 5.122 ++ do { 5.123 ++ if (stream->buf.in_pos == stream->buf.in_size && k < b) { 5.124 ++ avail = min(length, msblk->devblksize - offset); 5.125 ++ length -= avail; 5.126 ++ wait_on_buffer(bh[k]); 5.127 ++ if (!buffer_uptodate(bh[k])) 5.128 ++ goto release_mutex; 5.129 ++ 5.130 ++ if (avail == 0) { 5.131 ++ offset = 0; 5.132 ++ put_bh(bh[k++]); 5.133 ++ continue; 5.134 ++ } 5.135 ++ 5.136 ++ stream->buf.in = bh[k]->b_data + offset; 5.137 ++ stream->buf.in_size = avail; 5.138 ++ stream->buf.in_pos = 0; 5.139 ++ offset = 0; 5.140 ++ } 5.141 ++ 5.142 ++ if (stream->buf.out_pos == stream->buf.out_size 5.143 ++ && page < pages) { 5.144 ++ stream->buf.out = buffer[page++]; 5.145 ++ stream->buf.out_pos = 0; 5.146 ++ total += PAGE_CACHE_SIZE; 5.147 ++ } 5.148 ++ 5.149 ++ xz_err = xz_dec_run(stream->state, &stream->buf); 5.150 ++ 5.151 ++ if (stream->buf.in_pos == stream->buf.in_size && k < b) 5.152 ++ put_bh(bh[k++]); 5.153 ++ } while (xz_err == XZ_OK); 5.154 ++ 5.155 ++ if (xz_err != XZ_STREAM_END) { 5.156 ++ ERROR("xz_dec_run error, data probably corrupt\n"); 5.157 ++ goto release_mutex; 5.158 ++ } 5.159 ++ 5.160 ++ if (k < b) { 5.161 ++ ERROR("xz_uncompress error, input remaining\n"); 5.162 ++ goto release_mutex; 5.163 ++ } 5.164 ++ 5.165 ++ total += stream->buf.out_pos; 5.166 ++ mutex_unlock(&msblk->read_data_mutex); 5.167 ++ return total; 5.168 ++ 5.169 ++release_mutex: 5.170 ++ mutex_unlock(&msblk->read_data_mutex); 5.171 ++ 5.172 ++ for (; k < b; k++) 5.173 ++ put_bh(bh[k]); 5.174 ++ 5.175 ++ return -EIO; 5.176 ++} 5.177 ++ 5.178 ++const struct squashfs_decompressor squashfs_xz_comp_ops = { 5.179 ++ .init = 
squashfs_xz_init, 5.180 ++ .free = squashfs_xz_free, 5.181 ++ .decompress = squashfs_xz_uncompress, 5.182 ++ .id = XZ_COMPRESSION, 5.183 ++ .name = "xz", 5.184 ++ .supported = 1 5.185 ++}; 5.186 ++
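The wrapper in patch 004 is a fairly direct use of the xz_dec multi-call API: squashfs_xz_init() allocates one preallocated decoder per mount (XZ_PREALLOC, dictionary bounded by the block size), squashfs_xz_free() releases it, and squashfs_xz_uncompress() reuses that state for every block via xz_dec_reset(), refilling the input window from successive buffer_heads and switching to the next page-sized output buffer whenever the current one fills, until xz_dec_run() reports XZ_STREAM_END. The sketch below shows the same loop against plain in-memory chunks instead of buffer_heads; the names (sketch_chunk, sketch_xz_stream) are made up for illustration and error handling is trimmed to the minimum.

/*
 * Illustrative sketch only, not part of the patch: the multi-call
 * xz_dec pattern used by squashfs_xz_uncompress, reading from an
 * array of in-memory chunks and writing into page-sized buffers.
 */
#include <linux/xz.h>
#include <linux/errno.h>

struct sketch_chunk {
	const void *data;
	size_t len;
};

static int sketch_xz_stream(struct xz_dec *state,	/* from xz_dec_init(XZ_PREALLOC, block_size) */
			    const struct sketch_chunk *in, int nr_in,
			    void **out_pages, int nr_pages, size_t page_size)
{
	struct xz_buf b = { .out = out_pages[0], .out_size = page_size };
	enum xz_ret ret;
	int i = 0, page = 1, total = 0;

	xz_dec_reset(state);		/* one preallocated state, reused per block */

	do {
		/* Refill the input window once the current chunk is consumed. */
		if (b.in_pos == b.in_size && i < nr_in) {
			b.in = in[i].data;
			b.in_size = in[i].len;
			b.in_pos = 0;
			i++;
		}
		/* Advance to the next output page when the current one is full. */
		if (b.out_pos == b.out_size && page < nr_pages) {
			total += b.out_pos;
			b.out = out_pages[page++];
			b.out_pos = 0;
		}
		ret = xz_dec_run(state, &b);
	} while (ret == XZ_OK);

	if (ret != XZ_STREAM_END)
		return -EIO;		/* corrupt or truncated block */

	return total + b.out_pos;	/* bytes decompressed */
}

Keeping a single XZ_PREALLOC state behind msblk->read_data_mutex avoids allocating a fresh dictionary for every read, mirroring how the existing zlib and LZO wrappers manage their per-mount state.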
6.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 6.2 +++ b/linux/stuff/005-squashfs-add-xz-compression-configuration-option.patch Tue Dec 14 21:45:09 2010 +0000 6.3 @@ -0,0 +1,86 @@ 6.4 +From: Phillip Lougher <phillip@lougher.demon.co.uk> 6.5 +Date: Thu, 9 Dec 2010 02:08:31 +0000 (+0000) 6.6 +Subject: Squashfs: Add XZ compression configuration option 6.7 +X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fpkl%2Fsquashfs-xz.git;a=commitdiff_plain;h=e23d468968e608de27328888240de27d7582ad52 6.8 + 6.9 +Squashfs: Add XZ compression configuration option 6.10 + 6.11 +Signed-off-by: Phillip Lougher <phillip@lougher.demon.co.uk> 6.12 +--- 6.13 + 6.14 +diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig 6.15 +index e5f63da..e96d99a 100644 6.16 +--- a/fs/squashfs/Kconfig 6.17 ++++ b/fs/squashfs/Kconfig 6.18 +@@ -53,6 +53,22 @@ config SQUASHFS_LZO 6.19 + 6.20 + If unsure, say N. 6.21 + 6.22 ++config SQUASHFS_XZ 6.23 ++ bool "Include support for XZ compressed file systems" 6.24 ++ depends on SQUASHFS 6.25 ++ default n 6.26 ++ select XZ_DEC 6.27 ++ help 6.28 ++ Saying Y here includes support for reading Squashfs file systems 6.29 ++ compressed with XZ compresssion. XZ gives better compression than 6.30 ++ the default zlib compression, at the expense of greater CPU and 6.31 ++ memory overhead. 6.32 ++ 6.33 ++ XZ is not the standard compression used in Squashfs and so most 6.34 ++ file systems will be readable without selecting this option. 6.35 ++ 6.36 ++ If unsure, say N. 6.37 ++ 6.38 + config SQUASHFS_EMBEDDED 6.39 + bool "Additional option for memory-constrained systems" 6.40 + depends on SQUASHFS 6.41 +diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile 6.42 +index 7672bac..cecf2be 100644 6.43 +--- a/fs/squashfs/Makefile 6.44 ++++ b/fs/squashfs/Makefile 6.45 +@@ -7,3 +7,4 @@ squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o 6.46 + squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o 6.47 + squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o 6.48 + squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o 6.49 ++squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o 6.50 +diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c 6.51 +index 24af9ce..ac333b8 100644 6.52 +--- a/fs/squashfs/decompressor.c 6.53 ++++ b/fs/squashfs/decompressor.c 6.54 +@@ -46,6 +46,12 @@ static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = { 6.55 + }; 6.56 + #endif 6.57 + 6.58 ++#ifndef CONFIG_SQUASHFS_XZ 6.59 ++static const struct squashfs_decompressor squashfs_xz_unsupported_comp_ops = { 6.60 ++ NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0 6.61 ++}; 6.62 ++#endif 6.63 ++ 6.64 + static const struct squashfs_decompressor squashfs_unknown_comp_ops = { 6.65 + NULL, NULL, NULL, 0, "unknown", 0 6.66 + }; 6.67 +@@ -58,6 +64,11 @@ static const struct squashfs_decompressor *decompressor[] = { 6.68 + #else 6.69 + &squashfs_lzo_unsupported_comp_ops, 6.70 + #endif 6.71 ++#ifdef CONFIG_SQUASHFS_XZ 6.72 ++ &squashfs_xz_comp_ops, 6.73 ++#else 6.74 ++ &squashfs_xz_unsupported_comp_ops, 6.75 ++#endif 6.76 + &squashfs_unknown_comp_ops 6.77 + }; 6.78 + 6.79 +diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h 6.80 +index 5d45569..1096e2e 100644 6.81 +--- a/fs/squashfs/squashfs.h 6.82 ++++ b/fs/squashfs/squashfs.h 6.83 +@@ -107,3 +107,6 @@ extern const struct squashfs_decompressor squashfs_zlib_comp_ops; 6.84 + 6.85 + /* lzo_wrapper.c */ 6.86 + extern const struct squashfs_decompressor squashfs_lzo_comp_ops; 6.87 ++ 6.88 ++/* xz_wrapper.c */ 6.89 ++extern 
const struct squashfs_decompressor squashfs_xz_comp_ops;
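Patch 005 only has to add one table entry (plus a stub) because the decompressor framework selects the wrapper by the compression id stored in the superblock. The squashfs_xz_unsupported_comp_ops stub keeps the id/name pair present even when CONFIG_SQUASHFS_XZ is off, so mounting an XZ image on such a kernel fails with an error that names "xz" rather than falling through to "unknown". The lookup itself is a linear scan terminated by the unknown entry's id of 0; the sketch below is modelled on the existing squashfs_lookup_decompressor() in fs/squashfs/decompressor.c and is simplified for illustration.

/*
 * Sketch (not part of the patch): how the decompressor[] table above is
 * consulted at mount time, modelled on squashfs_lookup_decompressor().
 * It would live next to the table in fs/squashfs/decompressor.c.
 */
static const struct squashfs_decompressor *lookup_by_id(int id)
{
	int i;

	/* squashfs_unknown_comp_ops has id == 0, so the scan always terminates. */
	for (i = 0; decompressor[i]->id; i++)
		if (id == decompressor[i]->id)
			break;

	return decompressor[i];
}

The caller then checks the ->supported flag, which is what distinguishes a compiled-in wrapper from one of the *_unsupported_comp_ops stubs.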
7.1 --- a/linux/stuff/linux-2.6.36-slitaz.config Tue Dec 14 21:34:46 2010 +0000 7.2 +++ b/linux/stuff/linux-2.6.36-slitaz.config Tue Dec 14 21:45:09 2010 +0000 7.3 @@ -1,7 +1,7 @@ 7.4 # 7.5 # Automatically generated make config: don't edit 7.6 # Linux kernel version: 2.6.36 7.7 -# Fri Nov 5 02:14:33 2010 7.8 +# Tue Dec 14 11:22:55 2010 7.9 # 7.10 # CONFIG_64BIT is not set 7.11 CONFIG_X86_32=y 7.12 @@ -74,10 +74,12 @@ 7.13 CONFIG_HAVE_KERNEL_GZIP=y 7.14 CONFIG_HAVE_KERNEL_BZIP2=y 7.15 CONFIG_HAVE_KERNEL_LZMA=y 7.16 +CONFIG_HAVE_KERNEL_XZ=y 7.17 CONFIG_HAVE_KERNEL_LZO=y 7.18 # CONFIG_KERNEL_GZIP is not set 7.19 # CONFIG_KERNEL_BZIP2 is not set 7.20 CONFIG_KERNEL_LZMA=y 7.21 +# CONFIG_KERNEL_XZ is not set 7.22 # CONFIG_KERNEL_LZO is not set 7.23 CONFIG_SWAP=y 7.24 CONFIG_SYSVIPC=y 7.25 @@ -116,6 +118,7 @@ 7.26 CONFIG_RD_GZIP=y 7.27 CONFIG_RD_BZIP2=y 7.28 CONFIG_RD_LZMA=y 7.29 +CONFIG_RD_XZ=y 7.30 CONFIG_RD_LZO=y 7.31 CONFIG_CC_OPTIMIZE_FOR_SIZE=y 7.32 CONFIG_SYSCTL=y 7.33 @@ -837,7 +840,6 @@ 7.34 CONFIG_ATM_BR2684=m 7.35 # CONFIG_ATM_BR2684_IPFILTER is not set 7.36 # CONFIG_L2TP is not set 7.37 -# CONFIG_L2TP_V3 is not set 7.38 CONFIG_STP=m 7.39 CONFIG_BRIDGE=m 7.40 # CONFIG_BRIDGE_IGMP_SNOOPING is not set 7.41 @@ -1815,7 +1817,6 @@ 7.42 CONFIG_PPP_MPPE=y 7.43 CONFIG_PPPOE=y 7.44 CONFIG_PPPOATM=m 7.45 -# CONFIG_PPPOL2TP is not set 7.46 # CONFIG_SLIP is not set 7.47 CONFIG_SLHC=y 7.48 # CONFIG_NET_FC is not set 7.49 @@ -3268,6 +3269,7 @@ 7.50 CONFIG_SQUASHFS=m 7.51 CONFIG_SQUASHFS_XATTR=y 7.52 CONFIG_SQUASHFS_LZO=y 7.53 +CONFIG_SQUASHFS_XZ=y 7.54 # CONFIG_SQUASHFS_EMBEDDED is not set 7.55 CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 7.56 # CONFIG_VXFS_FS is not set 7.57 @@ -3278,23 +3280,6 @@ 7.58 # CONFIG_ROMFS_FS is not set 7.59 # CONFIG_SYSV_FS is not set 7.60 # CONFIG_UFS_FS is not set 7.61 -CONFIG_AUFS_FS=m 7.62 -# CONFIG_AUFS_BRANCH_MAX_127 is not set 7.63 -# CONFIG_AUFS_BRANCH_MAX_511 is not set 7.64 -CONFIG_AUFS_BRANCH_MAX_1023=y 7.65 -# CONFIG_AUFS_BRANCH_MAX_32767 is not set 7.66 -CONFIG_AUFS_HNOTIFY=y 7.67 -CONFIG_AUFS_HFSNOTIFY=y 7.68 -# CONFIG_AUFS_HINOTIFY is not set 7.69 -# CONFIG_AUFS_EXPORT is not set 7.70 -# CONFIG_AUFS_RDU is not set 7.71 -# CONFIG_AUFS_SP_IATTR is not set 7.72 -CONFIG_AUFS_SHWH=y 7.73 -CONFIG_AUFS_BR_RAMFS=y 7.74 -# CONFIG_AUFS_BR_FUSE is not set 7.75 -# CONFIG_AUFS_BR_HFSPLUS is not set 7.76 -CONFIG_AUFS_BDEV_LOOP=y 7.77 -# CONFIG_AUFS_DEBUG is not set 7.78 CONFIG_NETWORK_FILESYSTEMS=y 7.79 CONFIG_NFS_FS=y 7.80 CONFIG_NFS_V3=y 7.81 @@ -3615,9 +3600,19 @@ 7.82 CONFIG_ZLIB_INFLATE=y 7.83 CONFIG_ZLIB_DEFLATE=y 7.84 CONFIG_LZO_DECOMPRESS=y 7.85 +CONFIG_XZ_DEC=y 7.86 +CONFIG_XZ_DEC_X86=y 7.87 +CONFIG_XZ_DEC_POWERPC=y 7.88 +CONFIG_XZ_DEC_IA64=y 7.89 +CONFIG_XZ_DEC_ARM=y 7.90 +CONFIG_XZ_DEC_ARMTHUMB=y 7.91 +CONFIG_XZ_DEC_SPARC=y 7.92 +CONFIG_XZ_DEC_BCJ=y 7.93 +# CONFIG_XZ_DEC_TEST is not set 7.94 CONFIG_DECOMPRESS_GZIP=y 7.95 CONFIG_DECOMPRESS_BZIP2=y 7.96 CONFIG_DECOMPRESS_LZMA=y 7.97 +CONFIG_DECOMPRESS_XZ=y 7.98 CONFIG_DECOMPRESS_LZO=y 7.99 CONFIG_TEXTSEARCH=y 7.100 CONFIG_TEXTSEARCH_KMP=m
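On the configuration side, the change enables CONFIG_SQUASHFS_XZ together with the CONFIG_XZ_DEC library and all of its BCJ filter variants, adds CONFIG_RD_XZ and CONFIG_DECOMPRESS_XZ so an XZ-compressed initramfs can be unpacked, keeps the kernel image itself on LZMA (CONFIG_KERNEL_XZ remains unset), and drops the CONFIG_AUFS_* block from the stored config. With CONFIG_XZ_DEC=y the decoder is also available to any other kernel code; besides the preallocated multi-call mode used by Squashfs, it offers a single-call mode where the whole input and output are handed over at once. The sketch below shows that mode; sketch_unxz_once is a hypothetical helper, not an existing kernel function.

/*
 * Sketch only: one-shot, buffer-to-buffer XZ decompression with the
 * xz_dec API that CONFIG_XZ_DEC=y builds in. XZ_SINGLE expects the full
 * input and a large-enough output buffer up front, so no separate
 * dictionary allocation is needed.
 */
#include <linux/xz.h>
#include <linux/errno.h>

static int sketch_unxz_once(const void *in, size_t in_size,
			    void *out, size_t out_size, size_t *out_len)
{
	struct xz_buf b = {
		.in = in, .in_pos = 0, .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
	enum xz_ret ret;

	if (s == NULL)
		return -ENOMEM;

	ret = xz_dec_run(s, &b);	/* single call processes the whole stream */
	xz_dec_end(s);

	if (ret != XZ_STREAM_END)
		return -EINVAL;		/* corrupt input or output buffer too small */

	*out_len = b.out_pos;
	return 0;
}

A caller must reserve an output buffer at least as large as the uncompressed data, since single-call mode cannot grow the output as it runs.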
8.1 --- a/linux/stuff/modules-2.6.36.list Tue Dec 14 21:34:46 2010 +0000 8.2 +++ b/linux/stuff/modules-2.6.36.list Tue Dec 14 21:45:09 2010 +0000 8.3 @@ -128,6 +128,9 @@ 8.4 net/llc/llc.ko.gz 8.5 drivers/mmc/core/mmc_core.ko.gz 8.6 net/rfkill/rfkill.ko.gz 8.7 +net/sunrpc/auth_gss/auth_rpcgss.ko.gz 8.8 +net/sunrpc/auth_gss/rpcsec_gss_krb5.ko.gz 8.9 +net/netfilter/nf_conntrack.ko.gz 8.10 drivers/input/gameport/gameport.ko.gz 8.11 drivers/i2c/i2c-core.ko.gz 8.12 sound/soundcore.ko.gz