wok view linux/stuff/linux-lzma-2.6.22.9.u @ rev 2757
locale-ru: Use UTF-8 by default
author | Christophe Lincoln <pankso@slitaz.org> |
---|---|
date | Sun Apr 26 21:07:54 2009 +0200 (2009-04-26) |
parents | |
children |
line source
1 --- linux-2.6.22.9/arch/i386/boot/compressed/Makefile
2 +++ linux-2.6.22.9/arch/i386/boot/compressed/Makefile
3 @@ -4,7 +4,7 @@
4 # create a compressed vmlinux image from the original vmlinux
5 #
7 -targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o \
8 +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head.o misc.o piggy.o \
9 vmlinux.bin.all vmlinux.relocs
10 EXTRA_AFLAGS := -traditional
12 @@ -39,7 +39,27 @@ $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bi
13 $(call if_changed,gzip)
14 endif
16 +ifdef CONFIG_RELOCATABLE
17 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
18 + $(call if_changed,bzip2)
19 +else
20 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
21 + $(call if_changed,bzip2)
22 +endif
23 +
24 +ifdef CONFIG_RELOCATABLE
25 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
26 + $(call if_changed,lzma)
27 +else
28 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
29 + $(call if_changed,lzma)
30 +endif
31 +
32 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
34 -$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
35 +suffix_$(CONFIG_KERNEL_GZIP) = gz
36 +suffix_$(CONFIG_KERNEL_BZIP2) = bz2
37 +suffix_$(CONFIG_KERNEL_LZMA) = lzma
38 +
39 +$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
40 $(call if_changed,ld)
42 --- linux-2.6.22.9/arch/i386/boot/compressed/misc.c
43 +++ linux-2.6.22.9/arch/i386/boot/compressed/misc.c
44 @@ -121,9 +121,12 @@
45 * always be larger than our output buffer.
46 */
48 +#ifdef CONFIG_KERNEL_GZIP
49 static uch *inbuf; /* input buffer */
50 +#endif
51 static uch *window; /* Sliding window buffer, (and final output buffer) */
53 +#ifdef CONFIG_KERNEL_GZIP
54 static unsigned insize; /* valid bytes in inbuf */
55 static unsigned inptr; /* index of next byte to be processed in inbuf */
56 static unsigned outcnt; /* bytes in output buffer */
57 @@ -158,9 +161,14 @@ static unsigned outcnt; /* bytes in out
59 static int fill_inbuf(void);
60 static void flush_window(void);
61 +#endif
62 +
63 static void error(char *m);
64 +
65 +#ifdef CONFIG_KERNEL_GZIP
66 static void gzip_mark(void **);
67 static void gzip_release(void **);
68 +#endif
70 /*
71 * This is set up by the setup-routine at boot-time
72 @@ -181,7 +189,9 @@ static long bytes_out = 0;
73 static void *malloc(int size);
74 static void free(void *where);
76 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
77 static void *memset(void *s, int c, unsigned n);
78 +#endif
79 static void *memcpy(void *dest, const void *src, unsigned n);
81 static void putstr(const char *);
82 @@ -189,7 +199,11 @@ static void putstr(const char *);
83 static unsigned long free_mem_ptr;
84 static unsigned long free_mem_end_ptr;
86 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
87 +#define HEAP_SIZE 0x400000
88 +#else
89 #define HEAP_SIZE 0x4000
90 +#endif
92 static char *vidmem = (char *)0xb8000;
93 static int vidport;
94 @@ -199,7 +213,29 @@ static int lines, cols;
95 void *xquad_portio;
96 #endif
98 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
99 +
100 +#define large_malloc malloc
101 +#define large_free free
102 +
103 +#ifdef current
104 +#undef current
105 +#endif
106 +
107 +#define INCLUDED
108 +#endif
109 +
110 +#ifdef CONFIG_KERNEL_GZIP
111 #include "../../../../lib/inflate.c"
112 +#endif
113 +
114 +#ifdef CONFIG_KERNEL_BZIP2
115 +#include "../../../../lib/decompress_bunzip2.c"
116 +#endif
117 +
118 +#ifdef CONFIG_KERNEL_LZMA
119 +#include "../../../../lib/decompress_unlzma.c"
120 +#endif
122 static void *malloc(int size)
123 {
124 @@ -223,6 +259,7 @@ static void free(void *where)
125 { /* Don't care */
126 }
128 +#ifdef CONFIG_KERNEL_GZIP
129 static void gzip_mark(void **ptr)
130 {
131 *ptr = (void *) free_mem_ptr;
132 @@ -232,6 +269,7 @@ static void gzip_release(void **ptr)
133 {
134 free_mem_ptr = (unsigned long) *ptr;
135 }
136 +#endif
138 static void scroll(void)
139 {
140 @@ -279,6 +317,7 @@ static void putstr(const char *s)
141 outb_p(0xff & (pos >> 1), vidport+1);
142 }
144 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
145 static void* memset(void* s, int c, unsigned n)
146 {
147 int i;
148 @@ -287,6 +326,7 @@ static void* memset(void* s, int c, unsi
149 for (i=0;i<n;i++) ss[i] = c;
150 return s;
151 }
152 +#endif
154 static void* memcpy(void* dest, const void* src, unsigned n)
155 {
156 @@ -297,6 +337,26 @@ static void* memcpy(void* dest, const vo
157 return dest;
158 }
160 +#ifndef CONFIG_KERNEL_GZIP
161 +/* ===========================================================================
162 + * Write the output window window[0..outcnt-1] and update bytes_out.
163 + * (Used for the decompressed data only.)
164 + */
165 +static int compr_flush(char *data, unsigned int len)
166 +{
167 + unsigned n;
168 + uch *out;
169 +
170 + out = window;
171 + for (n = 0; n < len; n++) {
172 + *out++ = *data++;
173 + }
174 + bytes_out += (ulg)len;
175 + window += (ulg)len;
176 + return len;
177 +}
178 +
179 +#else
180 /* ===========================================================================
181 * Fill the input buffer. This is called only when the buffer is empty
182 * and at least one byte is really needed.
183 @@ -329,6 +389,7 @@ static void flush_window(void)
184 bytes_out += (ulg)outcnt;
185 outcnt = 0;
186 }
187 +#endif
189 static void error(char *x)
190 {
191 @@ -358,9 +419,11 @@ asmlinkage void decompress_kernel(void *
192 window = output; /* Output buffer (Normally at 1M) */
193 free_mem_ptr = end; /* Heap */
194 free_mem_end_ptr = end + HEAP_SIZE;
195 +#ifdef CONFIG_KERNEL_GZIP
196 inbuf = input_data; /* Input buffer */
197 insize = input_len;
198 inptr = 0;
199 +#endif
201 if ((u32)output & (CONFIG_PHYSICAL_ALIGN -1))
202 error("Destination address not CONFIG_PHYSICAL_ALIGN aligned");
203 @@ -371,9 +434,21 @@ asmlinkage void decompress_kernel(void *
204 error("Wrong destination address");
205 #endif
207 +#ifdef CONFIG_KERNEL_BZIP2
208 + putstr("Bunzipping Linux... ");
209 + bunzip2(input_data, input_len-4, NULL, compr_flush, NULL);
210 +#endif
211 +
212 +#ifdef CONFIG_KERNEL_LZMA
213 + putstr("Unlzmaing Linux... ");
214 + unlzma(input_data, input_len-4, NULL, compr_flush, NULL);
215 +#endif
216 +
217 +#ifdef CONFIG_KERNEL_GZIP
218 makecrc();
219 putstr("Uncompressing Linux... ");
220 gunzip();
221 +#endif
222 putstr("Ok, booting the kernel.\n");
223 return;
224 }
226 --- linux-2.6.22.9/drivers/block/Kconfig
227 +++ linux-2.6.22.9/drivers/block/Kconfig
228 @@ -406,6 +406,30 @@
229 setups function - apparently needed by the rd_load_image routine
230 that supposes the filesystem in the image uses a 1024 blocksize.
232 +config RD_BZIP2
233 + bool "Initial ramdisk compressed using bzip2"
234 + default n
235 + depends on BLK_DEV_INITRD=y
236 + help
237 + Support loading of a bzip2 encoded initial ramdisk or cpio buffer
238 + If unsure, say N.
239 +
240 +config RD_LZMA
241 + bool "Initial ramdisk compressed using lzma"
242 + default n
243 + depends on BLK_DEV_INITRD=y
244 + help
245 + Support loading of an lzma encoded initial ramdisk or cpio buffer.
246 + If unsure, say N.
247 +
248 +config RD_GZIP
249 + bool "Initial ramdisk compressed using gzip"
250 + default y
251 + depends on BLK_DEV_INITRD=y
252 + help
253 + Support loading of a gzip encoded initial ramdisk or cpio buffer.
254 + If unsure, say Y.
255 +
256 config CDROM_PKTCDVD
257 tristate "Packet writing on CD/DVD media"
258 depends on !UML
260 --- linux-2.6.22.9/include/linux/decompress_bunzip2.h
261 +++ linux-2.6.22.9/include/linux/decompress_bunzip2.h
262 @@ -0,0 +1,16 @@
263 +#ifndef DECOMPRESS_BUNZIP2_H
264 +#define DECOMPRESS_BUNZIP2_H
265 +
266 +/* Other housekeeping constants */
267 +#define BZIP2_IOBUF_SIZE 4096
268 +
269 +#ifndef STATIC
270 +#define STATIC /**/
271 +#endif
272 +
273 +STATIC int bunzip2(char *inbuf, int len,
274 + int(*fill)(void*,unsigned int),
275 + int(*writebb)(char*,unsigned int),
276 + int *pos);
277 +
278 +#endif
280 --- linux-2.6.22.9/include/linux/decompress_generic.h
281 +++ linux-2.6.22.9/include/linux/decompress_generic.h
282 @@ -0,0 +1,28 @@
283 +#ifndef DECOMPRESS_GENERIC_H
284 +#define DECOMPRESS_GENERIC_H
285 +
286 +/* Minimal chunksize to be read.
287 + * Bzip2 prefers at least 4096
288 + * Lzma prefers 0x10000 */
289 +#define COMPR_IOBUF_SIZE 4096
290 +
291 +typedef int (*uncompress_fn) (char *inbuf, int len,
292 + int(*fill)(char*,unsigned int),
293 + int(*writebb)(char*,unsigned int),
294 + int *posp);
295 +
296 +/* inbuf - input buffer
297 + * len - len of pre-read data in inbuf
298 + * fill - function to fill inbuf if empty
299 + * writebb - function to write out outbuf
300 + * posp - if non-null, input position (number of bytes read) will be
301 + * returned here
302 + *
303 + * If len != 0, the inbuf is initialized (with as much data), and fill
304 + * should not be called
305 + * If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
306 + * fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
307 + */
308 +
309 +
310 +#endif
312 --- linux-2.6.22.9/include/linux/decompress_unlzma.h
313 +++ linux-2.6.22.9/include/linux/decompress_unlzma.h
314 @@ -0,0 +1,15 @@
315 +#ifndef DECOMPRESS_UNLZMA_H
316 +#define DECOMPRESS_UNLZMA_H
317 +
318 +#define LZMA_IOBUF_SIZE 0x10000
319 +
320 +#ifndef STATIC
321 +#define STATIC /**/
322 +#endif
323 +
324 +STATIC int unlzma(char *inbuf, int len,
325 + int(*fill)(void*,unsigned int),
326 + int(*writebb)(char*,unsigned int),
327 + int *pos);
328 +
329 +#endif
331 --- linux-2.6.22.9/init/do_mounts_rd.c
332 +++ linux-2.6.22.9/init/do_mounts_rd.c
333 @@ -8,6 +8,16 @@
334 #include <linux/initrd.h>
335 #include <linux/string.h>
337 +#ifdef CONFIG_RD_BZIP2
338 +#include <linux/decompress_bunzip2.h>
339 +#undef STATIC
340 +#endif
341 +
342 +#ifdef CONFIG_RD_LZMA
343 +#include <linux/decompress_unlzma.h>
344 +#undef STATIC
345 +#endif
346 +
347 #include "do_mounts.h"
349 #define BUILD_CRAMDISK
350 @@ -30,7 +40,15 @@ static int __init ramdisk_start_setup(ch
351 }
352 __setup("ramdisk_start=", ramdisk_start_setup);
354 +#ifdef CONFIG_RD_GZIP
355 static int __init crd_load(int in_fd, int out_fd);
356 +#endif
357 +#ifdef CONFIG_RD_BZIP2
358 +static int __init crd_load_bzip2(int in_fd, int out_fd);
359 +#endif
360 +#ifdef CONFIG_RD_LZMA
361 +static int __init crd_load_lzma(int in_fd, int out_fd);
362 +#endif
364 /*
365 * This routine tries to find a RAM disk image to load, and returns the
366 @@ -46,7 +64,7 @@ static int __init crd_load(int in_fd, in
367 * gzip
368 */
369 static int __init
370 -identify_ramdisk_image(int fd, int start_block)
371 +identify_ramdisk_image(int fd, int start_block, int *ztype)
372 {
373 const int size = 512;
374 struct minix_super_block *minixsb;
375 @@ -72,6 +90,7 @@ identify_ramdisk_image(int fd, int start
376 sys_lseek(fd, start_block * BLOCK_SIZE, 0);
377 sys_read(fd, buf, size);
379 +#ifdef CONFIG_RD_GZIP
380 /*
381 * If it matches the gzip magic numbers, return -1
382 */
383 @@ -79,9 +98,40 @@ identify_ramdisk_image(int fd, int start
384 printk(KERN_NOTICE
385 "RAMDISK: Compressed image found at block %d\n",
386 start_block);
387 + *ztype = 0;
388 + nblocks = 0;
389 + goto done;
390 + }
391 +#endif
392 +
393 +#ifdef CONFIG_RD_BZIP2
394 + /*
395 + * If it matches the bzip magic numbers, return -1
396 + */
397 + if (buf[0] == 0x42 && (buf[1] == 0x5a)) {
398 + printk(KERN_NOTICE
399 + "RAMDISK: Bzipped image found at block %d\n",
400 + start_block);
401 + *ztype = 1;
402 nblocks = 0;
403 goto done;
404 }
405 +#endif
406 +
407 +#ifdef CONFIG_RD_LZMA
408 + /*
409 + * If it matches the lzma magic numbers, return -1
410 + */
411 + if (buf[0] == 0x5d && (buf[1] == 0x00)) {
412 + printk(KERN_NOTICE
413 + "RAMDISK: Lzma image found at block %d\n",
414 + start_block);
415 + *ztype = 2;
416 + nblocks = 0;
417 + goto done;
418 + }
419 +#endif
420 +
422 /* romfs is at block zero too */
423 if (romfsb->word0 == ROMSB_WORD0 &&
424 @@ -145,6 +195,7 @@ int __init rd_load_image(char *from)
425 int nblocks, i, disk;
426 char *buf = NULL;
427 unsigned short rotate = 0;
428 + int ztype=-1;
429 #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
430 char rotator[4] = { '|' , '/' , '-' , '\\' };
431 #endif
432 @@ -157,14 +208,38 @@ int __init rd_load_image(char *from)
433 if (in_fd < 0)
434 goto noclose_input;
436 - nblocks = identify_ramdisk_image(in_fd, rd_image_start);
437 + nblocks = identify_ramdisk_image(in_fd, rd_image_start, &ztype);
438 if (nblocks < 0)
439 goto done;
441 if (nblocks == 0) {
442 #ifdef BUILD_CRAMDISK
443 - if (crd_load(in_fd, out_fd) == 0)
444 - goto successful_load;
445 + switch(ztype) {
446 +
447 +#ifdef CONFIG_RD_GZIP
448 + case 0:
449 + if (crd_load(in_fd, out_fd) == 0)
450 + goto successful_load;
451 + break;
452 +#endif
453 +
454 +#ifdef CONFIG_RD_BZIP2
455 + case 1:
456 + if (crd_load_bzip2(in_fd, out_fd) == 0)
457 + goto successful_load;
458 + break;
459 +#endif
460 +
461 +#ifdef CONFIG_RD_LZMA
462 + case 2:
463 + if (crd_load_lzma(in_fd, out_fd) == 0)
464 + goto successful_load;
465 + break;
466 +#endif
467 +
468 + default:
469 + break;
470 + }
471 #else
472 printk(KERN_NOTICE
473 "RAMDISK: Kernel does not support compressed "
474 @@ -269,6 +344,7 @@ int __init rd_load_disk(int n)
476 #ifdef BUILD_CRAMDISK
478 +#ifdef CONFIG_RD_GZIP
479 /*
480 * gzip declarations
481 */
482 @@ -296,8 +372,11 @@ static unsigned outcnt; /* bytes in out
483 static int exit_code;
484 static int unzip_error;
485 static long bytes_out;
486 +#endif
487 +
488 static int crd_infd, crd_outfd;
490 +#ifdef CONFIG_RD_GZIP
491 #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
493 /* Diagnostic functions (stubbed out) */
494 @@ -359,7 +438,22 @@ static int __init fill_inbuf(void)
496 return inbuf[0];
497 }
498 +#endif
500 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
501 +static int __init compr_fill(void *buf, unsigned int len)
502 +{
503 + int r = sys_read(crd_infd, buf, len);
504 + if(r < 0) {
505 + printk(KERN_ERR "RAMDISK: error while reading compressed data");
506 + } else if(r == 0) {
507 + printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
508 + }
509 + return r;
510 +}
511 +#endif
512 +
513 +#ifdef CONFIG_RD_GZIP
514 /* ===========================================================================
515 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
516 * (Used for the decompressed data only.)
517 @@ -385,7 +479,24 @@ static void __init flush_window(void)
518 bytes_out += (ulg)outcnt;
519 outcnt = 0;
520 }
521 +#endif
523 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
524 +static int __init compr_flush(void *window, unsigned int outcnt) {
525 + static int progressDots=0;
526 + int written = sys_write(crd_outfd, window, outcnt);
527 + if (written != outcnt) {
528 + printk(KERN_ERR "RAMDISK: incomplete write (%d != %d)\n",
529 + written, outcnt);
530 + }
531 + progressDots = (progressDots+1)%10;
532 + if(!progressDots)
533 + printk(".");
534 + return outcnt;
535 +}
536 +#endif
537 +
538 +#ifdef CONFIG_RD_GZIP
539 static void __init error(char *x)
540 {
541 printk(KERN_ERR "%s\n", x);
542 @@ -425,5 +536,43 @@ static int __init crd_load(int in_fd, in
543 kfree(window);
544 return result;
545 }
546 +#endif
547 +
548 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
549 +static int __init crd_load_compr(int in_fd, int out_fd, int size,
550 + int (*deco)(char *,int,
551 + int(*fill)(void*,unsigned int),
552 + int(*flush)(void*,unsigned int),
553 + int *))
554 +{
555 + int result;
556 + char *inbuf = kmalloc(size, GFP_KERNEL);
557 + crd_infd = in_fd;
558 + crd_outfd = out_fd;
559 + if (inbuf == 0) {
560 + printk(KERN_ERR "RAMDISK: Couldn't allocate decompression buffer\n");
561 + return -1;
562 + }
563 + result=deco(inbuf, 0, compr_fill, compr_flush, NULL);
564 + kfree(inbuf);
565 + printk("\n");
566 + return result;
567 +}
568 +#endif
569 +
570 +#ifdef CONFIG_RD_BZIP2
571 +static int __init crd_load_bzip2(int in_fd, int out_fd)
572 +{
573 + return crd_load_compr(in_fd, out_fd, BZIP2_IOBUF_SIZE, bunzip2);
574 +}
575 +#endif
576 +
577 +#ifdef CONFIG_RD_LZMA
578 +static int __init crd_load_lzma(int in_fd, int out_fd)
579 +{
580 + return crd_load_compr(in_fd, out_fd, LZMA_IOBUF_SIZE, unlzma);
581 +}
582 +
583 +#endif
585 #endif /* BUILD_CRAMDISK */
587 --- linux-2.6.22.9/init/initramfs.c
588 +++ linux-2.6.22.9/init/initramfs.c
589 @@ -7,6 +7,15 @@
590 #include <linux/string.h>
591 #include <linux/syscalls.h>
593 +/* We need to enable RD_GZIP unconditionally, as the built-in
594 + * initramfs is gzip-compressed, alas!
595 + * We can only wonder why, though, as the whole kernel (which contains
596 + * built-in initramfs) is gzip (or bzip) compressed anyway afterwards...
597 + */
598 +#ifndef CONFIG_RD_GZIP
599 +#define CONFIG_RD_GZIP
600 +#endif
601 +
602 static __initdata char *message;
603 static void __init error(char *x)
604 {
605 @@ -347,11 +356,13 @@ static int __init write_buffer(char *buf
606 return len - count;
607 }
609 -static void __init flush_buffer(char *buf, unsigned len)
610 +
611 +static int __init flush_buffer(char *buf, unsigned len)
612 {
613 int written;
614 + int origLen = len;
615 if (message)
616 - return;
617 + return -1;
618 while ((written = write_buffer(buf, len)) < len && !message) {
619 char c = buf[written];
620 if (c == '0') {
621 @@ -365,8 +376,24 @@ static void __init flush_buffer(char *bu
622 } else
623 error("junk in compressed archive");
624 }
625 + return origLen;
626 }
628 +#ifdef CONFIG_RD_BZIP2
629 +#include <linux/decompress_bunzip2.h>
630 +#undef STATIC
631 +
632 +#endif
633 +
634 +#ifdef CONFIG_RD_LZMA
635 +#include <linux/decompress_unlzma.h>
636 +#undef STATIC
637 +
638 +#endif
639 +
640 +static unsigned inptr; /* index of next byte to be processed in inbuf */
641 +
642 +#ifdef CONFIG_RD_GZIP
643 /*
644 * gzip declarations
645 */
646 @@ -388,7 +415,6 @@ static uch *inbuf;
647 static uch *window;
649 static unsigned insize; /* valid bytes in inbuf */
650 -static unsigned inptr; /* index of next byte to be processed in inbuf */
651 static unsigned outcnt; /* bytes in output buffer */
652 static long bytes_out;
654 @@ -440,6 +466,7 @@ static void __init flush_window(void)
655 bytes_out += (ulg)outcnt;
656 outcnt = 0;
657 }
658 +#endif
660 static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
661 {
662 @@ -448,9 +475,11 @@ static char * __init unpack_to_rootfs(ch
663 header_buf = malloc(110);
664 symlink_buf = malloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1);
665 name_buf = malloc(N_ALIGN(PATH_MAX));
666 +#ifdef CONFIG_RD_GZIP
667 window = malloc(WSIZE);
668 if (!window || !header_buf || !symlink_buf || !name_buf)
669 panic("can't allocate buffers");
670 +#endif
671 state = Start;
672 this_header = 0;
673 message = NULL;
674 @@ -470,6 +499,7 @@ static char * __init unpack_to_rootfs(ch
675 continue;
676 }
677 this_header = 0;
678 +#ifdef CONFIG_RD_GZIP
679 insize = len;
680 inbuf = buf;
681 inptr = 0;
682 @@ -477,14 +507,38 @@ static char * __init unpack_to_rootfs(ch
683 bytes_out = 0;
684 crc = (ulg)0xffffffffL; /* shift register contents */
685 makecrc();
686 - gunzip();
687 + if(!gunzip() && message == NULL)
688 + goto ok;
689 +#endif
690 +
691 +#ifdef CONFIG_RD_BZIP2
692 + message = NULL; /* Zero out message, or else cpio will
693 + think an error has already occurred */
694 + if(!bunzip2(buf, len, NULL, flush_buffer, &inptr) < 0 &&
695 + message == NULL) {
696 + goto ok;
697 + }
698 +#endif
699 +
700 +#ifdef CONFIG_RD_LZMA
701 + message = NULL; /* Zero out message, or else cpio will
702 + think an error has already occurred */
703 + if(!unlzma(buf, len, NULL, flush_buffer, &inptr) < 0 &&
704 + message == NULL) {
705 + goto ok;
706 + }
707 +#endif
708 + ok:
709 +
710 if (state != Reset)
711 - error("junk in gzipped archive");
712 + error("junk in compressed archive");
713 this_header = saved_offset + inptr;
714 buf += inptr;
715 len -= inptr;
716 }
717 +#ifdef CONFIG_RD_GZIP
718 free(window);
719 +#endif
720 free(name_buf);
721 free(symlink_buf);
722 free(header_buf);
724 --- linux-2.6.22.9/init/Kconfig
725 +++ linux-2.6.22.9/init/Kconfig
726 @@ -95,6 +95,56 @@
728 which is done within the script "scripts/setlocalversion".)
730 +choice
731 + prompt "Kernel compression mode"
732 + default KERNEL_GZIP
733 + help
734 + The linux kernel is a kind of self-extracting executable.
735 + Several compression algorithms are available, which differ
736 + in efficiency, compression and decompression speed.
737 + Compression speed is only relevant when building a kernel.
738 + Decompression speed is relevant at each boot.
739 +
740 + If you have any problems with bzip2 or lzma compressed
741 + kernels, mail me (Alain Knaff) <alain@knaff.lu>. (An older
742 + version of this functionality (bzip2 only), for 2.4, was
743 + supplied by Christian Ludwig)
744 +
745 + High compression options are mostly useful for users, who
746 + are low on disk space (embedded systems), but for whom ram
747 + size matters less.
748 +
749 + If in doubt, select 'gzip'
750 +
751 +config KERNEL_GZIP
752 + bool "Gzip"
753 + help
754 + The old and tried gzip compression. Its compression ratio is
755 + the poorest among the 3 choices; however its speed (both
756 + compression and decompression) is the fastest.
757 +
758 +config KERNEL_BZIP2
759 + bool "Bzip2"
760 + help
761 + Its compression ratio and speed is intermediate.
762 + Decompression speed is slowest among the 3.
763 + The kernel size is about 10 per cent smaller with bzip2,
764 + in comparison to gzip.
765 + Bzip2 uses a large amount of memory. For modern kernels
766 + you will need at least 8MB RAM or more for booting.
767 +
768 +config KERNEL_LZMA
769 + bool "LZMA"
770 + help
771 + The most recent compression algorithm.
772 + Its ratio is best, decompression speed is between the other
773 + 2. Compression is slowest.
774 + The kernel size is about 33 per cent smaller with lzma,
775 + in comparison to gzip.
776 +
777 +endchoice
778 +
779 +
780 config SWAP
781 bool "Support for paging of anonymous memory (swap)"
782 depends on MMU && BLOCK
784 --- linux-2.6.22.9/lib/decompress_bunzip2.c
785 +++ linux-2.6.22.9/lib/decompress_bunzip2.c
786 @@ -0,0 +1,645 @@
787 +/* vi: set sw=4 ts=4: */
788 +/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
789 +
790 + Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
791 + which also acknowledges contributions by Mike Burrows, David Wheeler,
792 + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
793 + Robert Sedgewick, and Jon L. Bentley.
794 +
795 + This code is licensed under the LGPLv2:
796 + LGPL (http://www.gnu.org/copyleft/lgpl.html)
797 +*/
798 +
799 +/*
800 + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
801 +
802 + More efficient reading of Huffman codes, a streamlined read_bunzip()
803 + function, and various other tweaks. In (limited) tests, approximately
804 + 20% faster than bzcat on x86 and about 10% faster on arm.
805 +
806 + Note that about 2/3 of the time is spent in read_unzip() reversing
807 + the Burrows-Wheeler transformation. Much of that time is delay
808 + resulting from cache misses.
809 +
810 + I would ask that anyone benefiting from this work, especially those
811 + using it in commercial products, consider making a donation to my local
812 + non-profit hospice organization in the name of the woman I loved, who
813 + passed away Feb. 12, 2003.
814 +
815 + In memory of Toni W. Hagan
816 +
817 + Hospice of Acadiana, Inc.
818 + 2600 Johnston St., Suite 200
819 + Lafayette, LA 70503-3240
820 +
821 + Phone (337) 232-1234 or 1-800-738-2226
822 + Fax (337) 232-1297
823 +
824 + http://www.hospiceacadiana.com/
825 +
826 + Manuel
827 + */
828 +
829 +/*
830 + Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
831 +*/
832 +
833 +
834 +#ifndef STATIC
835 +
836 +#include <linux/kernel.h>
837 +#include <linux/fs.h>
838 +#include <linux/string.h>
839 +
840 +#ifdef TEST
841 +#include "test.h"
842 +#else
843 +#include <linux/vmalloc.h>
844 +#endif
845 +
846 +static void __init *large_malloc(size_t size)
847 +{
848 + return vmalloc(size);
849 +}
850 +
851 +static void __init large_free(void *where)
852 +{
853 + vfree(where);
854 +}
855 +
856 +#ifndef TEST
857 +static void __init *malloc(size_t size)
858 +{
859 + return kmalloc(size, GFP_KERNEL);
860 +}
861 +
862 +static void __init free(void *where)
863 +{
864 + kfree(where);
865 +}
866 +
867 +static void __init error(char *x)
868 +{
869 + printk(KERN_ERR "%s\n", x);
870 +}
871 +#endif
872 +
873 +#define STATIC /**/
874 +
875 +#endif
876 +
877 +#include <linux/decompress_bunzip2.h>
878 +
879 +
880 +/* Constants for Huffman coding */
881 +#define MAX_GROUPS 6
882 +#define GROUP_SIZE 50 /* 64 would have been more efficient */
883 +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
884 +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
885 +#define SYMBOL_RUNA 0
886 +#define SYMBOL_RUNB 1
887 +
888 +/* Status return values */
889 +#define RETVAL_OK 0
890 +#define RETVAL_LAST_BLOCK (-1)
891 +#define RETVAL_NOT_BZIP_DATA (-2)
892 +#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
893 +#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
894 +#define RETVAL_DATA_ERROR (-5)
895 +#define RETVAL_OUT_OF_MEMORY (-6)
896 +#define RETVAL_OBSOLETE_INPUT (-7)
897 +
898 +
899 +/* This is what we know about each Huffman coding group */
900 +struct group_data {
901 + /* We have an extra slot at the end of limit[] for a sentinel value. */
902 + int limit[MAX_HUFCODE_BITS+1],base[MAX_HUFCODE_BITS],permute[MAX_SYMBOLS];
903 + int minLen, maxLen;
904 +};
905 +
906 +/* Structure holding all the housekeeping data, including IO buffers and
907 + memory that persists between calls to bunzip */
908 +typedef struct {
909 + /* State for interrupting output loop */
910 + int writeCopies,writePos,writeRunCountdown,writeCount,writeCurrent;
911 + /* I/O tracking data (file handles, buffers, positions, etc.) */
912 + int (*fill)(void*,unsigned int);
913 + int inbufCount,inbufPos /*,outbufPos*/;
914 + unsigned char *inbuf /*,*outbuf*/;
915 + unsigned int inbufBitCount, inbufBits;
916 + /* The CRC values stored in the block header and calculated from the data */
917 + unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC;
918 + /* Intermediate buffer and its size (in bytes) */
919 + unsigned int *dbuf, dbufSize;
920 + /* These things are a bit too big to go on the stack */
921 + unsigned char selectors[32768]; /* nSelectors=15 bits */
922 + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
923 + int io_error; /* non-zero if we have IO error */
924 +} bunzip_data;
925 +
926 +
927 +/* Return the next nnn bits of input. All reads from the compressed input
928 + are done through this function. All reads are big endian */
929 +static unsigned int get_bits(bunzip_data *bd, char bits_wanted)
930 +{
931 + unsigned int bits=0;
932 +
933 + /* If we need to get more data from the byte buffer, do so. (Loop getting
934 + one byte at a time to enforce endianness and avoid unaligned access.) */
935 + while (bd->inbufBitCount<bits_wanted) {
936 + /* If we need to read more data from file into byte buffer, do so */
937 + if(bd->inbufPos==bd->inbufCount) {
938 + if(bd->io_error)
939 + return 0;
940 + if((bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE)) <= 0) {
941 + bd->io_error=RETVAL_UNEXPECTED_INPUT_EOF;
942 + return 0;
943 + }
944 + bd->inbufPos=0;
945 + }
946 + /* Avoid 32-bit overflow (dump bit buffer to top of output) */
947 + if(bd->inbufBitCount>=24) {
948 + bits=bd->inbufBits&((1<<bd->inbufBitCount)-1);
949 + bits_wanted-=bd->inbufBitCount;
950 + bits<<=bits_wanted;
951 + bd->inbufBitCount=0;
952 + }
953 + /* Grab next 8 bits of input from buffer. */
954 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
955 + bd->inbufBitCount+=8;
956 + }
957 + /* Calculate result */
958 + bd->inbufBitCount-=bits_wanted;
959 + bits|=(bd->inbufBits>>bd->inbufBitCount)&((1<<bits_wanted)-1);
960 +
961 + return bits;
962 +}
963 +
964 +/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
965 +
966 +static int get_next_block(bunzip_data *bd)
967 +{
968 + struct group_data *hufGroup=NULL;
969 + int *base=NULL;
970 + int *limit=NULL;
971 + int dbufCount,nextSym,dbufSize,groupCount,selector,
972 + i,j,k,t,runPos,symCount,symTotal,nSelectors,byteCount[256];
973 + unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
974 + unsigned int *dbuf,origPtr;
975 +
976 + dbuf=bd->dbuf;
977 + dbufSize=bd->dbufSize;
978 + selectors=bd->selectors;
979 +
980 + /* Read in header signature and CRC, then validate signature.
981 + (last block signature means CRC is for whole file, return now) */
982 + i = get_bits(bd,24);
983 + j = get_bits(bd,24);
984 + bd->headerCRC=get_bits(bd,32);
985 + if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK;
986 + if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA;
987 + /* We can add support for blockRandomised if anybody complains. There was
988 + some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
989 + it didn't actually work. */
990 + if(get_bits(bd,1)) return RETVAL_OBSOLETE_INPUT;
991 + if((origPtr=get_bits(bd,24)) > dbufSize) return RETVAL_DATA_ERROR;
992 + /* mapping table: if some byte values are never used (encoding things
993 + like ascii text), the compression code removes the gaps to have fewer
994 + symbols to deal with, and writes a sparse bitfield indicating which
995 + values were present. We make a translation table to convert the symbols
996 + back to the corresponding bytes. */
997 + t=get_bits(bd, 16);
998 + symTotal=0;
999 + for (i=0;i<16;i++) {
1000 + if(t&(1<<(15-i))) {
1001 + k=get_bits(bd,16);
1002 + for(j=0;j<16;j++)
1003 + if(k&(1<<(15-j))) symToByte[symTotal++]=(16*i)+j;
1004 + }
1005 + }
1006 + /* How many different Huffman coding groups does this block use? */
1007 + groupCount=get_bits(bd,3);
1008 + if (groupCount<2 || groupCount>MAX_GROUPS) return RETVAL_DATA_ERROR;
1009 + /* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
1010 + group. Read in the group selector list, which is stored as MTF encoded
1011 + bit runs. (MTF=Move To Front, as each value is used it's moved to the
1012 + start of the list.) */
1013 + if(!(nSelectors=get_bits(bd, 15))) return RETVAL_DATA_ERROR;
1014 + for(i=0; i<groupCount; i++) mtfSymbol[i] = i;
1015 + for(i=0; i<nSelectors; i++) {
1016 + /* Get next value */
1017 + for(j=0;get_bits(bd,1);j++) if (j>=groupCount) return RETVAL_DATA_ERROR;
1018 + /* Decode MTF to get the next selector */
1019 + uc = mtfSymbol[j];
1020 + for(;j;j--) mtfSymbol[j] = mtfSymbol[j-1];
1021 + mtfSymbol[0]=selectors[i]=uc;
1022 + }
1023 + /* Read the Huffman coding tables for each group, which code for symTotal
1024 + literal symbols, plus two run symbols (RUNA, RUNB) */
1025 + symCount=symTotal+2;
1026 + for (j=0; j<groupCount; j++) {
1027 + unsigned char length[MAX_SYMBOLS],temp[MAX_HUFCODE_BITS+1];
1028 + int minLen, maxLen, pp;
1029 + /* Read Huffman code lengths for each symbol. They're stored in
1030 + a way similar to mtf; record a starting value for the first symbol,
1031 + and an offset from the previous value for every symbol after that.
1032 + (Subtracting 1 before the loop and then adding it back at the end is
1033 + an optimization that makes the test inside the loop simpler: symbol
1034 + length 0 becomes negative, so an unsigned inequality catches it.) */
1035 + t=get_bits(bd, 5)-1;
1036 + for (i = 0; i < symCount; i++) {
1037 + for(;;) {
1038 + if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
1039 + return RETVAL_DATA_ERROR;
1040 + /* If first bit is 0, stop. Else second bit indicates whether
1041 + to increment or decrement the value. Optimization: grab 2
1042 + bits and unget the second if the first was 0. */
1043 + k = get_bits(bd,2);
1044 + if (k < 2) {
1045 + bd->inbufBitCount++;
1046 + break;
1047 + }
1048 + /* Add one if second bit 1, else subtract 1. Avoids if/else */
1049 + t+=(((k+1)&2)-1);
1050 + }
1051 + /* Correct for the initial -1, to get the final symbol length */
1052 + length[i]=t+1;
1053 + }
1054 + /* Find largest and smallest lengths in this group */
1055 + minLen=maxLen=length[0];
1056 + for(i = 1; i < symCount; i++) {
1057 + if(length[i] > maxLen) maxLen = length[i];
1058 + else if(length[i] < minLen) minLen = length[i];
1059 + }
1060 + /* Calculate permute[], base[], and limit[] tables from length[].
1061 + *
1062 + * permute[] is the lookup table for converting Huffman coded symbols
1063 + * into decoded symbols. base[] is the amount to subtract from the
1064 + * value of a Huffman symbol of a given length when using permute[].
1065 + *
1066 + * limit[] indicates the largest numerical value a symbol with a given
1067 + * number of bits can have. This is how the Huffman codes can vary in
1068 + * length: each code with a value>limit[length] needs another bit.
1069 + */
1070 + hufGroup=bd->groups+j;
1071 + hufGroup->minLen = minLen;
1072 + hufGroup->maxLen = maxLen;
1073 + /* Note that minLen can't be smaller than 1, so we adjust the base
1074 + and limit array pointers so we're not always wasting the first
1075 + entry. We do this again when using them (during symbol decoding).*/
1076 + base=hufGroup->base-1;
1077 + limit=hufGroup->limit-1;
1078 + /* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
1079 + pp=0;
1080 + for(i=minLen;i<=maxLen;i++) {
1081 + temp[i]=limit[i]=0;
1082 + for(t=0;t<symCount;t++)
1083 + if(length[t]==i) hufGroup->permute[pp++] = t;
1084 + }
1085 + /* Count symbols coded for at each bit length */
1086 + for (i=0;i<symCount;i++) temp[length[i]]++;
1087 + /* Calculate limit[] (the largest symbol-coding value at each bit
1088 + * length, which is (previous limit<<1)+symbols at this level), and
1089 + * base[] (number of symbols to ignore at each bit length, which is
1090 + * limit minus the cumulative count of symbols coded for already). */
1091 + pp=t=0;
1092 + for (i=minLen; i<maxLen; i++) {
1093 + pp+=temp[i];
1094 + /* We read the largest possible symbol size and then unget bits
1095 + after determining how many we need, and those extra bits could
1096 + be set to anything. (They're noise from future symbols.) At
1097 + each level we're really only interested in the first few bits,
1098 + so here we set all the trailing to-be-ignored bits to 1 so they
1099 + don't affect the value>limit[length] comparison. */
1100 + limit[i]= (pp << (maxLen - i)) - 1;
1101 + pp<<=1;
1102 + base[i+1]=pp-(t+=temp[i]);
1103 + }
1104 + limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
1105 + limit[maxLen]=pp+temp[maxLen]-1;
1106 + base[minLen]=0;
1107 + }
1108 + /* We've finished reading and digesting the block header. Now read this
1109 + block's Huffman coded symbols from the file and undo the Huffman coding
1110 + and run length encoding, saving the result into dbuf[dbufCount++]=uc */
1111 +
1112 + /* Initialize symbol occurrence counters and symbol Move To Front table */
1113 + for(i=0;i<256;i++) {
1114 + byteCount[i] = 0;
1115 + mtfSymbol[i]=(unsigned char)i;
1116 + }
1117 + /* Loop through compressed symbols. */
1118 + runPos=dbufCount=symCount=selector=0;
1119 + for(;;) {
1120 + /* Determine which Huffman coding group to use. */
1121 + if(!(symCount--)) {
1122 + symCount=GROUP_SIZE-1;
1123 + if(selector>=nSelectors) return RETVAL_DATA_ERROR;
1124 + hufGroup=bd->groups+selectors[selector++];
1125 + base=hufGroup->base-1;
1126 + limit=hufGroup->limit-1;
1127 + }
1128 + /* Read next Huffman-coded symbol. */
1129 + /* Note: It is far cheaper to read maxLen bits and back up than it is
1130 + to read minLen bits and then an additional bit at a time, testing
1131 + as we go. Because there is a trailing last block (with file CRC),
1132 + there is no danger of the overread causing an unexpected EOF for a
1133 + valid compressed file. As a further optimization, we do the read
1134 + inline (falling back to a call to get_bits if the buffer runs
1135 + dry). The following (up to got_huff_bits:) is equivalent to
1136 + j=get_bits(bd,hufGroup->maxLen);
1137 + */
1138 + while (bd->inbufBitCount<hufGroup->maxLen) {
1139 + if(bd->inbufPos==bd->inbufCount) {
1140 + j = get_bits(bd,hufGroup->maxLen);
1141 + goto got_huff_bits;
1142 + }
1143 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
1144 + bd->inbufBitCount+=8;
1145 + };
1146 + bd->inbufBitCount-=hufGroup->maxLen;
1147 + j = (bd->inbufBits>>bd->inbufBitCount)&((1<<hufGroup->maxLen)-1);
1148 +got_huff_bits:
1149 + /* Figure out how many bits are in next symbol and unget extras */
1150 + i=hufGroup->minLen;
1151 + while(j>limit[i]) ++i;
1152 + bd->inbufBitCount += (hufGroup->maxLen - i);
1153 + /* Huffman decode value to get nextSym (with bounds checking) */
1154 + if ((i > hufGroup->maxLen)
1155 + || (((unsigned)(j=(j>>(hufGroup->maxLen-i))-base[i]))
1156 + >= MAX_SYMBOLS))
1157 + return RETVAL_DATA_ERROR;
1158 + nextSym = hufGroup->permute[j];
1159 + /* We have now decoded the symbol, which indicates either a new literal
1160 + byte, or a repeated run of the most recent literal byte. First,
1161 + check if nextSym indicates a repeated run, and if so loop collecting
1162 + how many times to repeat the last literal. */
1163 + if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
1164 + /* If this is the start of a new run, zero out counter */
1165 + if(!runPos) {
1166 + runPos = 1;
1167 + t = 0;
1168 + }
1169 + /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
1170 + each bit position, add 1 or 2 instead. For example,
1171 + 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
1172 + You can make any bit pattern that way using 1 less symbol than
1173 + the basic or 0/1 method (except all bits 0, which would use no
1174 + symbols, but a run of length 0 doesn't mean anything in this
1175 + context). Thus space is saved. */
1176 + t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
1177 + runPos <<= 1;
1178 + continue;
1179 + }
1180 + /* When we hit the first non-run symbol after a run, we now know
1181 + how many times to repeat the last literal, so append that many
1182 + copies to our buffer of decoded symbols (dbuf) now. (The last
1183 + literal used is the one at the head of the mtfSymbol array.) */
1184 + if(runPos) {
1185 + runPos=0;
1186 + if(dbufCount+t>=dbufSize) return RETVAL_DATA_ERROR;
1187 +
1188 + uc = symToByte[mtfSymbol[0]];
1189 + byteCount[uc] += t;
1190 + while(t--) dbuf[dbufCount++]=uc;
1191 + }
1192 + /* Is this the terminating symbol? */
1193 + if(nextSym>symTotal) break;
1194 + /* At this point, nextSym indicates a new literal character. Subtract
1195 + one to get the position in the MTF array at which this literal is
1196 + currently to be found. (Note that the result can't be -1 or 0,
1197 + because 0 and 1 are RUNA and RUNB. But another instance of the
1198 + first symbol in the mtf array, position 0, would have been handled
1199 + as part of a run above. Therefore 1 unused mtf position minus
1200 + 2 non-literal nextSym values equals -1.) */
1201 + if(dbufCount>=dbufSize) return RETVAL_DATA_ERROR;
1202 + i = nextSym - 1;
1203 + uc = mtfSymbol[i];
1204 + /* Adjust the MTF array. Since we typically expect to move only a
1205 + * small number of symbols, and are bound by 256 in any case, using
1206 + * memmove here would typically be bigger and slower due to function
1207 + * call overhead and other assorted setup costs. */
1208 + do {
1209 + mtfSymbol[i] = mtfSymbol[i-1];
1210 + } while (--i);
1211 + mtfSymbol[0] = uc;
1212 + uc=symToByte[uc];
1213 + /* We have our literal byte. Save it into dbuf. */
1214 + byteCount[uc]++;
1215 + dbuf[dbufCount++] = (unsigned int)uc;
1216 + }
1217 + /* At this point, we've read all the Huffman-coded symbols (and repeated
1218 + runs) for this block from the input stream, and decoded them into the
1219 + intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
1220 + Now undo the Burrows-Wheeler transform on dbuf.
1221 + See http://dogma.net/markn/articles/bwt/bwt.htm
1222 + */
1223 + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
1224 + j=0;
1225 + for(i=0;i<256;i++) {
1226 + k=j+byteCount[i];
1227 + byteCount[i] = j;
1228 + j=k;
1229 + }
1230 + /* Figure out what order dbuf would be in if we sorted it. */
1231 + for (i=0;i<dbufCount;i++) {
1232 + uc=(unsigned char)(dbuf[i] & 0xff);
1233 + dbuf[byteCount[uc]] |= (i << 8);
1234 + byteCount[uc]++;
1235 + }
1236 + /* Decode first byte by hand to initialize "previous" byte. Note that it
1237 + doesn't get output, and if the first three characters are identical
1238 + it doesn't qualify as a run (hence writeRunCountdown=5). */
1239 + if(dbufCount) {
1240 + if(origPtr>=dbufCount) return RETVAL_DATA_ERROR;
1241 + bd->writePos=dbuf[origPtr];
1242 + bd->writeCurrent=(unsigned char)(bd->writePos&0xff);
1243 + bd->writePos>>=8;
1244 + bd->writeRunCountdown=5;
1245 + }
1246 + bd->writeCount=dbufCount;
1247 +
1248 + return RETVAL_OK;
1249 +}
1250 +
1251 +/* Undo Burrows-Wheeler transform on intermediate buffer to produce output.
1252 + If start_bunzip was initialized with out_fd=-1, then up to len bytes of
1253 + data are written to outbuf. Return value is number of bytes written or
1254 + error (all errors are negative numbers). If out_fd!=-1, outbuf and len
1255 + are ignored, data is written to out_fd and return is RETVAL_OK or error.
1256 +*/
1257 +
1258 +static int read_bunzip(bunzip_data *bd, char *outbuf, int len)
1259 +{
1260 + const unsigned int *dbuf;
1261 + int pos,xcurrent,previous,gotcount;
1262 +
1263 + /* If last read was short due to end of file, return last block now */
1264 + if(bd->writeCount<0) return bd->writeCount;
1265 +
1266 + gotcount = 0;
1267 + dbuf=bd->dbuf;
1268 + pos=bd->writePos;
1269 + xcurrent=bd->writeCurrent;
1270 +
1271 + /* We will always have pending decoded data to write into the output
1272 + buffer unless this is the very first call (in which case we haven't
1273 + Huffman-decoded a block into the intermediate buffer yet). */
1274 +
1275 + if (bd->writeCopies) {
1276 + /* Inside the loop, writeCopies means extra copies (beyond 1) */
1277 + --bd->writeCopies;
1278 + /* Loop outputting bytes */
1279 + for(;;) {
1280 + /* If the output buffer is full, snapshot state and return */
1281 + if(gotcount >= len) {
1282 + bd->writePos=pos;
1283 + bd->writeCurrent=xcurrent;
1284 + bd->writeCopies++;
1285 + return len;
1286 + }
1287 + /* Write next byte into output buffer, updating CRC */
1288 + outbuf[gotcount++] = xcurrent;
1289 + bd->writeCRC=(((bd->writeCRC)<<8)
1290 + ^bd->crc32Table[((bd->writeCRC)>>24)^xcurrent]);
1291 + /* Loop now if we're outputting multiple copies of this byte */
1292 + if (bd->writeCopies) {
1293 + --bd->writeCopies;
1294 + continue;
1295 + }
1296 +decode_next_byte:
1297 + if (!bd->writeCount--) break;
1298 + /* Follow sequence vector to undo Burrows-Wheeler transform */
1299 + previous=xcurrent;
1300 + pos=dbuf[pos];
1301 + xcurrent=pos&0xff;
1302 + pos>>=8;
1303 + /* After 3 consecutive copies of the same byte, the 4th is a repeat
1304 + count. We count down from 4 instead
1305 + * of counting up because testing for non-zero is faster */
1306 + if(--bd->writeRunCountdown) {
1307 + if(xcurrent!=previous) bd->writeRunCountdown=4;
1308 + } else {
1309 + /* We have a repeated run, this byte indicates the count */
1310 + bd->writeCopies=xcurrent;
1311 + xcurrent=previous;
1312 + bd->writeRunCountdown=5;
1313 + /* Sometimes there are just 3 bytes (run length 0) */
1314 + if(!bd->writeCopies) goto decode_next_byte;
1315 + /* Subtract the 1 copy we'd output anyway to get extras */
1316 + --bd->writeCopies;
1317 + }
1318 + }
1319 + /* Decompression of this block completed successfully */
1320 + bd->writeCRC=~bd->writeCRC;
1321 + bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC;
1322 + /* If this block had a CRC error, force file level CRC error. */
1323 + if(bd->writeCRC!=bd->headerCRC) {
1324 + bd->totalCRC=bd->headerCRC+1;
1325 + return RETVAL_LAST_BLOCK;
1326 + }
1327 + }
1328 +
1329 + /* Refill the intermediate buffer by Huffman-decoding next block of input */
1330 + /* (previous is just a convenient unused temp variable here) */
1331 + previous=get_next_block(bd);
1332 + if(previous) {
1333 + bd->writeCount=previous;
1334 + return (previous!=RETVAL_LAST_BLOCK) ? previous : gotcount;
1335 + }
1336 + bd->writeCRC=0xffffffffUL;
1337 + pos=bd->writePos;
1338 + xcurrent=bd->writeCurrent;
1339 + goto decode_next_byte;
1340 +}
1341 +
1342 +static int nofill(void *buf,unsigned int len) {
1343 + return -1;
1344 +}
1345 +
1346 +/* Allocate the structure, read file header. If in_fd==-1, inbuf must contain
1347 + a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
1348 + ignored, and data is read from file handle into temporary buffer. */
1349 +static int start_bunzip(bunzip_data **bdp, void *inbuf, int len,
1350 + int (*fill)(void*,unsigned int))
1351 +{
1352 + bunzip_data *bd;
1353 + unsigned int i,j,c;
1354 + const unsigned int BZh0=(((unsigned int)'B')<<24)+(((unsigned int)'Z')<<16)
1355 + +(((unsigned int)'h')<<8)+(unsigned int)'0';
1356 +
1357 + /* Figure out how much data to allocate */
1358 + i=sizeof(bunzip_data);
1359 +
1360 + /* Allocate bunzip_data. Most fields initialize to zero. */
1361 + bd=*bdp=malloc(i);
1362 + memset(bd,0,sizeof(bunzip_data));
1363 + /* Setup input buffer */
1364 + bd->inbuf=inbuf;
1365 + bd->inbufCount=len;
1366 + if(fill != NULL)
1367 + bd->fill=fill;
1368 + else
1369 + bd->fill=nofill;
1370 +
1371 + /* Init the CRC32 table (big endian) */
1372 + for(i=0;i<256;i++) {
1373 + c=i<<24;
1374 + for(j=8;j;j--)
1375 + c=c&0x80000000 ? (c<<1)^0x04c11db7 : (c<<1);
1376 + bd->crc32Table[i]=c;
1377 + }
1378 +
1379 + /* Ensure that file starts with "BZh['1'-'9']." */
1380 + i = get_bits(bd,32);
1381 + if (((unsigned int)(i-BZh0-1)) >= 9) return RETVAL_NOT_BZIP_DATA;
1382 +
1383 + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
1384 + uncompressed data. Allocate intermediate buffer for block. */
1385 + bd->dbufSize=100000*(i-BZh0);
1386 +
1387 + bd->dbuf=large_malloc(bd->dbufSize * sizeof(int));
1388 + return RETVAL_OK;
1389 +}
1390 +
1391 +/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip data,
1392 + not end of file.) */
1393 +STATIC int bunzip2(char *inbuf, int len,
1394 + int(*fill)(void*,unsigned int),
1395 + int(*writebb)(char*,unsigned int),
1396 + int *pos)
1397 +{
1398 + char *outbuf;
1399 + bunzip_data *bd;
1400 + int i;
1401 +
1402 + outbuf=malloc(BZIP2_IOBUF_SIZE);
1403 + if(!(i=start_bunzip(&bd,inbuf,len,fill))) {
1404 + for(;;) {
1405 + if((i=read_bunzip(bd,outbuf,BZIP2_IOBUF_SIZE)) <= 0) break;
1406 + if(i!=writebb(outbuf,i)) {
1407 + i=RETVAL_UNEXPECTED_OUTPUT_EOF;
1408 + break;
1409 + }
1410 + }
1411 + }
1412 + /* Check CRC and release memory */
1413 + if(i==RETVAL_LAST_BLOCK) {
1414 + if (bd->headerCRC!=bd->totalCRC) {
1415 + error("Data integrity error when decompressing.");
1416 + } else {
1417 + i=RETVAL_OK;
1418 + }
1419 + }
1420 + else if (i==RETVAL_UNEXPECTED_OUTPUT_EOF) {
1421 + error("Compressed file ends unexpectedly");
1422 + }
1423 + if(bd->dbuf) large_free(bd->dbuf);
1424 + if(pos)
1425 + *pos = bd->inbufPos;
1426 + free(bd);
1427 + free(outbuf);
1428 +
1429 + return i;
1430 +}
1431 +
1433 --- linux-2.6.22.9/lib/decompress_unlzma.c
1434 +++ linux-2.6.22.9/lib/decompress_unlzma.c
1435 @@ -0,0 +1,605 @@
1436 +/* Lzma decompressor for Linux kernel. Shamelessly snarfed
1437 + * from busybox 1.1.1
1438 + *
1439 + * Linux kernel adaptation
1440 + * Copyright (C) 2006 Alain <alain@knaff.lu>
1441 + *
1442 + * Based on small lzma deflate implementation/Small range coder
1443 + * implementation for lzma.
1444 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1445 + *
1446 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1447 + * Copyright (C) 1999-2005 Igor Pavlov
1448 + *
1449 + * Copyrights of the parts, see headers below.
1450 + *
1451 + *
1452 + * This program is free software; you can redistribute it and/or
1453 + * modify it under the terms of the GNU Lesser General Public
1454 + * License as published by the Free Software Foundation; either
1455 + * version 2.1 of the License, or (at your option) any later version.
1456 + *
1457 + * This program is distributed in the hope that it will be useful,
1458 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1459 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1460 + * Lesser General Public License for more details.
1461 + *
1462 + * You should have received a copy of the GNU Lesser General Public
1463 + * License along with this library; if not, write to the Free Software
1464 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1465 + */
1466 +
1467 +#ifndef STATIC
1468 +
1469 +#include <linux/kernel.h>
1470 +#include <linux/fs.h>
1471 +#include <linux/string.h>
1472 +
1473 +#ifdef TEST
1474 +#include "test.h"
1475 +#else
1476 +#include <linux/vmalloc.h>
1477 +#endif
1478 +
1479 +static void __init *large_malloc(size_t size)
1480 +{
1481 + return vmalloc(size);
1482 +}
1483 +
1484 +static void __init large_free(void *where)
1485 +{
1486 + vfree(where);
1487 +}
1488 +
1489 +#ifndef TEST
1490 +static void __init *malloc(size_t size)
1491 +{
1492 + return kmalloc(size, GFP_KERNEL);
1493 +}
1494 +
1495 +static void __init free(void *where)
1496 +{
1497 + kfree(where);
1498 +}
1499 +
1500 +static void __init error(char *x)
1501 +{
1502 + printk(KERN_ERR "%s\n", x);
1503 +}
1504 +
1505 +#endif
1506 +
1507 +#define STATIC /**/
1508 +
1509 +#endif
1510 +
1511 +#include <linux/decompress_unlzma.h>
1512 +
1513 +#define MIN(a,b) (((a)<(b))?(a):(b))
1514 +
1515 +static long long read_int(unsigned char *ptr, int size)
1516 +{
1517 + int i;
1518 + long long ret=0;
1519 +
1520 + for(i=0; i<size; i++) {
1521 + ret = (ret << 8) | ptr[size-i-1];
1522 + }
1523 + return ret;
1524 +}
1525 +
1526 +#define ENDIAN_CONVERT(x) x=(typeof(x))read_int((unsigned char*)&x,sizeof(x))
1527 +
1528 +
1529 +/* Small range coder implementation for lzma.
1530 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1531 + *
1532 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1533 + * Copyright (c) 1999-2005 Igor Pavlov
1534 + */
1535 +
1536 +#ifndef always_inline
1537 +# if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >0)
1538 +# define always_inline __attribute__((always_inline)) inline
1539 +# else
1540 +# define always_inline inline
1541 +# endif
1542 +#endif
1543 +
1544 +#ifdef CONFIG_FEATURE_LZMA_FAST
1545 +# define speed_inline always_inline
1546 +#else
1547 +# define speed_inline
1548 +#endif
1549 +
1550 +
1551 +typedef struct {
1552 + int (*fill)(void*,unsigned int);
1553 + uint8_t *ptr;
1554 + uint8_t *buffer;
1555 + uint8_t *buffer_end;
1556 + int buffer_size;
1557 + uint32_t code;
1558 + uint32_t range;
1559 + uint32_t bound;
1560 +} rc_t;
1561 +
1562 +
1563 +#define RC_TOP_BITS 24
1564 +#define RC_MOVE_BITS 5
1565 +#define RC_MODEL_TOTAL_BITS 11
1566 +
1567 +
1568 +/* Called twice: once at startup and once in rc_normalize() */
1569 +static void rc_read(rc_t * rc)
1570 +{
1571 + rc->buffer_size = rc->fill((char*)rc->buffer, LZMA_IOBUF_SIZE);
1572 + if (rc->buffer_size <= 0)
1573 + error("unexpected EOF");
1574 + rc->ptr = rc->buffer;
1575 + rc->buffer_end = rc->buffer + rc->buffer_size;
1576 +}
1577 +
1578 +/* Called once */
1579 +static always_inline void rc_init(rc_t * rc, int (*fill)(void*,unsigned int),
1580 + char *buffer, int buffer_size)
1581 +{
1582 + rc->fill = fill;
1583 + rc->buffer = (uint8_t *)buffer;
1584 + rc->buffer_size = buffer_size;
1585 + rc->buffer_end = rc->buffer + rc->buffer_size;
1586 + rc->ptr = rc->buffer;
1587 +
1588 + rc->code = 0;
1589 + rc->range = 0xFFFFFFFF;
1590 +}
1591 +
1592 +static always_inline void rc_init_code(rc_t * rc)
1593 +{
1594 + int i;
1595 +
1596 + for (i = 0; i < 5; i++) {
1597 + if (rc->ptr >= rc->buffer_end)
1598 + rc_read(rc);
1599 + rc->code = (rc->code << 8) | *rc->ptr++;
1600 + }
1601 +}
1602 +
1603 +
1604 +/* Called once. TODO: bb_maybe_free() */
1605 +static always_inline void rc_free(rc_t * rc)
1606 +{
1607 + free(rc->buffer);
1608 +}
1609 +
1610 +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
1611 +static void rc_do_normalize(rc_t * rc)
1612 +{
1613 + if (rc->ptr >= rc->buffer_end)
1614 + rc_read(rc);
1615 + rc->range <<= 8;
1616 + rc->code = (rc->code << 8) | *rc->ptr++;
1617 +}
1618 +static always_inline void rc_normalize(rc_t * rc)
1619 +{
1620 + if (rc->range < (1 << RC_TOP_BITS)) {
1621 + rc_do_normalize(rc);
1622 + }
1623 +}
1624 +
1625 +/* Called 9 times */
1626 +/* Why rc_is_bit_0_helper exists?
1627 + * Because we want to always expose (rc->code < rc->bound) to optimizer
1628 + */
1629 +static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
1630 +{
1631 + rc_normalize(rc);
1632 + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
1633 + return rc->bound;
1634 +}
1635 +static always_inline int rc_is_bit_0(rc_t * rc, uint16_t * p)
1636 +{
1637 + uint32_t t = rc_is_bit_0_helper(rc, p);
1638 + return rc->code < t;
1639 +}
1640 +
1641 +/* Called ~10 times, but very small, thus inlined */
1642 +static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
1643 +{
1644 + rc->range = rc->bound;
1645 + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
1646 +}
1647 +static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
1648 +{
1649 + rc->range -= rc->bound;
1650 + rc->code -= rc->bound;
1651 + *p -= *p >> RC_MOVE_BITS;
1652 +}
1653 +
1654 +/* Called 4 times in unlzma loop */
1655 +static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
1656 +{
1657 + if (rc_is_bit_0(rc, p)) {
1658 + rc_update_bit_0(rc, p);
1659 + *symbol *= 2;
1660 + return 0;
1661 + } else {
1662 + rc_update_bit_1(rc, p);
1663 + *symbol = *symbol * 2 + 1;
1664 + return 1;
1665 + }
1666 +}
1667 +
1668 +/* Called once */
1669 +static always_inline int rc_direct_bit(rc_t * rc)
1670 +{
1671 + rc_normalize(rc);
1672 + rc->range >>= 1;
1673 + if (rc->code >= rc->range) {
1674 + rc->code -= rc->range;
1675 + return 1;
1676 + }
1677 + return 0;
1678 +}
1679 +
1680 +/* Called twice */
1681 +static speed_inline void
1682 +rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
1683 +{
1684 + int i = num_levels;
1685 +
1686 + *symbol = 1;
1687 + while (i--)
1688 + rc_get_bit(rc, p + *symbol, symbol);
1689 + *symbol -= 1 << num_levels;
1690 +}
1691 +
1692 +
1693 +/*
1694 + * Small lzma deflate implementation.
1695 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1696 + *
1697 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1698 + * Copyright (C) 1999-2005 Igor Pavlov
1699 + */
1700 +
1701 +
1702 +typedef struct {
1703 + uint8_t pos;
1704 + uint32_t dict_size;
1705 + uint64_t dst_size;
1706 +} __attribute__ ((packed)) lzma_header_t;
1707 +
1708 +
1709 +#define LZMA_BASE_SIZE 1846
1710 +#define LZMA_LIT_SIZE 768
1711 +
1712 +#define LZMA_NUM_POS_BITS_MAX 4
1713 +
1714 +#define LZMA_LEN_NUM_LOW_BITS 3
1715 +#define LZMA_LEN_NUM_MID_BITS 3
1716 +#define LZMA_LEN_NUM_HIGH_BITS 8
1717 +
1718 +#define LZMA_LEN_CHOICE 0
1719 +#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
1720 +#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
1721 +#define LZMA_LEN_MID (LZMA_LEN_LOW \
1722 + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
1723 +#define LZMA_LEN_HIGH (LZMA_LEN_MID \
1724 + +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
1725 +#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
1726 +
1727 +#define LZMA_NUM_STATES 12
1728 +#define LZMA_NUM_LIT_STATES 7
1729 +
1730 +#define LZMA_START_POS_MODEL_INDEX 4
1731 +#define LZMA_END_POS_MODEL_INDEX 14
1732 +#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
1733 +
1734 +#define LZMA_NUM_POS_SLOT_BITS 6
1735 +#define LZMA_NUM_LEN_TO_POS_STATES 4
1736 +
1737 +#define LZMA_NUM_ALIGN_BITS 4
1738 +
1739 +#define LZMA_MATCH_MIN_LEN 2
1740 +
1741 +#define LZMA_IS_MATCH 0
1742 +#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES <<LZMA_NUM_POS_BITS_MAX))
1743 +#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
1744 +#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
1745 +#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
1746 +#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
1747 +#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
1748 + + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
1749 +#define LZMA_SPEC_POS (LZMA_POS_SLOT \
1750 + +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
1751 +#define LZMA_ALIGN (LZMA_SPEC_POS \
1752 + + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
1753 +#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
1754 +#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
1755 +#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
1756 +
1757 +
1758 +STATIC int unlzma(char *inbuf, int in_len,
1759 + int(*fill)(void*,unsigned int),
1760 + int(*writebb)(char*,unsigned int),
1761 + int *posp)
1762 +{
1763 + lzma_header_t header;
1764 + int lc, pb, lp;
1765 + uint32_t pos_state_mask;
1766 + uint32_t literal_pos_mask;
1767 + uint32_t pos;
1768 + uint16_t *p;
1769 + uint16_t *prob;
1770 + uint16_t *prob_lit;
1771 + int num_bits;
1772 + int num_probs;
1773 + rc_t rc;
1774 + int i, mi;
1775 + uint8_t *buffer;
1776 + uint8_t previous_byte = 0;
1777 + size_t buffer_pos = 0, global_pos = 0;
1778 + int len = 0;
1779 + int state = 0;
1780 + int bufsize;
1781 + uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
1782 +
1783 + rc_init(&rc, fill, inbuf, in_len);
1784 +
1785 + for (i = 0; i < sizeof(header); i++) {
1786 + if (rc.ptr >= rc.buffer_end)
1787 + rc_read(&rc);
1788 + ((unsigned char *)&header)[i] = *rc.ptr++;
1789 + }
1790 +
1791 + if (header.pos >= (9 * 5 * 5))
1792 + error("bad header");
1793 +
1794 + mi = header.pos / 9;
1795 + lc = header.pos % 9;
1796 + pb = mi / 5;
1797 + lp = mi % 5;
1798 + pos_state_mask = (1 << pb) - 1;
1799 + literal_pos_mask = (1 << lp) - 1;
1800 +
1801 + ENDIAN_CONVERT(header.dict_size);
1802 + ENDIAN_CONVERT(header.dst_size);
1803 +
1804 + if (header.dict_size == 0)
1805 + header.dict_size = 1;
1806 +
1807 + bufsize = MIN(header.dst_size, header.dict_size);
1808 + buffer = large_malloc(bufsize);
1809 + if(buffer == NULL)
1810 + return -1;
1811 +
1812 + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
1813 + p = large_malloc(num_probs * sizeof(*p));
1814 + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
1815 + for (i = 0; i < num_probs; i++)
1816 + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
1817 +
1818 + rc_init_code(&rc);
1819 +
1820 + while (global_pos + buffer_pos < header.dst_size) {
1821 + int pos_state = (buffer_pos + global_pos) & pos_state_mask;
1822 +
1823 + prob =
1824 + p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
1825 + if (rc_is_bit_0(&rc, prob)) {
1826 + mi = 1;
1827 + rc_update_bit_0(&rc, prob);
1828 + prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE
1829 + * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
1830 + + (previous_byte >> (8 - lc)))));
1831 +
1832 + if (state >= LZMA_NUM_LIT_STATES) {
1833 + int match_byte;
1834 +
1835 + pos = buffer_pos - rep0;
1836 + while (pos >= header.dict_size)
1837 + pos += header.dict_size;
1838 + if(pos >= bufsize) {
1839 + goto fail;
1840 + }
1841 + match_byte = buffer[pos];
1842 + do {
1843 + int bit;
1844 +
1845 + match_byte <<= 1;
1846 + bit = match_byte & 0x100;
1847 + prob_lit = prob + 0x100 + bit + mi;
1848 + if (rc_get_bit(&rc, prob_lit, &mi)) {
1849 + if (!bit)
1850 + break;
1851 + } else {
1852 + if (bit)
1853 + break;
1854 + }
1855 + } while (mi < 0x100);
1856 + }
1857 + while (mi < 0x100) {
1858 + prob_lit = prob + mi;
1859 + rc_get_bit(&rc, prob_lit, &mi);
1860 + }
1861 + previous_byte = (uint8_t) mi;
1862 +
1863 + buffer[buffer_pos++] = previous_byte;
1864 + if (buffer_pos == header.dict_size) {
1865 + buffer_pos = 0;
1866 + global_pos += header.dict_size;
1867 + writebb((char*)buffer, header.dict_size);
1868 + }
1869 + if (state < 4)
1870 + state = 0;
1871 + else if (state < 10)
1872 + state -= 3;
1873 + else
1874 + state -= 6;
1875 + } else {
1876 + int offset;
1877 + uint16_t *prob_len;
1878 +
1879 + rc_update_bit_1(&rc, prob);
1880 + prob = p + LZMA_IS_REP + state;
1881 + if (rc_is_bit_0(&rc, prob)) {
1882 + rc_update_bit_0(&rc, prob);
1883 + rep3 = rep2;
1884 + rep2 = rep1;
1885 + rep1 = rep0;
1886 + state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
1887 + prob = p + LZMA_LEN_CODER;
1888 + } else {
1889 + rc_update_bit_1(&rc, prob);
1890 + prob = p + LZMA_IS_REP_G0 + state;
1891 + if (rc_is_bit_0(&rc, prob)) {
1892 + rc_update_bit_0(&rc, prob);
1893 + prob = (p + LZMA_IS_REP_0_LONG
1894 + + (state << LZMA_NUM_POS_BITS_MAX) + pos_state);
1895 + if (rc_is_bit_0(&rc, prob)) {
1896 + rc_update_bit_0(&rc, prob);
1897 +
1898 + state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
1899 + pos = buffer_pos - rep0;
1900 + while (pos >= header.dict_size)
1901 + pos += header.dict_size;
1902 + if(pos >= bufsize) {
1903 + goto fail;
1904 + }
1905 + previous_byte = buffer[pos];
1906 + buffer[buffer_pos++] = previous_byte;
1907 + if (buffer_pos == header.dict_size) {
1908 + buffer_pos = 0;
1909 + global_pos += header.dict_size;
1910 + writebb((char*)buffer, header.dict_size);
1911 + }
1912 + continue;
1913 + } else {
1914 + rc_update_bit_1(&rc, prob);
1915 + }
1916 + } else {
1917 + uint32_t distance;
1918 +
1919 + rc_update_bit_1(&rc, prob);
1920 + prob = p + LZMA_IS_REP_G1 + state;
1921 + if (rc_is_bit_0(&rc, prob)) {
1922 + rc_update_bit_0(&rc, prob);
1923 + distance = rep1;
1924 + } else {
1925 + rc_update_bit_1(&rc, prob);
1926 + prob = p + LZMA_IS_REP_G2 + state;
1927 + if (rc_is_bit_0(&rc, prob)) {
1928 + rc_update_bit_0(&rc, prob);
1929 + distance = rep2;
1930 + } else {
1931 + rc_update_bit_1(&rc, prob);
1932 + distance = rep3;
1933 + rep3 = rep2;
1934 + }
1935 + rep2 = rep1;
1936 + }
1937 + rep1 = rep0;
1938 + rep0 = distance;
1939 + }
1940 + state = state < LZMA_NUM_LIT_STATES ? 8 : 11;
1941 + prob = p + LZMA_REP_LEN_CODER;
1942 + }
1943 +
1944 + prob_len = prob + LZMA_LEN_CHOICE;
1945 + if (rc_is_bit_0(&rc, prob_len)) {
1946 + rc_update_bit_0(&rc, prob_len);
1947 + prob_len = (prob + LZMA_LEN_LOW
1948 + + (pos_state << LZMA_LEN_NUM_LOW_BITS));
1949 + offset = 0;
1950 + num_bits = LZMA_LEN_NUM_LOW_BITS;
1951 + } else {
1952 + rc_update_bit_1(&rc, prob_len);
1953 + prob_len = prob + LZMA_LEN_CHOICE_2;
1954 + if (rc_is_bit_0(&rc, prob_len)) {
1955 + rc_update_bit_0(&rc, prob_len);
1956 + prob_len = (prob + LZMA_LEN_MID
1957 + + (pos_state << LZMA_LEN_NUM_MID_BITS));
1958 + offset = 1 << LZMA_LEN_NUM_LOW_BITS;
1959 + num_bits = LZMA_LEN_NUM_MID_BITS;
1960 + } else {
1961 + rc_update_bit_1(&rc, prob_len);
1962 + prob_len = prob + LZMA_LEN_HIGH;
1963 + offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
1964 + + (1 << LZMA_LEN_NUM_MID_BITS));
1965 + num_bits = LZMA_LEN_NUM_HIGH_BITS;
1966 + }
1967 + }
1968 + rc_bit_tree_decode(&rc, prob_len, num_bits, &len);
1969 + len += offset;
1970 +
1971 + if (state < 4) {
1972 + int pos_slot;
1973 +
1974 + state += LZMA_NUM_LIT_STATES;
1975 + prob =
1976 + p + LZMA_POS_SLOT +
1977 + ((len <
1978 + LZMA_NUM_LEN_TO_POS_STATES ? len :
1979 + LZMA_NUM_LEN_TO_POS_STATES - 1)
1980 + << LZMA_NUM_POS_SLOT_BITS);
1981 + rc_bit_tree_decode(&rc, prob, LZMA_NUM_POS_SLOT_BITS,
1982 + &pos_slot);
1983 + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
1984 + num_bits = (pos_slot >> 1) - 1;
1985 + rep0 = 2 | (pos_slot & 1);
1986 + if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
1987 + rep0 <<= num_bits;
1988 + prob = p + LZMA_SPEC_POS + rep0 - pos_slot - 1;
1989 + } else {
1990 + num_bits -= LZMA_NUM_ALIGN_BITS;
1991 + while (num_bits--)
1992 + rep0 = (rep0 << 1) | rc_direct_bit(&rc);
1993 + prob = p + LZMA_ALIGN;
1994 + rep0 <<= LZMA_NUM_ALIGN_BITS;
1995 + num_bits = LZMA_NUM_ALIGN_BITS;
1996 + }
1997 + i = 1;
1998 + mi = 1;
1999 + while (num_bits--) {
2000 + if (rc_get_bit(&rc, prob + mi, &mi))
2001 + rep0 |= i;
2002 + i <<= 1;
2003 + }
2004 + } else
2005 + rep0 = pos_slot;
2006 + if (++rep0 == 0)
2007 + break;
2008 + }
2009 +
2010 + len += LZMA_MATCH_MIN_LEN;
2011 +
2012 + do {
2013 + pos = buffer_pos - rep0;
2014 + while (pos >= header.dict_size)
2015 + pos += header.dict_size;
2016 + if(pos >= bufsize) {
2017 + goto fail;
2018 + }
2019 + previous_byte = buffer[pos];
2020 + buffer[buffer_pos++] = previous_byte;
2021 + if (buffer_pos == header.dict_size) {
2022 + buffer_pos = 0;
2023 + global_pos += header.dict_size;
2024 + writebb((char*)buffer, header.dict_size);
2025 + }
2026 + len--;
2027 + } while (len != 0 && buffer_pos < header.dst_size);
2028 + }
2029 + }
2030 +
2031 + writebb((char*)buffer, buffer_pos);
2032 + if(posp) {
2033 + *posp = rc.ptr-rc.buffer;
2034 + }
2035 + large_free(buffer);
2036 + return 0;
2037 + fail:
2038 + large_free(buffer);
2039 + return -1;
2040 +}
2042 --- linux-2.6.22.9/lib/Makefile
2043 +++ linux-2.6.22.9/lib/Makefile
2044 @@ -46,6 +46,10 @@
2045 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
2046 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
2048 +obj-$(CONFIG_RD_BZIP2) += decompress_bunzip2.o
2049 +obj-$(CONFIG_RD_LZMA) += decompress_unlzma.o
2050 +
2051 +
2052 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
2053 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
2054 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
2056 --- linux-2.6.22.9/scripts/Makefile.lib
2057 +++ linux-2.6.22.9/scripts/Makefile.lib
2058 @@ -162,4 +162,17 @@
2059 quiet_cmd_gzip = GZIP $@
2060 cmd_gzip = gzip -f -9 < $< > $@
2062 +# Append size
2063 +size_append=perl -e 'print(pack("i",(stat($$ARGV[0]))[7]));'
2065 +# Bzip2
2066 +# ---------------------------------------------------------------------------
2067 +
2068 +quiet_cmd_bzip2 = BZIP2 $@
2069 +cmd_bzip2 = (bzip2 -9 < $< ; $(size_append) $<) > $@
2070 +
2071 +# Lzma
2072 +# ---------------------------------------------------------------------------
2073 +
2074 +quiet_cmd_lzma = LZMA $@
2075 +cmd_lzma = (lzma e $< -so ; $(size_append) $<) >$@