wok-6.x view linux/stuff/linux-lzma-2.6.29.3.u @ rev 3075
clamav: We don't need libiconv (don't disable, but use iconv() from glibc)
author:   Christophe Lincoln <pankso@slitaz.org>
date:     Fri May 15 03:34:46 2009 +0200 (2009-05-15)
parents:
children:
line source
1 --- linux-2.6.29.3/arch/x86/boot/compressed/Makefile
2 +++ linux-2.6.29.3/arch/x86/boot/compressed/Makefile
3 @@ -4,7 +4,7 @@
4 # create a compressed vmlinux image from the original vmlinux
5 #
7 -targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
8 +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
10 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
11 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
12 @@ -51,14 +51,41 @@
13 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
14 $(call if_changed,gzip)
15 endif
16 +
17 +ifdef CONFIG_RELOCATABLE
18 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
19 + $(call if_changed,bzip2)
20 +else
21 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
22 + $(call if_changed,bzip2)
23 +endif
24 +
25 +ifdef CONFIG_RELOCATABLE
26 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
27 + $(call if_changed,lzma)
28 +else
29 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
30 + $(call if_changed,lzma)
31 +endif
32 +
33 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
35 else
36 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
37 $(call if_changed,gzip)
39 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
40 + $(call if_changed,bzip2)
41 +
42 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
43 + $(call if_changed,lzma)
44 +
45 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
46 endif
48 -$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
49 +suffix_$(CONFIG_KERNEL_GZIP) = gz
50 +suffix_$(CONFIG_KERNEL_BZIP2) = bz2
51 +suffix_$(CONFIG_KERNEL_LZMA) = lzma
52 +
53 +$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
54 $(call if_changed,ld)
56 --- linux-2.6.29.3/arch/x86/boot/compressed/misc.c
57 +++ linux-2.6.29.3/arch/x86/boot/compressed/misc.c
58 @@ -136,12 +136,15 @@
59 */
60 #define WSIZE 0x80000000
62 +#ifdef CONFIG_KERNEL_GZIP
63 /* Input buffer: */
64 static unsigned char *inbuf;
65 +#endif
67 /* Sliding window buffer (and final output buffer): */
68 static unsigned char *window;
70 +#ifdef CONFIG_KERNEL_GZIP
71 /* Valid bytes in inbuf: */
72 static unsigned insize;
74 @@ -181,6 +184,7 @@
76 static int fill_inbuf(void);
77 static void flush_window(void);
78 +#endif
79 static void error(char *m);
81 /*
82 @@ -192,9 +196,9 @@
83 extern unsigned char input_data[];
84 extern int input_len;
86 -static long bytes_out;
87 -
88 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
89 static void *memset(void *s, int c, unsigned n);
90 +#endif
91 static void *memcpy(void *dest, const void *src, unsigned n);
93 static void __putstr(int, const char *);
94 @@ -209,12 +213,68 @@
95 static memptr free_mem_ptr;
96 static memptr free_mem_end_ptr;
98 +static void *malloc(int size)
99 +{
100 + void *p;
101 +
102 + if (size <0) error("Malloc error");
103 + if (free_mem_ptr <= 0) error("Memory error");
104 +
105 + free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
106 +
107 + p = (void *)free_mem_ptr;
108 + free_mem_ptr += size;
109 +
110 + if (free_mem_ptr >= free_mem_end_ptr)
111 + error("Out of memory");
112 +
113 + return p;
114 +}
115 +
116 +static void free(void *where)
117 +{ /* Don't care */
118 +}
119 +
120 static char *vidmem;
121 static int vidport;
122 static int lines, cols;
124 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
125 +
126 +#define large_malloc malloc
127 +#define large_free free
128 +
129 +#ifdef current
130 +#undef current
131 +#endif
132 +
133 +#define INCLUDED
134 +#endif
135 +
136 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
137 +
138 +#define large_malloc malloc
139 +#define large_free free
140 +
141 +#ifdef current
142 +#undef current
143 +#endif
144 +
145 +#define INCLUDED
146 +#endif
147 +
148 +#ifdef CONFIG_KERNEL_GZIP
149 #include "../../../../lib/inflate.c"
150 +#endif
152 +#ifdef CONFIG_KERNEL_BZIP2
153 +#include "../../../../lib/decompress_bunzip2.c"
154 +#endif
155 +
156 +#ifdef CONFIG_KERNEL_LZMA
157 +#include "../../../../lib/decompress_unlzma.c"
158 +#endif
159 +
160 static void scroll(void)
161 {
162 int i;
163 @@ -272,6 +332,7 @@
164 outb(0xff & (pos >> 1), vidport+1);
165 }
167 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
168 static void *memset(void *s, int c, unsigned n)
169 {
170 int i;
171 @@ -281,6 +342,7 @@
172 ss[i] = c;
173 return s;
174 }
175 +#endif
177 static void *memcpy(void *dest, const void *src, unsigned n)
178 {
179 @@ -293,7 +355,27 @@
180 return dest;
181 }
183 +#ifdef CONFIG_KERNEL_BZIP2
184 /* ===========================================================================
185 + * Write the output window window[0..outcnt-1].
186 + * (Used for the decompressed data only.)
187 + */
188 +static int compr_flush(char *data, unsigned int len)
189 +{
190 + unsigned n;
191 + uch *out;
192 +
193 + out = window;
194 + for (n = 0; n < len; n++) {
195 + *out++ = *data++;
196 + }
197 + window += (ulg)len;
198 + return len;
199 +}
200 +
201 +#endif
202 +#ifdef CONFIG_KERNEL_GZIP
203 +/* ===========================================================================
204 * Fill the input buffer. This is called only when the buffer is empty
205 * and at least one byte is really needed.
206 */
207 @@ -304,7 +386,7 @@
208 }
210 /* ===========================================================================
211 - * Write the output window window[0..outcnt-1] and update crc and bytes_out.
212 + * Write the output window window[0..outcnt-1] and update crc.
213 * (Used for the decompressed data only.)
214 */
215 static void flush_window(void)
216 @@ -322,9 +404,9 @@
217 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
218 }
219 crc = c;
220 - bytes_out += (unsigned long)outcnt;
221 outcnt = 0;
222 }
223 +#endif
225 static void error(char *x)
226 {
227 @@ -410,9 +492,11 @@
228 window = output; /* Output buffer (Normally at 1M) */
229 free_mem_ptr = heap; /* Heap */
230 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
231 +#ifdef CONFIG_KERNEL_GZIP
232 inbuf = input_data; /* Input buffer */
233 insize = input_len;
234 inptr = 0;
235 +#endif
237 #ifdef CONFIG_X86_64
238 if ((unsigned long)output & (__KERNEL_ALIGN - 1))
239 @@ -430,10 +514,22 @@
240 #endif
241 #endif
243 +#ifdef CONFIG_KERNEL_BZIP2
244 + putstr("\nBunzipping Linux... ");
245 + bunzip2(input_data, input_len-4, NULL, compr_flush, NULL);
246 +#endif
247 +
248 +#ifdef CONFIG_KERNEL_LZMA
249 + putstr("\nUnlzmaing Linux... ");
250 + unlzma(input_data, input_len-4, NULL, NULL, window);
251 +#endif
252 +
253 +#ifdef CONFIG_KERNEL_GZIP
254 makecrc();
255 if (!quiet)
256 putstr("\nDecompressing Linux... ");
257 gunzip();
258 +#endif
259 parse_elf(output);
260 if (!quiet)
261 putstr("done.\nBooting the kernel.\n");
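
The malloc()/free() pair added to misc.c above is a simple bump allocator over the BOOT_HEAP_SIZE region that decompress_kernel() points free_mem_ptr/free_mem_end_ptr at: requests are rounded up to 4-byte alignment, the pointer only moves forward, and free() is a no-op because the whole heap is discarded once the kernel proper takes over. Below is a minimal user-space model of that behaviour (an illustrative sketch only, with the boot heap replaced by a static array and error() replaced by NULL returns):

    /* User-space model of the boot-heap bump allocator added above.
     * Same 4-byte alignment and no-op free(); not the kernel code itself. */
    #include <stdio.h>

    static unsigned long free_mem_ptr, free_mem_end_ptr;
    static unsigned char heap[4096];          /* stands in for BOOT_HEAP_SIZE */

    static void *bump_malloc(int size)
    {
        void *p;

        if (size < 0 || free_mem_ptr == 0)
            return NULL;                      /* error("Malloc error") in the patch */
        free_mem_ptr = (free_mem_ptr + 3) & ~3UL;    /* align to 4 bytes */
        p = (void *)free_mem_ptr;
        free_mem_ptr += size;
        if (free_mem_ptr >= free_mem_end_ptr)
            return NULL;                      /* error("Out of memory") in the patch */
        return p;
    }

    static void bump_free(void *where)
    {
        (void)where;    /* nothing to do: the heap dies with the boot code */
    }

    int main(void)
    {
        free_mem_ptr = (unsigned long)heap;
        free_mem_end_ptr = free_mem_ptr + sizeof(heap);

        void *a = bump_malloc(10);
        void *b = bump_malloc(20);            /* lands on the next 4-byte boundary */

        printf("a=%p b=%p gap=%ld\n", a, b, (long)((char *)b - (char *)a));
        bump_free(a);                         /* no-op, just like the patch */
        return 0;
    }

Since large_malloc/large_free are aliased to this same pair in misc.c, the bunzip2 intermediate buffer and the lzma probability arrays all come out of this one boot heap.
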
263 --- linux-2.6.29.3/arch/x86/mm/init_32.c
264 +++ linux-2.6.29.3/arch/x86/mm/init_32.c
265 @@ -1221,7 +1221,8 @@
266 free_page(addr);
267 totalram_pages++;
268 }
269 - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
270 + if (what)
271 + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
272 #endif
273 }
275 @@ -1235,7 +1236,7 @@
276 #ifdef CONFIG_BLK_DEV_INITRD
277 void free_initrd_mem(unsigned long start, unsigned long end)
278 {
279 - free_init_pages("initrd memory", start, end);
280 + free_init_pages(NULL, start, end);
281 }
282 #endif
285 --- linux-2.6.29.3/drivers/block/Kconfig
286 +++ linux-2.6.29.3/drivers/block/Kconfig
287 @@ -358,6 +358,30 @@
288 will prevent RAM block device backing store memory from being
289 allocated from highmem (only a problem for highmem systems).
291 +config RD_BZIP2
292 + bool "Initial ramdisk compressed using bzip2"
293 + default n
294 + depends on BLK_DEV_INITRD=y
295 + help
296 + Support loading of a bzip2 encoded initial ramdisk or cpio buffer
297 + If unsure, say N.
298 +
299 +config RD_LZMA
300 + bool "Initial ramdisk compressed using lzma"
301 + default n
302 + depends on BLK_DEV_INITRD=y
303 + help
304 + Support loading of a lzma encoded initial ramdisk or cpio buffer
305 + If unsure, say N.
306 +
307 +config RD_GZIP
308 + bool "Initial ramdisk compressed using gzip"
309 + default y
310 + depends on BLK_DEV_INITRD=y
311 + help
312 + Support loading of a gzip encoded initial ramdisk or cpio buffer.
313 + If unsure, say Y.
314 +
315 config CDROM_PKTCDVD
316 tristate "Packet writing on CD/DVD media"
317 depends on !UML
319 --- linux-2.6.29.3/include/linux/decompress_bunzip2.h
320 +++ linux-2.6.29.3/include/linux/decompress_bunzip2.h
321 @@ -0,0 +1,16 @@
322 +#ifndef DECOMPRESS_BUNZIP2_H
323 +#define DECOMPRESS_BUNZIP2_H
324 +
325 +/* Other housekeeping constants */
326 +#define BZIP2_IOBUF_SIZE 4096
327 +
328 +#ifndef STATIC
329 +#define STATIC /**/
330 +#endif
331 +
332 +STATIC int bunzip2(char *inbuf, int len,
333 + int(*fill)(void*,unsigned int),
334 + int(*writebb)(char*,unsigned int),
335 + int *pos);
336 +
337 +#endif
339 --- linux-2.6.29.3/include/linux/decompress_generic.h
340 +++ linux-2.6.29.3/include/linux/decompress_generic.h
341 @@ -0,0 +1,28 @@
342 +#ifndef DECOMPRESS_GENERIC_H
343 +#define DECOMPRESS_GENERIC_H
344 +
345 +/* Minimal chunksize to be read.
346 + * Bzip2 prefers at least 4096
347 + * Lzma prefers 0x10000 */
348 +#define COMPR_IOBUF_SIZE 4096
349 +
350 +typedef int (*uncompress_fn) (char *inbuf, int len,
351 + int(*fill)(char*,unsigned int),
352 + int(*writebb)(char*,unsigned int),
353 + int *posp);
354 +
355 +/* inbuf - input buffer
356 + * len - len of pre-read data in inbuf
357 + * fill - function to fill inbuf if empty
358 + * writebb - function to write out outbuf
359 + * posp - if non-null, input position (number of bytes read) will be
360 + * returned here
361 + *
362 + * If len != 0, the inbuf is initialized (with as much data), and fill
363 + * should not be called
364 + * If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
365 + * fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
366 + */
367 +
368 +
369 +#endif
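
decompress_generic.h above pins down the calling convention shared by bunzip2() and unlzma(): an optional pre-filled inbuf (len != 0), a fill() callback for streaming input (len == 0), a writebb() callback for the output, and an optional posp reporting how much input was consumed (the concrete prototypes declare fill() with a void * first argument). The following is a self-contained user-space sketch of that contract, using a hypothetical pass-through codec in place of a real decompressor; the kernel-side callbacks in this patch are compr_fill()/compr_flush() in init/do_mounts_rd.c:

    /* Sketch of the uncompress_fn contract described above, with a
     * hypothetical pass-through codec standing in for bunzip2()/unlzma(). */
    #include <stdio.h>

    #define COMPR_IOBUF_SIZE 4096

    static int copy_through(char *inbuf, int len,
                            int (*fill)(char *, unsigned int),
                            int (*writebb)(char *, unsigned int),
                            int *posp)
    {
        int n, total = 0;

        /* len != 0: caller pre-filled inbuf; len == 0: pull data via fill() */
        n = len ? len : fill(inbuf, COMPR_IOBUF_SIZE);
        while (n > 0) {
            if (writebb(inbuf, n) != n)
                return -1;
            total += n;
            n = len ? 0 : fill(inbuf, COMPR_IOBUF_SIZE);
        }
        if (posp)
            *posp = total;
        return 0;
    }

    static int fill_stdin(char *buf, unsigned int size)
    {
        return (int)fread(buf, 1, size, stdin);
    }

    static int write_stdout(char *buf, unsigned int size)
    {
        return (int)fwrite(buf, 1, size, stdout);
    }

    int main(void)
    {
        char inbuf[COMPR_IOBUF_SIZE];
        int pos;

        return copy_through(inbuf, 0, fill_stdin, write_stdout, &pos);
    }
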
371 --- linux-2.6.29.3/include/linux/decompress_unlzma.h
372 +++ linux-2.6.29.3/include/linux/decompress_unlzma.h
373 @@ -0,0 +1,15 @@
374 +#ifndef DECOMPRESS_UNLZMA_H
375 +#define DECOMPRESS_UNLZMA_H
376 +
377 +#define LZMA_IOBUF_SIZE 0x10000
378 +
379 +#ifndef STATIC
380 +#define STATIC /**/
381 +#endif
382 +
383 +STATIC int unlzma(char *inbuf, int len,
384 + int(*fill)(void*,unsigned int),
385 + int(*writebb)(char*,unsigned int),
386 + int *pos);
387 +
388 +#endif
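
unlzma() above consumes a standard LZMA_Alone stream: a 13-byte header holding one properties byte (lc + 9*lp + 45*pb) followed by a 32-bit little-endian dictionary size and a 64-bit little-endian uncompressed size, which is the lzma_header_t layout in lib/decompress_unlzma.c further down. A stand-alone sketch decoding a hypothetical header with the same arithmetic the decompressor uses:

    /* Decode a sample LZMA_Alone header the way unlzma() does.
     * The header bytes below are hypothetical, chosen for illustration. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* props 0x5d = 45*(pb=2) + 9*(lp=0) + (lc=3), dict 64 KiB, dst 1234 bytes */
        const unsigned char hdr[13] = {
            0x5d, 0x00, 0x00, 0x01, 0x00,
            0xd2, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
        };
        int pos = hdr[0];
        int lc = pos % 9, lp = (pos / 9) % 5, pb = (pos / 9) / 5;
        uint32_t dict = 0;
        uint64_t dst = 0;
        int i;

        for (i = 3; i >= 0; i--)              /* little-endian dictionary size */
            dict = (dict << 8) | hdr[1 + i];
        for (i = 7; i >= 0; i--)              /* little-endian uncompressed size */
            dst = (dst << 8) | hdr[5 + i];

        printf("lc=%d lp=%d pb=%d dict=%u dst=%llu\n",
               lc, lp, pb, (unsigned)dict, (unsigned long long)dst);
        return 0;
    }
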
390 --- linux-2.6.29.3/init/Kconfig
391 +++ linux-2.6.29.3/init/Kconfig
392 @@ -101,6 +101,56 @@
394 which is done within the script "scripts/setlocalversion".)
396 +choice
397 + prompt "Kernel compression mode"
398 + default KERNEL_GZIP
399 + help
400 + The linux kernel is a kind of self-extracting executable.
401 + Several compression algorithms are available, which differ
402 + in efficiency, compression and decompression speed.
403 + Compression speed is only relevant when building a kernel.
404 + Decompression speed is relevant at each boot.
405 +
406 + If you have any problems with bzip2 or lzma compressed
407 + kernels, mail me (Alain Knaff) <alain@knaff.lu>. (An older
408 + version of this functionality (bzip2 only), for 2.4, was
409 + supplied by Christian Ludwig)
410 +
411 + High compression options are mostly useful for users, who
412 + are low on disk space (embedded systems), but for whom ram
413 + size matters less.
414 +
415 + If in doubt, select 'gzip'
416 +
417 +config KERNEL_GZIP
418 + bool "Gzip"
419 + help
420 + The old and tried gzip compression. Its compression ratio is
421 + the poorest among the 3 choices; however its speed (both
422 + compression and decompression) is the fastest.
423 +
424 +config KERNEL_BZIP2
425 + bool "Bzip2"
426 + help
427 + Its compression ratio and speed is intermediate.
428 + Decompression speed is slowest among the 3.
429 + The kernel size is about 10 per cent smaller with bzip2,
430 + in comparison to gzip.
431 + Bzip2 uses a large amount of memory. For modern kernels
432 + you will need at least 8MB RAM or more for booting.
433 +
434 +config KERNEL_LZMA
435 + bool "LZMA"
436 + help
437 + The most recent compression algorithm.
438 + Its ratio is best, decompression speed is between the other
439 + 2. Compression is slowest.
440 + The kernel size is about 33 per cent smaller with lzma,
441 + in comparison to gzip.
442 +
443 +endchoice
444 +
445 +
446 config SWAP
447 bool "Support for paging of anonymous memory (swap)"
448 depends on MMU && BLOCK
450 --- linux-2.6.29.3/init/do_mounts_rd.c
451 +++ linux-2.6.29.3/init/do_mounts_rd.c
452 @@ -8,6 +8,16 @@
453 #include <linux/initrd.h>
454 #include <linux/string.h>
456 +#ifdef CONFIG_RD_BZIP2
457 +#include <linux/decompress_bunzip2.h>
458 +#undef STATIC
459 +#endif
460 +
461 +#ifdef CONFIG_RD_LZMA
462 +#include <linux/decompress_unlzma.h>
463 +#undef STATIC
464 +#endif
465 +
466 #include "do_mounts.h"
467 #include "../fs/squashfs/squashfs_fs.h"
469 @@ -29,7 +39,15 @@
470 }
471 __setup("ramdisk_start=", ramdisk_start_setup);
473 +#ifdef CONFIG_RD_GZIP
474 static int __init crd_load(int in_fd, int out_fd);
475 +#endif
476 +#ifdef CONFIG_RD_BZIP2
477 +static int __init crd_load_bzip2(int in_fd, int out_fd);
478 +#endif
479 +#ifdef CONFIG_RD_LZMA
480 +static int __init crd_load_lzma(int in_fd, int out_fd);
481 +#endif
483 /*
484 * This routine tries to find a RAM disk image to load, and returns the
485 @@ -46,7 +64,7 @@
486 * gzip
487 */
488 static int __init
489 -identify_ramdisk_image(int fd, int start_block)
490 +identify_ramdisk_image(int fd, int start_block, int *ztype)
491 {
492 const int size = 512;
493 struct minix_super_block *minixsb;
494 @@ -74,6 +92,7 @@
495 sys_lseek(fd, start_block * BLOCK_SIZE, 0);
496 sys_read(fd, buf, size);
498 +#ifdef CONFIG_RD_GZIP
499 /*
500 * If it matches the gzip magic numbers, return 0
501 */
502 @@ -81,10 +100,41 @@
503 printk(KERN_NOTICE
504 "RAMDISK: Compressed image found at block %d\n",
505 start_block);
506 + *ztype = 0;
507 nblocks = 0;
508 goto done;
509 }
510 +#endif
512 +#ifdef CONFIG_RD_BZIP2
513 + /*
514 + * If it matches the bzip2 magic numbers, return 0
515 + */
516 + if (buf[0] == 0x42 && (buf[1] == 0x5a)) {
517 + printk(KERN_NOTICE
518 + "RAMDISK: Bzipped image found at block %d\n",
519 + start_block);
520 + *ztype = 1;
521 + nblocks = 0;
522 + goto done;
523 + }
524 +#endif
525 +
526 +#ifdef CONFIG_RD_LZMA
527 + /*
528 + * If it matches the lzma magic numbers, return 0
529 + */
530 + if (buf[0] == 0x5d && (buf[1] == 0x00)) {
531 + printk(KERN_NOTICE
532 + "RAMDISK: Lzma image found at block %d\n",
533 + start_block);
534 + *ztype = 2;
535 + nblocks = 0;
536 + goto done;
537 + }
538 +#endif
539 +
540 +
541 /* romfs is at block zero too */
542 if (romfsb->word0 == ROMSB_WORD0 &&
543 romfsb->word1 == ROMSB_WORD1) {
544 @@ -157,6 +207,7 @@
545 int nblocks, i, disk;
546 char *buf = NULL;
547 unsigned short rotate = 0;
548 + int ztype=-1;
549 #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
550 char rotator[4] = { '|' , '/' , '-' , '\\' };
551 #endif
552 @@ -169,13 +220,37 @@
553 if (in_fd < 0)
554 goto noclose_input;
556 - nblocks = identify_ramdisk_image(in_fd, rd_image_start);
557 + nblocks = identify_ramdisk_image(in_fd, rd_image_start, &ztype);
558 if (nblocks < 0)
559 goto done;
561 if (nblocks == 0) {
562 - if (crd_load(in_fd, out_fd) == 0)
563 - goto successful_load;
564 + switch(ztype) {
565 +
566 +#ifdef CONFIG_RD_GZIP
567 + case 0:
568 + if (crd_load(in_fd, out_fd) == 0)
569 + goto successful_load;
570 + break;
571 +#endif
572 +
573 +#ifdef CONFIG_RD_BZIP2
574 + case 1:
575 + if (crd_load_bzip2(in_fd, out_fd) == 0)
576 + goto successful_load;
577 + break;
578 +#endif
579 +
580 +#ifdef CONFIG_RD_LZMA
581 + case 2:
582 + if (crd_load_lzma(in_fd, out_fd) == 0)
583 + goto successful_load;
584 + break;
585 +#endif
586 +
587 + default:
588 + break;
589 + }
590 goto done;
591 }
593 @@ -273,6 +348,7 @@
594 return rd_load_image("/dev/root");
595 }
597 +#ifdef CONFIG_RD_GZIP
598 /*
599 * gzip declarations
600 */
601 @@ -300,8 +376,11 @@
602 static int exit_code;
603 static int unzip_error;
604 static long bytes_out;
605 +#endif
606 +
607 static int crd_infd, crd_outfd;
609 +#ifdef CONFIG_RD_GZIP
610 #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
612 /* Diagnostic functions (stubbed out) */
613 @@ -342,7 +421,22 @@
615 return inbuf[0];
616 }
617 +#endif
619 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
620 +static int __init compr_fill(void *buf, unsigned int len)
621 +{
622 + int r = sys_read(crd_infd, buf, len);
623 + if(r < 0) {
624 + printk(KERN_ERR "RAMDISK: error while reading compressed data");
625 + } else if(r == 0) {
626 + printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
627 + }
628 + return r;
629 +}
630 +#endif
631 +
632 +#ifdef CONFIG_RD_GZIP
633 /* ===========================================================================
634 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
635 * (Used for the decompressed data only.)
636 @@ -368,13 +462,68 @@
637 bytes_out += (ulg)outcnt;
638 outcnt = 0;
639 }
640 +#endif
642 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
643 +static int __init compr_flush(void *window, unsigned int outcnt) {
644 + static int progressDots=0;
645 + int written = sys_write(crd_outfd, window, outcnt);
646 + if (written != outcnt) {
647 + printk(KERN_ERR "RAMDISK: incomplete write (%d != %d)\n",
648 + written, outcnt);
649 + }
650 + progressDots = (progressDots+1)%10;
651 + if(!progressDots)
652 + printk(".");
653 + return outcnt;
654 +}
655 +#endif
656 +
657 +#ifdef CONFIG_RD_GZIP
658 static void __init error(char *x)
659 {
660 printk(KERN_ERR "%s\n", x);
661 exit_code = 1;
662 unzip_error = 1;
663 }
664 +#endif
665 +
666 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
667 +static int __init crd_load_compr(int in_fd, int out_fd, int size,
668 + int (*deco)(char *,int,
669 + int(*fill)(void*,unsigned int),
670 + int(*flush)(void*,unsigned int),
671 + int *))
672 +{
673 + int result;
674 + char *inbuf = kmalloc(size, GFP_KERNEL);
675 + crd_infd = in_fd;
676 + crd_outfd = out_fd;
677 + if (inbuf == 0) {
678 + printk(KERN_ERR "RAMDISK: Couldn't allocate decompression buffer\n");
679 + return -1;
680 + }
681 + result=deco(inbuf, 0, compr_fill, compr_flush, NULL);
682 + kfree(inbuf);
683 + printk("\n");
684 + return result;
685 +}
686 +#endif
687 +
688 +#ifdef CONFIG_RD_BZIP2
689 +static int __init crd_load_bzip2(int in_fd, int out_fd)
690 +{
691 + return crd_load_compr(in_fd, out_fd, BZIP2_IOBUF_SIZE, bunzip2);
692 +}
693 +#endif
694 +
695 +#ifdef CONFIG_RD_LZMA
696 +static int __init crd_load_lzma(int in_fd, int out_fd)
697 +{
698 + return crd_load_compr(in_fd, out_fd, LZMA_IOBUF_SIZE, unlzma);
699 +}
700 +
701 +#endif
703 static int __init crd_load(int in_fd, int out_fd)
704 {
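
identify_ramdisk_image() now tells the three formats apart by their first two bytes alone: 'B' 'Z' (0x42 0x5a) for bzip2 and 0x5d 0x00 for lzma as in the hunks above, plus the standard gzip magic 0x1f 0x8b tested in the unchanged part of the function. A small stand-alone sniffer using the same values (the file handling is illustrative, not part of the patch):

    /* Report which ramdisk compression the first two bytes indicate,
     * using the same magic values identify_ramdisk_image() checks. */
    #include <stdio.h>

    static const char *ramdisk_type(const unsigned char b[2])
    {
        if (b[0] == 0x1f && b[1] == 0x8b)
            return "gzip";
        if (b[0] == 0x42 && b[1] == 0x5a)
            return "bzip2";
        if (b[0] == 0x5d && b[1] == 0x00)
            return "lzma";
        return "unknown";
    }

    int main(int argc, char **argv)
    {
        unsigned char buf[2];
        FILE *f;

        if (argc != 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        if (fread(buf, 1, 2, f) != 2) {
            fclose(f);
            return 1;
        }
        printf("%s: %s\n", argv[1], ramdisk_type(buf));
        fclose(f);
        return 0;
    }
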
706 --- linux-2.6.29.3/init/initramfs.c
707 +++ linux-2.6.29.3/init/initramfs.c
708 @@ -410,6 +410,18 @@
709 }
710 }
712 +#ifdef CONFIG_RD_BZIP2
713 +#include <linux/decompress_bunzip2.h>
714 +#undef STATIC
715 +
716 +#endif
717 +
718 +#ifdef CONFIG_RD_LZMA
719 +#include <linux/decompress_unlzma.h>
720 +#undef STATIC
721 +
722 +#endif
723 +
724 /*
725 * gzip declarations
726 */
727 @@ -435,7 +447,32 @@
728 static unsigned outcnt; /* bytes in output buffer */
729 static long bytes_out;
731 -#define get_byte() (inptr < insize ? inbuf[inptr++] : -1)
732 +#define INITRD_PAGE ((PAGE_SIZE > 1024*1024) ? PAGE_SIZE : 1024*1024)
733 +static int fill_offset, fill_total;
734 +#include <linux/initrd.h>
735 +static void release_inbuf(int count)
736 +{
737 + if (fill_total < 0) return;
738 + fill_offset += count;
739 + fill_total += count;
740 + if (fill_offset >= INITRD_PAGE) {
741 + unsigned rem = fill_offset % INITRD_PAGE;
742 + unsigned end = initrd_start + fill_offset - rem;
743 + free_initrd_mem(initrd_start, end);
744 + printk(".");
745 + initrd_start = end;
746 + fill_offset = rem;
747 + }
748 +}
749 +
750 +static uch get_byte(void)
751 +{
752 + uch c;
753 + if (inptr >= insize) return -1;
754 + c = inbuf[inptr++];
755 + release_inbuf(1);
756 + return c;
757 +}
759 /* Diagnostic functions (stubbed out) */
760 #define Assert(cond,msg)
761 @@ -476,6 +513,17 @@
762 outcnt = 0;
763 }
765 +#ifdef CONFIG_RD_LZMA
766 +static int fill_buffer(void *buffer, unsigned size)
767 +{
768 + int max = initrd_end - initrd_start - fill_offset;
769 + if (size < max) max = size;
770 + memcpy(buffer, (void *)(initrd_start + fill_offset), max);
771 + release_inbuf(max);
772 + return max;
773 +}
774 +#endif
775 +
776 static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
777 {
778 int written;
779 @@ -489,11 +537,17 @@
780 state = Start;
781 this_header = 0;
782 message = NULL;
783 + fill_total = fill_offset = 0;
784 + if(buf != (char *) initrd_start) fill_total = -1;
785 while (!message && len) {
786 +#ifdef CONFIG_RD_LZMA
787 + int status;
788 +#endif
789 loff_t saved_offset = this_header;
790 if (*buf == '0' && !(this_header & 3)) {
791 state = Start;
792 written = write_buffer(buf, len);
793 + release_inbuf(written);
794 buf += written;
795 len -= written;
796 continue;
797 @@ -512,9 +566,42 @@
798 bytes_out = 0;
799 crc = (ulg)0xffffffffL; /* shift register contents */
800 makecrc();
801 - gunzip();
802 + if(!gunzip() && message == NULL)
803 + goto ok;
804 +
805 +#ifdef CONFIG_RD_BZIP2
806 + message = NULL; /* Zero out message, or else cpio will
807 think an error has already occurred */
808 + if(bunzip2(buf, len, NULL, flush_buffer, &inptr) == 0 &&
809 + message == NULL) {
810 + goto ok;
811 + }
812 +#endif
813 +
814 +#ifdef CONFIG_RD_LZMA
815 + message = NULL; /* Zero out message, or else cpio will
816 think an error has already occurred */
817 + status = -1;
818 + if(buf == (char *) initrd_start) {
819 + char *work_buffer = malloc(LZMA_IOBUF_SIZE);
820 + if (work_buffer) {
821 + fill_total = fill_offset = 0;
822 + fill_buffer(work_buffer, LZMA_IOBUF_SIZE);
823 + status = unlzma(work_buffer, LZMA_IOBUF_SIZE,
824 + fill_buffer, flush_buffer, NULL);
825 + inptr = fill_total;
826 + free(work_buffer);
827 + }
828 + }
829 + else status = unlzma(buf,len, NULL, flush_buffer, &inptr);
830 + if (status == 0 && message == NULL) {
831 + goto ok;
832 + }
833 +#endif
834 + ok:
835 +
836 if (state != Reset)
837 - error("junk in gzipped archive");
838 + error("junk in compressed archive");
839 this_header = saved_offset + inptr;
840 buf += inptr;
841 len -= inptr;
842 @@ -581,7 +668,7 @@
843 if (err)
844 panic(err);
845 if (initrd_start) {
846 -#ifdef CONFIG_BLK_DEV_RAM
847 +#ifdef NOT_IN_SLITAZ_CONFIG_BLK_DEV_RAM
848 int fd;
849 printk(KERN_INFO "checking if image is initramfs...");
850 err = unpack_to_rootfs((char *)initrd_start,
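
The release_inbuf() helper added to initramfs.c frees the initrd while it is still being unpacked: fill_offset tracks consumed bytes and, whenever a full INITRD_PAGE (at least 1 MB) has been consumed, that chunk is returned via free_initrd_mem() and initrd_start advances, which is also why free_initrd_mem() was silenced in init_32.c earlier in this patch. A user-space model of just the chunking arithmetic (free_initrd_mem() becomes a printf here, and the fill_total < 0 "not unpacking in place" case is left out):

    /* Model of the chunked early-free logic in release_inbuf() above. */
    #include <stdio.h>

    #define INITRD_PAGE (1024 * 1024)          /* at least 1 MB, as in the patch */

    static unsigned long initrd_start = 0x100000;    /* hypothetical load address */
    static unsigned long fill_offset, fill_total;

    static void release_inbuf(unsigned long count)
    {
        fill_offset += count;
        fill_total += count;
        if (fill_offset >= INITRD_PAGE) {
            unsigned long rem = fill_offset % INITRD_PAGE;
            unsigned long end = initrd_start + fill_offset - rem;

            printf("free_initrd_mem(%#lx, %#lx)\n", initrd_start, end);
            initrd_start = end;                /* the freed chunk is gone for good */
            fill_offset = rem;                 /* carry the partial chunk over */
        }
    }

    int main(void)
    {
        /* Consume 2.5 MB in 512 KB steps: a free fires at each 1 MB boundary. */
        int i;

        for (i = 0; i < 5; i++)
            release_inbuf(512 * 1024);
        printf("consumed %lu bytes, %lu still pending\n", fill_total, fill_offset);
        return 0;
    }
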
852 --- linux-2.6.29.3/lib/Makefile
853 +++ linux-2.6.29.3/lib/Makefile
854 @@ -59,6 +59,9 @@
855 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
856 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
858 +obj-$(CONFIG_RD_BZIP2) += decompress_bunzip2.o
859 +obj-$(CONFIG_RD_LZMA) += decompress_unlzma.o unlzma_syms.o
860 +
861 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
862 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
863 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
865 --- linux-2.6.29.3/lib/decompress_bunzip2.c
866 +++ linux-2.6.29.3/lib/decompress_bunzip2.c
867 @@ -0,0 +1,645 @@
868 +/* vi: set sw=4 ts=4: */
869 +/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
870 +
871 + Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
872 + which also acknowledges contributions by Mike Burrows, David Wheeler,
873 + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
874 + Robert Sedgewick, and Jon L. Bentley.
875 +
876 + This code is licensed under the LGPLv2:
877 + LGPL (http://www.gnu.org/copyleft/lgpl.html
878 +*/
879 +
880 +/*
881 + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
882 +
883 + More efficient reading of Huffman codes, a streamlined read_bunzip()
884 + function, and various other tweaks. In (limited) tests, approximately
885 + 20% faster than bzcat on x86 and about 10% faster on arm.
886 +
887 + Note that about 2/3 of the time is spent in read_unzip() reversing
888 + the Burrows-Wheeler transformation. Much of that time is delay
889 + resulting from cache misses.
890 +
891 + I would ask that anyone benefiting from this work, especially those
892 + using it in commercial products, consider making a donation to my local
893 + non-profit hospice organization in the name of the woman I loved, who
894 + passed away Feb. 12, 2003.
895 +
896 + In memory of Toni W. Hagan
897 +
898 + Hospice of Acadiana, Inc.
899 + 2600 Johnston St., Suite 200
900 + Lafayette, LA 70503-3240
901 +
902 + Phone (337) 232-1234 or 1-800-738-2226
903 + Fax (337) 232-1297
904 +
905 + http://www.hospiceacadiana.com/
906 +
907 + Manuel
908 + */
909 +
910 +/*
911 + Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
912 +*/
913 +
914 +
915 +#ifndef STATIC
916 +
917 +#include <linux/kernel.h>
918 +#include <linux/fs.h>
919 +#include <linux/string.h>
920 +
921 +#ifdef TEST
922 +#include "test.h"
923 +#else
924 +#include <linux/vmalloc.h>
925 +#endif
926 +
927 +static void __init *large_malloc(size_t size)
928 +{
929 + return vmalloc(size);
930 +}
931 +
932 +static void __init large_free(void *where)
933 +{
934 + vfree(where);
935 +}
936 +
937 +#ifndef TEST
938 +static void __init *malloc(size_t size)
939 +{
940 + return kmalloc(size, GFP_KERNEL);
941 +}
942 +
943 +static void __init free(void *where)
944 +{
945 + kfree(where);
946 +}
947 +
948 +static void __init error(char *x)
949 +{
950 + printk(KERN_ERR "%s\n", x);
951 +}
952 +#endif
953 +
954 +#define STATIC /**/
955 +
956 +#endif
957 +
958 +#include <linux/decompress_bunzip2.h>
959 +
960 +
961 +/* Constants for Huffman coding */
962 +#define MAX_GROUPS 6
963 +#define GROUP_SIZE 50 /* 64 would have been more efficient */
964 +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
965 +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
966 +#define SYMBOL_RUNA 0
967 +#define SYMBOL_RUNB 1
968 +
969 +/* Status return values */
970 +#define RETVAL_OK 0
971 +#define RETVAL_LAST_BLOCK (-1)
972 +#define RETVAL_NOT_BZIP_DATA (-2)
973 +#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
974 +#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
975 +#define RETVAL_DATA_ERROR (-5)
976 +#define RETVAL_OUT_OF_MEMORY (-6)
977 +#define RETVAL_OBSOLETE_INPUT (-7)
978 +
979 +
980 +/* This is what we know about each Huffman coding group */
981 +struct group_data {
982 + /* We have an extra slot at the end of limit[] for a sentinel value. */
983 + int limit[MAX_HUFCODE_BITS+1],base[MAX_HUFCODE_BITS],permute[MAX_SYMBOLS];
984 + int minLen, maxLen;
985 +};
986 +
987 +/* Structure holding all the housekeeping data, including IO buffers and
988 + memory that persists between calls to bunzip */
989 +typedef struct {
990 + /* State for interrupting output loop */
991 + int writeCopies,writePos,writeRunCountdown,writeCount,writeCurrent;
992 + /* I/O tracking data (file handles, buffers, positions, etc.) */
993 + int (*fill)(void*,unsigned int);
994 + int inbufCount,inbufPos /*,outbufPos*/;
995 + unsigned char *inbuf /*,*outbuf*/;
996 + unsigned int inbufBitCount, inbufBits;
997 + /* The CRC values stored in the block header and calculated from the data */
998 + unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC;
999 + /* Intermediate buffer and its size (in bytes) */
1000 + unsigned int *dbuf, dbufSize;
1001 + /* These things are a bit too big to go on the stack */
1002 + unsigned char selectors[32768]; /* nSelectors=15 bits */
1003 + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
1004 + int io_error; /* non-zero if we have IO error */
1005 +} bunzip_data;
1006 +
1007 +
1008 +/* Return the next nnn bits of input. All reads from the compressed input
1009 + are done through this function. All reads are big endian */
1010 +static unsigned int get_bits(bunzip_data *bd, char bits_wanted)
1011 +{
1012 + unsigned int bits=0;
1013 +
1014 + /* If we need to get more data from the byte buffer, do so. (Loop getting
1015 + one byte at a time to enforce endianness and avoid unaligned access.) */
1016 + while (bd->inbufBitCount<bits_wanted) {
1017 + /* If we need to read more data from file into byte buffer, do so */
1018 + if(bd->inbufPos==bd->inbufCount) {
1019 + if(bd->io_error)
1020 + return 0;
1021 + if((bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE)) <= 0) {
1022 + bd->io_error=RETVAL_UNEXPECTED_INPUT_EOF;
1023 + return 0;
1024 + }
1025 + bd->inbufPos=0;
1026 + }
1027 + /* Avoid 32-bit overflow (dump bit buffer to top of output) */
1028 + if(bd->inbufBitCount>=24) {
1029 + bits=bd->inbufBits&((1<<bd->inbufBitCount)-1);
1030 + bits_wanted-=bd->inbufBitCount;
1031 + bits<<=bits_wanted;
1032 + bd->inbufBitCount=0;
1033 + }
1034 + /* Grab next 8 bits of input from buffer. */
1035 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
1036 + bd->inbufBitCount+=8;
1037 + }
1038 + /* Calculate result */
1039 + bd->inbufBitCount-=bits_wanted;
1040 + bits|=(bd->inbufBits>>bd->inbufBitCount)&((1<<bits_wanted)-1);
1041 +
1042 + return bits;
1043 +}
1044 +
1045 +/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
1046 +
1047 +static int get_next_block(bunzip_data *bd)
1048 +{
1049 + struct group_data *hufGroup=NULL;
1050 + int *base=NULL;
1051 + int *limit=NULL;
1052 + int dbufCount,nextSym,dbufSize,groupCount,selector,
1053 + i,j,k,t,runPos,symCount,symTotal,nSelectors,byteCount[256];
1054 + unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
1055 + unsigned int *dbuf,origPtr;
1056 +
1057 + dbuf=bd->dbuf;
1058 + dbufSize=bd->dbufSize;
1059 + selectors=bd->selectors;
1060 +
1061 + /* Read in header signature and CRC, then validate signature.
1062 + (last block signature means CRC is for whole file, return now) */
1063 + i = get_bits(bd,24);
1064 + j = get_bits(bd,24);
1065 + bd->headerCRC=get_bits(bd,32);
1066 + if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK;
1067 + if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA;
1068 + /* We can add support for blockRandomised if anybody complains. There was
1069 + some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
1070 + it didn't actually work. */
1071 + if(get_bits(bd,1)) return RETVAL_OBSOLETE_INPUT;
1072 + if((origPtr=get_bits(bd,24)) > dbufSize) return RETVAL_DATA_ERROR;
1073 + /* mapping table: if some byte values are never used (encoding things
1074 + like ascii text), the compression code removes the gaps to have fewer
1075 + symbols to deal with, and writes a sparse bitfield indicating which
1076 + values were present. We make a translation table to convert the symbols
1077 + back to the corresponding bytes. */
1078 + t=get_bits(bd, 16);
1079 + symTotal=0;
1080 + for (i=0;i<16;i++) {
1081 + if(t&(1<<(15-i))) {
1082 + k=get_bits(bd,16);
1083 + for(j=0;j<16;j++)
1084 + if(k&(1<<(15-j))) symToByte[symTotal++]=(16*i)+j;
1085 + }
1086 + }
1087 + /* How many different Huffman coding groups does this block use? */
1088 + groupCount=get_bits(bd,3);
1089 + if (groupCount<2 || groupCount>MAX_GROUPS) return RETVAL_DATA_ERROR;
1090 + /* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
1091 + group. Read in the group selector list, which is stored as MTF encoded
1092 + bit runs. (MTF=Move To Front, as each value is used it's moved to the
1093 + start of the list.) */
1094 + if(!(nSelectors=get_bits(bd, 15))) return RETVAL_DATA_ERROR;
1095 + for(i=0; i<groupCount; i++) mtfSymbol[i] = i;
1096 + for(i=0; i<nSelectors; i++) {
1097 + /* Get next value */
1098 + for(j=0;get_bits(bd,1);j++) if (j>=groupCount) return RETVAL_DATA_ERROR;
1099 + /* Decode MTF to get the next selector */
1100 + uc = mtfSymbol[j];
1101 + for(;j;j--) mtfSymbol[j] = mtfSymbol[j-1];
1102 + mtfSymbol[0]=selectors[i]=uc;
1103 + }
1104 + /* Read the Huffman coding tables for each group, which code for symTotal
1105 + literal symbols, plus two run symbols (RUNA, RUNB) */
1106 + symCount=symTotal+2;
1107 + for (j=0; j<groupCount; j++) {
1108 + unsigned char length[MAX_SYMBOLS],temp[MAX_HUFCODE_BITS+1];
1109 + int minLen, maxLen, pp;
1110 + /* Read Huffman code lengths for each symbol. They're stored in
1111 + a way similar to mtf; record a starting value for the first symbol,
1112 + and an offset from the previous value for every symbol after that.
1113 + (Subtracting 1 before the loop and then adding it back at the end is
1114 + an optimization that makes the test inside the loop simpler: symbol
1115 + length 0 becomes negative, so an unsigned inequality catches it.) */
1116 + t=get_bits(bd, 5)-1;
1117 + for (i = 0; i < symCount; i++) {
1118 + for(;;) {
1119 + if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
1120 + return RETVAL_DATA_ERROR;
1121 + /* If first bit is 0, stop. Else second bit indicates whether
1122 + to increment or decrement the value. Optimization: grab 2
1123 + bits and unget the second if the first was 0. */
1124 + k = get_bits(bd,2);
1125 + if (k < 2) {
1126 + bd->inbufBitCount++;
1127 + break;
1128 + }
1129 + /* Add one if second bit 1, else subtract 1. Avoids if/else */
1130 + t+=(((k+1)&2)-1);
1131 + }
1132 + /* Correct for the initial -1, to get the final symbol length */
1133 + length[i]=t+1;
1134 + }
1135 + /* Find largest and smallest lengths in this group */
1136 + minLen=maxLen=length[0];
1137 + for(i = 1; i < symCount; i++) {
1138 + if(length[i] > maxLen) maxLen = length[i];
1139 + else if(length[i] < minLen) minLen = length[i];
1140 + }
1141 + /* Calculate permute[], base[], and limit[] tables from length[].
1142 + *
1143 + * permute[] is the lookup table for converting Huffman coded symbols
1144 + * into decoded symbols. base[] is the amount to subtract from the
1145 + * value of a Huffman symbol of a given length when using permute[].
1146 + *
1147 + * limit[] indicates the largest numerical value a symbol with a given
1148 + * number of bits can have. This is how the Huffman codes can vary in
1149 + * length: each code with a value>limit[length] needs another bit.
1150 + */
1151 + hufGroup=bd->groups+j;
1152 + hufGroup->minLen = minLen;
1153 + hufGroup->maxLen = maxLen;
1154 + /* Note that minLen can't be smaller than 1, so we adjust the base
1155 + and limit array pointers so we're not always wasting the first
1156 + entry. We do this again when using them (during symbol decoding).*/
1157 + base=hufGroup->base-1;
1158 + limit=hufGroup->limit-1;
1159 + /* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
1160 + pp=0;
1161 + for(i=minLen;i<=maxLen;i++) {
1162 + temp[i]=limit[i]=0;
1163 + for(t=0;t<symCount;t++)
1164 + if(length[t]==i) hufGroup->permute[pp++] = t;
1165 + }
1166 + /* Count symbols coded for at each bit length */
1167 + for (i=0;i<symCount;i++) temp[length[i]]++;
1168 + /* Calculate limit[] (the largest symbol-coding value at each bit
1169 + * length, which is (previous limit<<1)+symbols at this level), and
1170 + * base[] (number of symbols to ignore at each bit length, which is
1171 + * limit minus the cumulative count of symbols coded for already). */
1172 + pp=t=0;
1173 + for (i=minLen; i<maxLen; i++) {
1174 + pp+=temp[i];
1175 + /* We read the largest possible symbol size and then unget bits
1176 + after determining how many we need, and those extra bits could
1177 + be set to anything. (They're noise from future symbols.) At
1178 + each level we're really only interested in the first few bits,
1179 + so here we set all the trailing to-be-ignored bits to 1 so they
1180 + don't affect the value>limit[length] comparison. */
1181 + limit[i]= (pp << (maxLen - i)) - 1;
1182 + pp<<=1;
1183 + base[i+1]=pp-(t+=temp[i]);
1184 + }
1185 + limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
1186 + limit[maxLen]=pp+temp[maxLen]-1;
1187 + base[minLen]=0;
1188 + }
1189 + /* We've finished reading and digesting the block header. Now read this
1190 + block's Huffman coded symbols from the file and undo the Huffman coding
1191 + and run length encoding, saving the result into dbuf[dbufCount++]=uc */
1192 +
1193 + /* Initialize symbol occurrence counters and symbol Move To Front table */
1194 + for(i=0;i<256;i++) {
1195 + byteCount[i] = 0;
1196 + mtfSymbol[i]=(unsigned char)i;
1197 + }
1198 + /* Loop through compressed symbols. */
1199 + runPos=dbufCount=symCount=selector=0;
1200 + for(;;) {
1201 + /* Determine which Huffman coding group to use. */
1202 + if(!(symCount--)) {
1203 + symCount=GROUP_SIZE-1;
1204 + if(selector>=nSelectors) return RETVAL_DATA_ERROR;
1205 + hufGroup=bd->groups+selectors[selector++];
1206 + base=hufGroup->base-1;
1207 + limit=hufGroup->limit-1;
1208 + }
1209 + /* Read next Huffman-coded symbol. */
1210 + /* Note: It is far cheaper to read maxLen bits and back up than it is
1211 + to read minLen bits and then an additional bit at a time, testing
1212 + as we go. Because there is a trailing last block (with file CRC),
1213 + there is no danger of the overread causing an unexpected EOF for a
1214 + valid compressed file. As a further optimization, we do the read
1215 + inline (falling back to a call to get_bits if the buffer runs
1216 + dry). The following (up to got_huff_bits:) is equivalent to
1217 + j=get_bits(bd,hufGroup->maxLen);
1218 + */
1219 + while (bd->inbufBitCount<hufGroup->maxLen) {
1220 + if(bd->inbufPos==bd->inbufCount) {
1221 + j = get_bits(bd,hufGroup->maxLen);
1222 + goto got_huff_bits;
1223 + }
1224 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
1225 + bd->inbufBitCount+=8;
1226 + };
1227 + bd->inbufBitCount-=hufGroup->maxLen;
1228 + j = (bd->inbufBits>>bd->inbufBitCount)&((1<<hufGroup->maxLen)-1);
1229 +got_huff_bits:
1230 + /* Figure out how many bits are in next symbol and unget extras */
1231 + i=hufGroup->minLen;
1232 + while(j>limit[i]) ++i;
1233 + bd->inbufBitCount += (hufGroup->maxLen - i);
1234 + /* Huffman decode value to get nextSym (with bounds checking) */
1235 + if ((i > hufGroup->maxLen)
1236 + || (((unsigned)(j=(j>>(hufGroup->maxLen-i))-base[i]))
1237 + >= MAX_SYMBOLS))
1238 + return RETVAL_DATA_ERROR;
1239 + nextSym = hufGroup->permute[j];
1240 + /* We have now decoded the symbol, which indicates either a new literal
1241 + byte, or a repeated run of the most recent literal byte. First,
1242 + check if nextSym indicates a repeated run, and if so loop collecting
1243 + how many times to repeat the last literal. */
1244 + if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
1245 + /* If this is the start of a new run, zero out counter */
1246 + if(!runPos) {
1247 + runPos = 1;
1248 + t = 0;
1249 + }
1250 + /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
1251 + each bit position, add 1 or 2 instead. For example,
1252 + 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
1253 + You can make any bit pattern that way using 1 less symbol than
1254 + the basic or 0/1 method (except all bits 0, which would use no
1255 + symbols, but a run of length 0 doesn't mean anything in this
1256 + context). Thus space is saved. */
1257 + t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
1258 + runPos <<= 1;
1259 + continue;
1260 + }
1261 + /* When we hit the first non-run symbol after a run, we now know
1262 + how many times to repeat the last literal, so append that many
1263 + copies to our buffer of decoded symbols (dbuf) now. (The last
1264 + literal used is the one at the head of the mtfSymbol array.) */
1265 + if(runPos) {
1266 + runPos=0;
1267 + if(dbufCount+t>=dbufSize) return RETVAL_DATA_ERROR;
1268 +
1269 + uc = symToByte[mtfSymbol[0]];
1270 + byteCount[uc] += t;
1271 + while(t--) dbuf[dbufCount++]=uc;
1272 + }
1273 + /* Is this the terminating symbol? */
1274 + if(nextSym>symTotal) break;
1275 + /* At this point, nextSym indicates a new literal character. Subtract
1276 + one to get the position in the MTF array at which this literal is
1277 + currently to be found. (Note that the result can't be -1 or 0,
1278 + because 0 and 1 are RUNA and RUNB. But another instance of the
1279 + first symbol in the mtf array, position 0, would have been handled
1280 + as part of a run above. Therefore 1 unused mtf position minus
1281 + 2 non-literal nextSym values equals -1.) */
1282 + if(dbufCount>=dbufSize) return RETVAL_DATA_ERROR;
1283 + i = nextSym - 1;
1284 + uc = mtfSymbol[i];
1285 + /* Adjust the MTF array. Since we typically expect to move only a
1286 + * small number of symbols, and are bound by 256 in any case, using
1287 + * memmove here would typically be bigger and slower due to function
1288 + * call overhead and other assorted setup costs. */
1289 + do {
1290 + mtfSymbol[i] = mtfSymbol[i-1];
1291 + } while (--i);
1292 + mtfSymbol[0] = uc;
1293 + uc=symToByte[uc];
1294 + /* We have our literal byte. Save it into dbuf. */
1295 + byteCount[uc]++;
1296 + dbuf[dbufCount++] = (unsigned int)uc;
1297 + }
1298 + /* At this point, we've read all the Huffman-coded symbols (and repeated
1299 + runs) for this block from the input stream, and decoded them into the
1300 + intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
1301 + Now undo the Burrows-Wheeler transform on dbuf.
1302 + See http://dogma.net/markn/articles/bwt/bwt.htm
1303 + */
1304 + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
1305 + j=0;
1306 + for(i=0;i<256;i++) {
1307 + k=j+byteCount[i];
1308 + byteCount[i] = j;
1309 + j=k;
1310 + }
1311 + /* Figure out what order dbuf would be in if we sorted it. */
1312 + for (i=0;i<dbufCount;i++) {
1313 + uc=(unsigned char)(dbuf[i] & 0xff);
1314 + dbuf[byteCount[uc]] |= (i << 8);
1315 + byteCount[uc]++;
1316 + }
1317 + /* Decode first byte by hand to initialize "previous" byte. Note that it
1318 + doesn't get output, and if the first three characters are identical
1319 + it doesn't qualify as a run (hence writeRunCountdown=5). */
1320 + if(dbufCount) {
1321 + if(origPtr>=dbufCount) return RETVAL_DATA_ERROR;
1322 + bd->writePos=dbuf[origPtr];
1323 + bd->writeCurrent=(unsigned char)(bd->writePos&0xff);
1324 + bd->writePos>>=8;
1325 + bd->writeRunCountdown=5;
1326 + }
1327 + bd->writeCount=dbufCount;
1328 +
1329 + return RETVAL_OK;
1330 +}
1331 +
1332 +/* Undo burrows-wheeler transform on intermediate buffer to produce output.
1333 + If start_bunzip was initialized with out_fd=-1, then up to len bytes of
1334 + data are written to outbuf. Return value is number of bytes written or
1335 + error (all errors are negative numbers). If out_fd!=-1, outbuf and len
1336 + are ignored, data is written to out_fd and return is RETVAL_OK or error.
1337 +*/
1338 +
1339 +static int read_bunzip(bunzip_data *bd, char *outbuf, int len)
1340 +{
1341 + const unsigned int *dbuf;
1342 + int pos,xcurrent,previous,gotcount;
1343 +
1344 + /* If last read was short due to end of file, return last block now */
1345 + if(bd->writeCount<0) return bd->writeCount;
1346 +
1347 + gotcount = 0;
1348 + dbuf=bd->dbuf;
1349 + pos=bd->writePos;
1350 + xcurrent=bd->writeCurrent;
1351 +
1352 + /* We will always have pending decoded data to write into the output
1353 + buffer unless this is the very first call (in which case we haven't
1354 + Huffman-decoded a block into the intermediate buffer yet). */
1355 +
1356 + if (bd->writeCopies) {
1357 + /* Inside the loop, writeCopies means extra copies (beyond 1) */
1358 + --bd->writeCopies;
1359 + /* Loop outputting bytes */
1360 + for(;;) {
1361 + /* If the output buffer is full, snapshot state and return */
1362 + if(gotcount >= len) {
1363 + bd->writePos=pos;
1364 + bd->writeCurrent=xcurrent;
1365 + bd->writeCopies++;
1366 + return len;
1367 + }
1368 + /* Write next byte into output buffer, updating CRC */
1369 + outbuf[gotcount++] = xcurrent;
1370 + bd->writeCRC=(((bd->writeCRC)<<8)
1371 + ^bd->crc32Table[((bd->writeCRC)>>24)^xcurrent]);
1372 + /* Loop now if we're outputting multiple copies of this byte */
1373 + if (bd->writeCopies) {
1374 + --bd->writeCopies;
1375 + continue;
1376 + }
1377 +decode_next_byte:
1378 + if (!bd->writeCount--) break;
1379 + /* Follow sequence vector to undo Burrows-Wheeler transform */
1380 + previous=xcurrent;
1381 + pos=dbuf[pos];
1382 + xcurrent=pos&0xff;
1383 + pos>>=8;
1384 + /* After 3 consecutive copies of the same byte, the 4th is a repeat
1385 + count. We count down from 4 instead
1386 + * of counting up because testing for non-zero is faster */
1387 + if(--bd->writeRunCountdown) {
1388 + if(xcurrent!=previous) bd->writeRunCountdown=4;
1389 + } else {
1390 + /* We have a repeated run, this byte indicates the count */
1391 + bd->writeCopies=xcurrent;
1392 + xcurrent=previous;
1393 + bd->writeRunCountdown=5;
1394 + /* Sometimes there are just 3 bytes (run length 0) */
1395 + if(!bd->writeCopies) goto decode_next_byte;
1396 + /* Subtract the 1 copy we'd output anyway to get extras */
1397 + --bd->writeCopies;
1398 + }
1399 + }
1400 + /* Decompression of this block completed successfully */
1401 + bd->writeCRC=~bd->writeCRC;
1402 + bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC;
1403 + /* If this block had a CRC error, force file level CRC error. */
1404 + if(bd->writeCRC!=bd->headerCRC) {
1405 + bd->totalCRC=bd->headerCRC+1;
1406 + return RETVAL_LAST_BLOCK;
1407 + }
1408 + }
1409 +
1410 + /* Refill the intermediate buffer by Huffman-decoding next block of input */
1411 + /* (previous is just a convenient unused temp variable here) */
1412 + previous=get_next_block(bd);
1413 + if(previous) {
1414 + bd->writeCount=previous;
1415 + return (previous!=RETVAL_LAST_BLOCK) ? previous : gotcount;
1416 + }
1417 + bd->writeCRC=0xffffffffUL;
1418 + pos=bd->writePos;
1419 + xcurrent=bd->writeCurrent;
1420 + goto decode_next_byte;
1421 +}
1422 +
1423 +static int nofill(void *buf,unsigned int len) {
1424 + return -1;
1425 +}
1426 +
1427 +/* Allocate the structure, read file header. If in_fd==-1, inbuf must contain
1428 + a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are
1429 + ignored, and data is read from file handle into temporary buffer. */
1430 +static int start_bunzip(bunzip_data **bdp, void *inbuf, int len,
1431 + int (*fill)(void*,unsigned int))
1432 +{
1433 + bunzip_data *bd;
1434 + unsigned int i,j,c;
1435 + const unsigned int BZh0=(((unsigned int)'B')<<24)+(((unsigned int)'Z')<<16)
1436 + +(((unsigned int)'h')<<8)+(unsigned int)'0';
1437 +
1438 + /* Figure out how much data to allocate */
1439 + i=sizeof(bunzip_data);
1440 +
1441 + /* Allocate bunzip_data. Most fields initialize to zero. */
1442 + bd=*bdp=malloc(i);
1443 + memset(bd,0,sizeof(bunzip_data));
1444 + /* Setup input buffer */
1445 + bd->inbuf=inbuf;
1446 + bd->inbufCount=len;
1447 + if(fill != NULL)
1448 + bd->fill=fill;
1449 + else
1450 + bd->fill=nofill;
1451 +
1452 + /* Init the CRC32 table (big endian) */
1453 + for(i=0;i<256;i++) {
1454 + c=i<<24;
1455 + for(j=8;j;j--)
1456 + c=c&0x80000000 ? (c<<1)^0x04c11db7 : (c<<1);
1457 + bd->crc32Table[i]=c;
1458 + }
1459 +
1460 + /* Ensure that file starts with "BZh['1'-'9']." */
1461 + i = get_bits(bd,32);
1462 + if (((unsigned int)(i-BZh0-1)) >= 9) return RETVAL_NOT_BZIP_DATA;
1463 +
1464 + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
1465 + uncompressed data. Allocate intermediate buffer for block. */
1466 + bd->dbufSize=100000*(i-BZh0);
1467 +
1468 + bd->dbuf=large_malloc(bd->dbufSize * sizeof(int));
1469 + return RETVAL_OK;
1470 +}
1471 +
1472 +/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip data,
1473 + not end of file.) */
1474 +STATIC int bunzip2(char *inbuf, int len,
1475 + int(*fill)(void*,unsigned int),
1476 + int(*writebb)(char*,unsigned int),
1477 + int *pos)
1478 +{
1479 + char *outbuf;
1480 + bunzip_data *bd;
1481 + int i;
1482 +
1483 + outbuf=malloc(BZIP2_IOBUF_SIZE);
1484 + if(!(i=start_bunzip(&bd,inbuf,len,fill))) {
1485 + for(;;) {
1486 + if((i=read_bunzip(bd,outbuf,BZIP2_IOBUF_SIZE)) <= 0) break;
1487 + if(i!=writebb(outbuf,i)) {
1488 + i=RETVAL_UNEXPECTED_OUTPUT_EOF;
1489 + break;
1490 + }
1491 + }
1492 + }
1493 + /* Check CRC and release memory */
1494 + if(i==RETVAL_LAST_BLOCK) {
1495 + if (bd->headerCRC!=bd->totalCRC) {
1496 + error("Data integrity error when decompressing.");
1497 + } else {
1498 + i=RETVAL_OK;
1499 + }
1500 + }
1501 + else if (i==RETVAL_UNEXPECTED_OUTPUT_EOF) {
1502 + error("Compressed file ends unexpectedly");
1503 + }
1504 + if(bd->dbuf) large_free(bd->dbuf);
1505 + if(pos)
1506 + *pos = bd->inbufPos;
1507 + free(bd);
1508 + free(outbuf);
1509 +
1510 + return i;
1511 +}
1512 +
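
Every bit of compressed input in the decompressor above goes through get_bits(), which returns an arbitrary number of bits MSB-first from a big-endian byte stream, refilling through bd->fill when the buffer runs dry. Below is a stand-alone model of those semantics, bit-at-a-time rather than the buffered version above but producing the same values; the sample input is the "BZh9" magic that start_bunzip() validates:

    /* MSB-first bit reader, modelling the semantics of get_bits() above
     * (illustrative only: reads from a memory buffer instead of bd->fill). */
    #include <stdio.h>

    struct bits { const unsigned char *p, *end; unsigned int acc; int n; };

    static unsigned int get_bits(struct bits *b, int wanted)
    {
        unsigned int out = 0;

        while (wanted > 0) {
            if (b->n == 0) {                   /* refill with the next byte */
                b->acc = (b->p < b->end) ? *b->p++ : 0;
                b->n = 8;
            }
            /* take the most significant remaining bit first */
            out = (out << 1) | ((b->acc >> (b->n - 1)) & 1);
            b->n--;
            wanted--;
        }
        return out;
    }

    int main(void)
    {
        const unsigned char data[] = { 'B', 'Z', 'h', '9' };
        struct bits b = { data, data + sizeof(data), 0, 0 };

        printf("%08x\n", get_bits(&b, 32));    /* prints 425a6839 */
        return 0;
    }

Reading 32 bits of "BZh9" this way yields 0x425a6839, which falls inside the BZh0-based range that start_bunzip() accepts for block sizes '1' through '9'.
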
1514 --- linux-2.6.29.3/lib/decompress_unlzma.c
1515 +++ linux-2.6.29.3/lib/decompress_unlzma.c
1516 @@ -0,0 +1,577 @@
1517 +/* Lzma decompressor for Linux kernel. Shamelessly snarfed
1518 + * from busybox 1.1.1
1519 + *
1520 + * Linux kernel adaptation
1521 + * Copyright (C) 2006 Alain <alain@knaff.lu>
1522 + *
1523 + * Based on small lzma deflate implementation/Small range coder
1524 + * implementation for lzma.
1525 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1526 + *
1527 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1528 + * Copyright (C) 1999-2005 Igor Pavlov
1529 + *
1530 + * Copyrights of the parts, see headers below.
1531 + *
1532 + *
1533 + * This program is free software; you can redistribute it and/or
1534 + * modify it under the terms of the GNU Lesser General Public
1535 + * License as published by the Free Software Foundation; either
1536 + * version 2.1 of the License, or (at your option) any later version.
1537 + *
1538 + * This program is distributed in the hope that it will be useful,
1539 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1540 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1541 + * Lesser General Public License for more details.
1542 + *
1543 + * You should have received a copy of the GNU Lesser General Public
1544 + * License along with this library; if not, write to the Free Software
1545 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1546 + */
1547 +
1548 +#ifndef STATIC
1549 +
1550 +#include <linux/kernel.h>
1551 +#include <linux/fs.h>
1552 +#include <linux/string.h>
1553 +
1554 +#ifdef TEST
1555 +#include "test.h"
1556 +#else
1557 +#include <linux/vmalloc.h>
1558 +#endif
1559 +
1560 +static void __init *large_malloc(size_t size)
1561 +{
1562 + return vmalloc(size);
1563 +}
1564 +
1565 +static void __init large_free(void *where)
1566 +{
1567 + vfree(where);
1568 +}
1569 +
1570 +#ifndef TEST
1571 +static void __init error(char *x)
1572 +{
1573 + printk(KERN_ERR "%s\n", x);
1574 +}
1575 +
1576 +#endif
1577 +
1578 +#define STATIC /**/
1579 +
1580 +#endif
1581 +
1582 +#define CONFIG_FEATURE_LZMA_FAST
1583 +#include <linux/decompress_unlzma.h>
1584 +
1585 +#define MIN(a,b) (((a)<(b))?(a):(b))
1586 +
1587 +static long long read_int(unsigned char *ptr, int size)
1588 +{
1589 + int i;
1590 + long long ret=0;
1591 +
1592 + for(i=0; i<size; i++) {
1593 + ret = (ret << 8) | ptr[size-i-1];
1594 + }
1595 + return ret;
1596 +}
1597 +
1598 +#define ENDIAN_CONVERT(x) x=(typeof(x))read_int((unsigned char*)&x,sizeof(x))
1599 +
1600 +
1601 +/* Small range coder implementation for lzma.
1602 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1603 + *
1604 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1605 + * Copyright (c) 1999-2005 Igor Pavlov
1606 + */
1607 +
1608 +#ifndef always_inline
1609 +# if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >0)
1610 +# define always_inline __attribute__((always_inline)) inline
1611 +# else
1612 +# define always_inline inline
1613 +# endif
1614 +#endif
1615 +
1616 +#ifdef CONFIG_FEATURE_LZMA_FAST
1617 +# define speed_inline always_inline
1618 +# define size_inline
1619 +#else
1620 +# define speed_inline
1621 +# define size_inline always_inline
1622 +#endif
1623 +
1624 +
1625 +typedef struct {
1626 + int (*fill)(void*,unsigned int);
1627 + uint8_t *ptr;
1628 + uint8_t *buffer;
1629 + uint8_t *buffer_end;
1630 + int buffer_size;
1631 + uint32_t code;
1632 + uint32_t range;
1633 +} rc_t;
1634 +
1635 +
1636 +#define RC_TOP_BITS 24
1637 +#define RC_MOVE_BITS 5
1638 +#define RC_MODEL_TOTAL_BITS 11
1639 +
1640 +
1641 +/* Called twice: once at startup and once in rc_normalize() */
1642 +static size_inline void rc_read(rc_t * rc)
1643 +{
1644 + if (!rc->buffer_size) return;
1645 + if (rc->fill) {
1646 + rc->buffer_size = rc->fill((char*)rc->buffer, LZMA_IOBUF_SIZE);
1647 + rc->ptr = rc->buffer;
1648 + rc->buffer_end = rc->buffer + rc->buffer_size;
1649 + if (rc->buffer_size > 0) return;
1650 + }
1651 + error("unexpected EOF");
1652 + rc->buffer_size = 0;
1653 +}
1654 +
1655 +/* Called once */
1656 +static always_inline void rc_init(rc_t * rc, int (*fill)(void*,unsigned int),
1657 + char *buffer, int buffer_size)
1658 +{
1659 + rc->fill = fill;
1660 + rc->buffer = (uint8_t *)buffer;
1661 + rc->buffer_size = buffer_size;
1662 + rc->buffer_end = rc->buffer + rc->buffer_size;
1663 + rc->ptr = rc->buffer;
1664 +
1665 + rc->code = 0;
1666 + rc->range = 0xFFFFFFFF;
1667 +}
1668 +
1669 +static always_inline void rc_init_code(rc_t * rc)
1670 +{
1671 + int i;
1672 +
1673 + for (i = 0; i < 5; i++) {
1674 + if (rc->ptr >= rc->buffer_end)
1675 + rc_read(rc);
1676 + rc->code = (rc->code << 8) | *rc->ptr++;
1677 + }
1678 +}
1679 +
1680 +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_1() */
1681 +static speed_inline void rc_do_normalize(rc_t * rc)
1682 +{
1683 + if (rc->ptr >= rc->buffer_end)
1684 + rc_read(rc);
1685 + rc->range <<= 8;
1686 + rc->code = (rc->code << 8) | *rc->ptr++;
1687 +}
1688 +static always_inline void rc_normalize(rc_t * rc)
1689 +{
1690 + if (rc->range < (1 << RC_TOP_BITS)) {
1691 + rc_do_normalize(rc);
1692 + }
1693 +}
1694 +
1695 +/* Called 9 times */
1696 +static speed_inline int rc_is_bit_1(rc_t * rc, uint16_t * p)
1697 +{
1698 + uint32_t bound;
1699 + rc_normalize(rc);
1700 + bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
1701 + if (rc->code < bound) {
1702 + rc->range = bound;
1703 + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
1704 + return 0;
1705 + }
1706 + else {
1707 + rc->code -= bound;
1708 + rc->range -= bound;
1709 + *p -= *p >> RC_MOVE_BITS;
1710 + return 1;
1711 + }
1712 +}
1713 +
1714 +/* Called 4 times in unlzma loop */
1715 +static speed_inline int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
1716 +{
1717 + int ret = rc_is_bit_1(rc, p);
1718 + *symbol = *symbol * 2 + ret;
1719 + return ret;
1720 +}
1721 +
1722 +/* Called once */
1723 +static always_inline int rc_direct_bit(rc_t * rc)
1724 +{
1725 + rc_normalize(rc);
1726 + rc->range >>= 1;
1727 + if (rc->code >= rc->range) {
1728 + rc->code -= rc->range;
1729 + return 1;
1730 + }
1731 + return 0;
1732 +}
1733 +
1734 +/* Called twice */
1735 +static speed_inline void
1736 +rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
1737 +{
1738 + int i = num_levels;
1739 +
1740 + *symbol = 1;
1741 + while (i--)
1742 + rc_get_bit(rc, p + *symbol, symbol);
1743 + *symbol -= 1 << num_levels;
1744 +}
1745 +
1746 +
1747 +/*
1748 + * Small lzma deflate implementation.
1749 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1750 + *
1751 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1752 + * Copyright (C) 1999-2005 Igor Pavlov
1753 + */
1754 +
1755 +
1756 +typedef struct {
1757 + uint8_t pos;
1758 + uint32_t dict_size;
1759 + uint64_t dst_size;
1760 +} __attribute__ ((packed)) lzma_header_t;
1761 +
1762 +
1763 +#define LZMA_BASE_SIZE 1846
1764 +#define LZMA_LIT_SIZE 768
1765 +
1766 +#define LZMA_NUM_POS_BITS_MAX 4
1767 +
1768 +#define LZMA_LEN_NUM_LOW_BITS 3
1769 +#define LZMA_LEN_NUM_MID_BITS 3
1770 +#define LZMA_LEN_NUM_HIGH_BITS 8
1771 +
1772 +#define LZMA_LEN_CHOICE 0
1773 +#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
1774 +#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
1775 +#define LZMA_LEN_MID (LZMA_LEN_LOW \
1776 + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
1777 +#define LZMA_LEN_HIGH (LZMA_LEN_MID \
1778 + +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
1779 +#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
1780 +
1781 +#define LZMA_NUM_STATES 12
1782 +#define LZMA_NUM_LIT_STATES 7
1783 +
1784 +#define LZMA_START_POS_MODEL_INDEX 4
1785 +#define LZMA_END_POS_MODEL_INDEX 14
1786 +#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
1787 +
1788 +#define LZMA_NUM_POS_SLOT_BITS 6
1789 +#define LZMA_NUM_LEN_TO_POS_STATES 4
1790 +
1791 +#define LZMA_NUM_ALIGN_BITS 4
1792 +
1793 +#define LZMA_MATCH_MIN_LEN 2
1794 +
1795 +#define LZMA_IS_MATCH 0
1796 +#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
1797 +#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
1798 +#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
1799 +#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
1800 +#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
1801 +#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
1802 + + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
1803 +#define LZMA_SPEC_POS (LZMA_POS_SLOT \
1804 + +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
1805 +#define LZMA_ALIGN (LZMA_SPEC_POS \
1806 + + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
1807 +#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
1808 +#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
1809 +#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
1810 +
1811 +
1812 +STATIC int unlzma(char *inbuf, int in_len,
1813 + int (*fill)(void *, unsigned int),
1814 + int (*writebb)(char *, unsigned int),
1815 + int *posp)
1816 +{
1817 + lzma_header_t header;
1818 + int lc, pb, lp;
1819 + uint32_t pos_state_mask;
1820 + uint32_t literal_pos_mask;
1821 + uint32_t pos;
1822 + uint16_t *p;
1823 + uint16_t *prob;
1824 + uint16_t *prob_lit;
1825 + int num_bits;
1826 + int num_probs;
1827 + rc_t rc;
1828 + int i, mi;
1829 + uint8_t *buffer;
1830 + uint8_t previous_byte = 0;
1831 + size_t buffer_pos = 0, global_pos = 0;
1832 + int len = 0;
1833 + int state = 0;
1834 + int bufsize;
1835 + uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
1836 +
1837 + rc_init(&rc, fill, inbuf, in_len);
1838 +
1839 + header.dict_size = (uint32_t) -1L;
1840 + header.dst_size = (uint64_t) -1LL;
1841 + if (inbuf && in_len > 0 && inbuf[0] == 0) {
1842 + const int LZMA_LC = 3, LZMA_LP = 0, LZMA_PB = 2;
1843 + header.pos = (LZMA_PB * 45) + (LZMA_LP * 9) + LZMA_LC; /* (pb*5 + lp)*9 + lc = 0x5d with the defaults */
1844 + rc.ptr++;
1845 + }
1846 + else {
1847 + int hdrsize = sizeof(header);
1848 + if (inbuf && in_len > 12 &&
1849 + (1 + *(unsigned long *) &inbuf[9]) > 1U)
1850 + hdrsize = 5; /* the would-be dst_size field looks like data: assume a 5-byte header without dst_size */
1851 + for (i = 0; i < hdrsize; i++) {
1852 + if (rc.ptr >= rc.buffer_end)
1853 + rc_read(&rc);
1854 + ((unsigned char *)&header)[i] = *rc.ptr++;
1855 + }
1856 + }
1857 +
1858 + if (header.pos >= (9 * 5 * 5)) {
1859 + error("bad header");
1860 + return -1;
1861 + }
1862 +
1863 + mi = header.pos / 9;
1864 + lc = header.pos % 9;
1865 + pb = mi / 5;
1866 + lp = mi % 5;
1867 + pos_state_mask = (1 << pb) - 1;
1868 + literal_pos_mask = (1 << lp) - 1;
1869 +
1870 + ENDIAN_CONVERT(header.dict_size);
1871 + ENDIAN_CONVERT(header.dst_size);
1872 +
1873 + if (header.dict_size == 0)
1874 + header.dict_size = 1;
1875 +
1876 + bufsize = MIN(header.dst_size, header.dict_size);
1877 + buffer = (uint8_t *) posp; /* with no writebb callback, posp is reused as the output buffer */
1878 + if (writebb) buffer = large_malloc(bufsize);
1879 + if (buffer == NULL)
1880 + return -1;
1881 +
1882 + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
1883 + p = large_malloc(num_probs * sizeof(*p));
1884 + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
1885 + for (i = 0; i < num_probs; i++)
1886 + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
1887 +
1888 + rc_init_code(&rc);
1889 +
1890 + while (global_pos + buffer_pos < header.dst_size) {
1891 + int pos_state = (buffer_pos + global_pos) & pos_state_mask;
1892 +
1893 + prob =
1894 + p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
1895 + if (!rc_is_bit_1(&rc, prob)) { /* bit 0: decode a literal byte */
1896 + mi = 1;
1897 + prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE
1898 + * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
1899 + + (previous_byte >> (8 - lc)))));
1900 +
1901 + if (state >= LZMA_NUM_LIT_STATES) { /* after a match: use the byte at distance rep0 as context */
1902 + int match_byte;
1903 +
1904 + pos = buffer_pos - rep0;
1905 +
1906 + while (pos >= header.dict_size)
1907 + pos += header.dict_size;
1908 + if (pos >= bufsize) {
1909 + goto fail;
1910 + }
1911 +
1912 + match_byte = buffer[pos];
1913 + do {
1914 + int bit;
1915 +
1916 + match_byte <<= 1;
1917 + bit = match_byte & 0x100;
1918 + prob_lit = prob + 0x100 + bit + mi;
1919 + bit ^= (rc_get_bit(&rc, prob_lit, &mi) << 8);
1920 + if (bit)
1921 + break;
1922 + } while (mi < 0x100);
1923 + }
1924 + while (mi < 0x100) {
1925 + prob_lit = prob + mi;
1926 + rc_get_bit(&rc, prob_lit, &mi);
1927 + }
1928 + state -= 3;
1929 + if (state < 4 - 3)
1930 + state = 0;
1931 + if (state >= 10 - 3)
1932 + state -= 6 - 3;
1933 + previous_byte = (uint8_t) mi;
1934 + goto store_previous_byte;
1935 + } else { /* bit 1: decode a match */
1936 + int offset;
1937 + uint16_t *prob_len;
1938 +
1939 + prob = p + LZMA_IS_REP + state;
1940 + if (!rc_is_bit_1(&rc, prob)) { /* not a rep: a new distance follows */
1941 + rep3 = rep2;
1942 + rep2 = rep1;
1943 + rep1 = rep0;
1944 + state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
1945 + prob = p + LZMA_LEN_CODER;
1946 + } else { /* rep match: reuse one of the last four distances */
1947 + prob += LZMA_IS_REP_G0 - LZMA_IS_REP;
1948 + if (!rc_is_bit_1(&rc, prob)) {
1949 + prob = (p + LZMA_IS_REP_0_LONG
1950 + + (state << LZMA_NUM_POS_BITS_MAX)
1951 + + pos_state);
1952 + if (!rc_is_bit_1(&rc, prob)) {
1953 +
1954 + state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
1955 + pos = buffer_pos - rep0;
1956 +
1957 + while (pos >= header.dict_size)
1958 + pos += header.dict_size;
1959 + if (pos >= bufsize) {
1960 + goto fail;
1961 + }
1962 +
1963 + previous_byte = buffer[pos];
1964 + store_previous_byte:
1965 + if (!rc.buffer_size) goto eof;
1966 + buffer[buffer_pos++] = previous_byte;
1967 + if (writebb && buffer_pos == header.dict_size) {
1968 + buffer_pos = 0;
1969 + global_pos += header.dict_size;
1970 + writebb((char*)buffer, header.dict_size);
1971 + }
1972 + continue;
1973 + }
1974 + } else {
1975 + uint32_t distance;
1976 +
1977 + prob += LZMA_IS_REP_G1 - LZMA_IS_REP_G0;
1978 + distance = rep1;
1979 + if (rc_is_bit_1(&rc, prob)) {
1980 + prob += LZMA_IS_REP_G2 - LZMA_IS_REP_G1;
1981 + distance = rep2;
1982 + if (rc_is_bit_1(&rc, prob)) {
1983 + distance = rep3;
1984 + rep3 = rep2;
1985 + }
1986 + rep2 = rep1;
1987 + }
1988 + rep1 = rep0;
1989 + rep0 = distance;
1990 + }
1991 + state = state < LZMA_NUM_LIT_STATES ? 8 : 11;
1992 + prob = p + LZMA_REP_LEN_CODER;
1993 + }
1994 +
1995 + prob_len = prob + LZMA_LEN_CHOICE; /* decode the match length from the low/mid/high length trees */
1996 + if (!rc_is_bit_1(&rc, prob_len)) {
1997 + prob_len += LZMA_LEN_LOW - LZMA_LEN_CHOICE
1998 + + (pos_state << LZMA_LEN_NUM_LOW_BITS);
1999 + offset = 0;
2000 + num_bits = LZMA_LEN_NUM_LOW_BITS;
2001 + } else {
2002 + prob_len += LZMA_LEN_CHOICE_2 - LZMA_LEN_CHOICE;
2003 + if (!rc_is_bit_1(&rc, prob_len)) {
2004 + prob_len += LZMA_LEN_MID - LZMA_LEN_CHOICE_2
2005 + + (pos_state << LZMA_LEN_NUM_MID_BITS);
2006 + offset = 1 << LZMA_LEN_NUM_LOW_BITS;
2007 + num_bits = LZMA_LEN_NUM_MID_BITS;
2008 + } else {
2009 + prob_len += LZMA_LEN_HIGH - LZMA_LEN_CHOICE_2;
2010 + offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
2011 + + (1 << LZMA_LEN_NUM_MID_BITS));
2012 + num_bits = LZMA_LEN_NUM_HIGH_BITS;
2013 + }
2014 + }
2015 + rc_bit_tree_decode(&rc, prob_len, num_bits, &len);
2016 + len += offset;
2017 +
2018 + if (state < 4) { /* simple match: decode a new distance from the pos-slot tree */
2019 + int pos_slot;
2020 +
2021 + state += LZMA_NUM_LIT_STATES;
2022 + prob = p + LZMA_POS_SLOT +
2023 + ((len <
2024 + LZMA_NUM_LEN_TO_POS_STATES ? len :
2025 + LZMA_NUM_LEN_TO_POS_STATES - 1)
2026 + << LZMA_NUM_POS_SLOT_BITS);
2027 + rc_bit_tree_decode(&rc, prob, LZMA_NUM_POS_SLOT_BITS,
2028 + &pos_slot);
2029 + rep0 = pos_slot;
2030 + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
2031 + num_bits = (pos_slot >> 1) - 1;
2032 + rep0 = 2 | (pos_slot & 1);
2033 + prob = p + LZMA_ALIGN;
2034 + if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
2035 + rep0 <<= num_bits;
2036 + prob += LZMA_SPEC_POS - LZMA_ALIGN - 1 + rep0 - pos_slot;
2037 + } else {
2038 + num_bits -= LZMA_NUM_ALIGN_BITS;
2039 + while (num_bits--)
2040 + rep0 = (rep0 << 1) | rc_direct_bit(&rc);
2041 + rep0 <<= LZMA_NUM_ALIGN_BITS;
2042 + num_bits = LZMA_NUM_ALIGN_BITS;
2043 + }
2044 + i = 1;
2045 + mi = 1;
2046 + while (num_bits--) {
2047 + if (rc_get_bit(&rc, prob + mi, &mi))
2048 + rep0 |= i;
2049 + i <<= 1;
2050 + }
2051 + }
2052 + if (++rep0 == 0) /* distance 0xffffffff marks end of stream */
2053 + break;
2054 + }
2055 +
2056 + len += LZMA_MATCH_MIN_LEN;
2057 +
2058 + if (!rc.buffer_size) goto eof;
2059 + do {
2060 + pos = buffer_pos - rep0;
2061 +
2062 + while (pos >= header.dict_size)
2063 + pos += header.dict_size;
2064 + if (pos >= bufsize) {
2065 + goto fail;
2066 + }
2067 +
2068 + previous_byte = buffer[pos];
2069 + buffer[buffer_pos++] = previous_byte;
2070 + if (writebb && buffer_pos == header.dict_size) {
2071 + buffer_pos = 0;
2072 + global_pos += header.dict_size;
2073 + writebb((char*)buffer, header.dict_size);
2074 + }
2075 + len--;
2076 + } while (len != 0 && (global_pos + buffer_pos) < header.dst_size);
2077 + }
2078 + }
2079 + eof:
2080 + if (writebb) {
2081 + writebb((char*)buffer, buffer_pos);
2082 + if (posp) {
2083 + *posp = rc.ptr-rc.buffer;
2084 + }
2085 + large_free(buffer);
2086 + }
2087 + large_free(p);
2088 + return 0;
2089 + fail:
2090 + if (writebb) large_free(buffer);
2091 + large_free(p);
2092 + return -1;
2093 +}
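unlzma() above supports two output modes: with a writebb callback it flushes dictionary-sized chunks through the callback and, on success, stores the number of input bytes consumed in *posp; with writebb == NULL it decompresses straight into the memory that posp points to. A minimal caller sketch for the in-place mode (the wrapper name and the assumption that the caller has already sized the output buffer are mine, not the patch's):

/* Hypothetical wrapper, not part of this patch: in-place decompression
 * of an in-memory LZMA image.  With writebb == NULL, unlzma() writes
 * its output directly through the pointer passed as posp. */
static int unlzma_to_buffer(char *in, int in_len, void *out)
{
	return unlzma(in, in_len, NULL /* fill */, NULL /* writebb */,
		      (int *) out);
}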
2095 --- linux-2.6.29.3/lib/unlzma_syms.c
2096 +++ linux-2.6.29.3/lib/unlzma_syms.c
2097 @@ -0,0 +1,14 @@
2098 +/*
2099 + * linux/lib/unlzma_syms.c
2100 + *
2101 + * Exported symbols for the unlzma functionality.
2102 + *
2103 + */
2104 +
2105 +#include <linux/module.h>
2106 +#include <linux/init.h>
2107 +
2108 +#include <linux/decompress_unlzma.h>
2109 +
2110 +EXPORT_SYMBOL(unlzma);
2111 +MODULE_LICENSE("GPL");
2113 --- linux-2.6.29.3/scripts/Makefile.lib
2114 +++ linux-2.6.29.3/scripts/Makefile.lib
2115 @@ -185,4 +185,17 @@
2116 quiet_cmd_gzip = GZIP $@
2117 cmd_gzip = gzip -f -9 < $< > $@
2119 +# Append the uncompressed file size (packed 4-byte native integer) to the compressed image
2120 +size_append=perl -e 'print(pack("i",(stat($$ARGV[0]))[7]));'
2122 +# Bzip2
2123 +# ---------------------------------------------------------------------------
2124 +
2125 +quiet_cmd_bzip2 = BZIP2 $@
2126 +cmd_bzip2 = (bzip2 -9 < $< ; $(size_append) $<) > $@
2127 +
2128 +# Lzma
2129 +# ---------------------------------------------------------------------------
2130 +
2131 +quiet_cmd_lzma = LZMA $@
2132 +cmd_lzma = (lzma e $< -so ; $(size_append) $<) > $@
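Both new rules pipe the compressed stream followed by the output of size_append into the target, so the last four bytes of vmlinux.bin.bz2 / vmlinux.bin.lzma carry the uncompressed length as a packed native 32-bit integer (little-endian on the x86 targets this patch touches). As an illustrative sketch only, not code from this patch, a boot stub could recover that trailer like so:

/* Illustrative only: read back the 32-bit length that size_append
 * appended to the compressed payload (assumes little-endian storage,
 * which holds for x86). */
static unsigned int appended_output_size(const unsigned char *payload,
					 unsigned int payload_len)
{
	const unsigned char *p = payload + payload_len - 4;

	return p[0] | (p[1] << 8) | (p[2] << 16) | ((unsigned int) p[3] << 24);
}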