wok-stable view linux/stuff/linux-lzma-2.6.25.5.u @ rev 2075

Add: xfdesktop, xfprint*, xfwm4, xfwm4-themes
author Eric Joseph-Alexandre <erjo@slitaz.org>
date Fri Jan 23 23:10:35 2009 +0100 (2009-01-23)
parents 039aaf6420b3
children a36fcdad71b1
1 --- linux-2.6.25.5/arch/x86/boot/compressed/Makefile
2 +++ linux-2.6.25.5/arch/x86/boot/compressed/Makefile
3 @@ -4,7 +4,7 @@
4 # create a compressed vmlinux image from the original vmlinux
5 #
7 -targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
8 +targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
10 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
11 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
12 @@ -50,15 +50,41 @@
13 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
14 $(call if_changed,gzip)
15 endif
16 +
17 +ifdef CONFIG_RELOCATABLE
18 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
19 + $(call if_changed,bzip2)
20 +else
21 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
22 + $(call if_changed,bzip2)
23 +endif
24 +
25 +ifdef CONFIG_RELOCATABLE
26 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
27 + $(call if_changed,lzma)
28 +else
29 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
30 + $(call if_changed,lzma)
31 +endif
32 +
33 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
35 else
36 +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
37 + $(call if_changed,bzip2)
38 +
39 +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
40 + $(call if_changed,lzma)
41 +
42 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
43 $(call if_changed,gzip)
45 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
46 endif
48 +suffix_$(CONFIG_KERNEL_GZIP) = gz
49 +suffix_$(CONFIG_KERNEL_BZIP2) = bz2
50 +suffix_$(CONFIG_KERNEL_LZMA) = lzma
52 -$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
53 +$(obj)/piggy.o: $(src)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
54 $(call if_changed,ld)
56 --- linux-2.6.25.5/arch/x86/boot/compressed/misc.c
57 +++ linux-2.6.25.5/arch/x86/boot/compressed/misc.c
58 @@ -130,9 +130,12 @@
59 * always be larger than our output buffer.
60 */
62 +#ifdef CONFIG_KERNEL_GZIP
63 static uch *inbuf; /* input buffer */
64 +#endif
65 static uch *window; /* Sliding window buffer, (and final output buffer) */
67 +#ifdef CONFIG_KERNEL_GZIP
68 static unsigned insize; /* valid bytes in inbuf */
69 static unsigned inptr; /* index of next byte to be processed in inbuf */
70 static unsigned outcnt; /* bytes in output buffer */
71 @@ -167,9 +170,12 @@
73 static int fill_inbuf(void);
74 static void flush_window(void);
75 +#endif
76 static void error(char *m);
77 +#ifdef CONFIG_KERNEL_GZIP
78 static void gzip_mark(void **);
79 static void gzip_release(void **);
80 +#endif
82 /*
83 * This is set up by the setup-routine at boot-time
84 @@ -185,12 +191,12 @@
85 extern unsigned char input_data[];
86 extern int input_len;
88 -static long bytes_out = 0;
89 -
90 static void *malloc(int size);
91 static void free(void *where);
93 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
94 static void *memset(void *s, int c, unsigned n);
95 +#endif
96 static void *memcpy(void *dest, const void *src, unsigned n);
98 static void putstr(const char *);
99 @@ -204,11 +210,15 @@
100 static memptr free_mem_ptr;
101 static memptr free_mem_end_ptr;
103 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
104 +#define HEAP_SIZE 0x400000
105 +#else
106 #ifdef CONFIG_X86_64
107 #define HEAP_SIZE 0x7000
108 #else
109 #define HEAP_SIZE 0x4000
110 #endif
111 +#endif
113 static char *vidmem = (char *)0xb8000;
114 static int vidport;
115 @@ -218,7 +228,29 @@
116 void *xquad_portio;
117 #endif
119 +#if (defined CONFIG_KERNEL_BZIP2 || defined CONFIG_KERNEL_LZMA)
120 +
121 +#define large_malloc malloc
122 +#define large_free free
123 +
124 +#ifdef current
125 +#undef current
126 +#endif
127 +
128 +#define INCLUDED
129 +#endif
130 +
131 +#ifdef CONFIG_KERNEL_GZIP
132 #include "../../../../lib/inflate.c"
133 +#endif
134 +
135 +#ifdef CONFIG_KERNEL_BZIP2
136 +#include "../../../../lib/decompress_bunzip2.c"
137 +#endif
138 +
139 +#ifdef CONFIG_KERNEL_LZMA
140 +#include "../../../../lib/decompress_unlzma.c"
141 +#endif
143 static void *malloc(int size)
144 {
145 @@ -242,6 +274,7 @@
146 { /* Don't care */
147 }
149 +#ifdef CONFIG_KERNEL_GZIP
150 static void gzip_mark(void **ptr)
151 {
152 *ptr = (void *) free_mem_ptr;
153 @@ -251,6 +284,7 @@
154 {
155 free_mem_ptr = (memptr) *ptr;
156 }
157 +#endif
159 static void scroll(void)
160 {
161 @@ -303,6 +337,7 @@
162 outb(0xff & (pos >> 1), vidport+1);
163 }
165 +#if (defined CONFIG_KERNEL_GZIP || defined CONFIG_KERNEL_BZIP2)
166 static void* memset(void* s, int c, unsigned n)
167 {
168 int i;
169 @@ -311,6 +346,7 @@
170 for (i=0;i<n;i++) ss[i] = c;
171 return s;
172 }
173 +#endif
175 static void* memcpy(void* dest, const void* src, unsigned n)
176 {
177 @@ -322,6 +358,26 @@
178 return dest;
179 }
181 +#ifdef CONFIG_KERNEL_BZIP2
182 +/* ===========================================================================
183 + * Write the output window window[0..outcnt-1].
184 + * (Used for the decompressed data only.)
185 + */
186 +static int compr_flush(char *data, unsigned int len)
187 +{
188 + unsigned n;
189 + uch *out;
190 +
191 + out = window;
192 + for (n = 0; n < len; n++) {
193 + *out++ = *data++;
194 + }
195 + window += (ulg)len;
196 + return len;
197 +}
198 +
199 +#endif
200 +#ifdef CONFIG_KERNEL_GZIP
201 /* ===========================================================================
202 * Fill the input buffer. This is called only when the buffer is empty
203 * and at least one byte is really needed.
204 @@ -333,7 +389,7 @@
205 }
207 /* ===========================================================================
208 - * Write the output window window[0..outcnt-1] and update crc and bytes_out.
209 + * Write the output window window[0..outcnt-1] and update crc.
210 * (Used for the decompressed data only.)
211 */
212 static void flush_window(void)
213 @@ -351,9 +407,9 @@
214 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
215 }
216 crc = c;
217 - bytes_out += (ulg)outcnt;
218 outcnt = 0;
219 }
220 +#endif
222 static void error(char *x)
223 {
224 @@ -385,9 +441,11 @@
225 window = output; /* Output buffer (Normally at 1M) */
226 free_mem_ptr = heap; /* Heap */
227 free_mem_end_ptr = heap + HEAP_SIZE;
228 +#ifdef CONFIG_KERNEL_GZIP
229 inbuf = input_data; /* Input buffer */
230 insize = input_len;
231 inptr = 0;
232 +#endif
234 #ifdef CONFIG_X86_64
235 if ((ulg)output & (__KERNEL_ALIGN - 1))
236 @@ -405,9 +463,21 @@
237 #endif
238 #endif
240 +#ifdef CONFIG_KERNEL_BZIP2
241 + putstr("\nBunzipping Linux... ");
242 + bunzip2(input_data, input_len-4, NULL, compr_flush, NULL);
243 +#endif
244 +
245 +#ifdef CONFIG_KERNEL_LZMA
246 + putstr("\nUnlzmaing Linux... ");
247 + unlzma(input_data, input_len-4, NULL, NULL, window);
248 +#endif
249 +
250 +#ifdef CONFIG_KERNEL_GZIP
251 makecrc();
252 putstr("\nDecompressing Linux... ");
253 gunzip();
254 +#endif
255 putstr("done.\nBooting the kernel.\n");
256 return;
257 }
259 --- linux-2.6.25.5/drivers/block/Kconfig
260 +++ linux-2.6.25.5/drivers/block/Kconfig
261 @@ -357,6 +357,30 @@
262 will prevent RAM block device backing store memory from being
263 allocated from highmem (only a problem for highmem systems).
265 +config RD_BZIP2
266 + bool "Initial ramdisk compressed using bzip2"
267 + default n
268 + depends on BLK_DEV_INITRD=y
269 + help
270 + Support loading of a bzip2 encoded initial ramdisk or cpio buffer
271 + If unsure, say N.
272 +
273 +config RD_LZMA
274 + bool "Initial ramdisk compressed using lzma"
275 + default n
276 + depends on BLK_DEV_INITRD=y
277 + help
278 + Support loading of a lzma encoded initial ramdisk or cpio buffer
279 + If unsure, say N.
280 +
281 +config RD_GZIP
282 + bool "Initial ramdisk compressed using gzip"
283 + default y
284 + depends on BLK_DEV_INITRD=y
285 + help
286 + Support loading of a gzip encoded initial ramdisk or cpio buffer.
287 + If unsure, say Y.
288 +
289 config CDROM_PKTCDVD
290 tristate "Packet writing on CD/DVD media"
291 depends on !UML
293 --- linux-2.6.25.5/include/linux/decompress_bunzip2.h
294 +++ linux-2.6.25.5/include/linux/decompress_bunzip2.h
295 @@ -0,0 +1,16 @@
296 +#ifndef DECOMPRESS_BUNZIP2_H
297 +#define DECOMPRESS_BUNZIP2_H
298 +
299 +/* Other housekeeping constants */
300 +#define BZIP2_IOBUF_SIZE 4096
301 +
302 +#ifndef STATIC
303 +#define STATIC /**/
304 +#endif
305 +
306 +STATIC int bunzip2(char *inbuf, int len,
307 + int(*fill)(void*,unsigned int),
308 + int(*writebb)(char*,unsigned int),
309 + int *pos);
310 +
311 +#endif
313 --- linux-2.6.25.5/include/linux/decompress_generic.h
314 +++ linux-2.6.25.5/include/linux/decompress_generic.h
315 @@ -0,0 +1,28 @@
316 +#ifndef DECOMPRESS_GENERIC_H
317 +#define DECOMPRESS_GENERIC_H
318 +
319 +/* Minimal chunksize to be read.
320 + * Bzip2 prefers at least 4096
321 + * Lzma prefers 0x10000 */
322 +#define COMPR_IOBUF_SIZE 4096
323 +
324 +typedef int (*uncompress_fn) (char *inbuf, int len,
325 + int(*fill)(char*,unsigned int),
326 + int(*writebb)(char*,unsigned int),
327 + int *posp);
328 +
329 +/* inbuf - input buffer
330 + * len - length of pre-read data in inbuf
331 + * fill - function to fill inbuf if empty
332 + * writebb - function to write out the output buffer
333 + * posp - if non-null, input position (number of bytes read) will be
334 + * returned here
335 + *
336 + * If len != 0, the inbuf is initialized (with as much data), and fill
337 + * should not be called
338 + * If len = 0, the inbuf is allocated, but empty. Its size is IOBUF_SIZE
339 + * fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
340 + */
341 +
342 +
343 +#endif
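The header above documents the calling convention shared by bunzip2() and unlzma(): either the whole compressed stream is handed over in inbuf (fill == NULL), or fill() is called repeatedly to top the buffer up, and decompressed data is pushed out through the writebb callback. As a purely illustrative sketch (the example_* helpers and the dst pointer are assumptions, not code from the patch), an in-memory caller could look like this:

/* Sketch only: decompress an LZMA stream that is already complete in
 * memory, mirroring the unlzma(buf, len, NULL, flush_buffer, &inptr)
 * call made in the init/initramfs.c hunk of this patch. */
static char *dst;	/* assumed to point at a buffer big enough for the output */

static int example_flush(char *data, unsigned int len)
{
	memcpy(dst, data, len);		/* consume len decompressed bytes */
	dst += len;
	return len;			/* returning less than len signals an error */
}

static int example_unlzma(char *src, int src_len)
{
	/* Whole input supplied up front, so no fill callback is needed;
	 * posp is NULL because the consumed length is not of interest here. */
	return unlzma(src, src_len, NULL, example_flush, NULL);
}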
345 --- linux-2.6.25.5/include/linux/decompress_unlzma.h
346 +++ linux-2.6.25.5/include/linux/decompress_unlzma.h
347 @@ -0,0 +1,15 @@
348 +#ifndef DECOMPRESS_UNLZMA_H
349 +#define DECOMPRESS_UNLZMA_H
350 +
351 +#define LZMA_IOBUF_SIZE 0x10000
352 +
353 +#ifndef STATIC
354 +#define STATIC /**/
355 +#endif
356 +
357 +STATIC int unlzma(char *inbuf, int len,
358 + int(*fill)(void*,unsigned int),
359 + int(*writebb)(char*,unsigned int),
360 + int *pos);
361 +
362 +#endif
364 --- linux-2.6.25.5/init/do_mounts_rd.c
365 +++ linux-2.6.25.5/init/do_mounts_rd.c
366 @@ -8,6 +8,16 @@
367 #include <linux/initrd.h>
368 #include <linux/string.h>
370 +#ifdef CONFIG_RD_BZIP2
371 +#include <linux/decompress_bunzip2.h>
372 +#undef STATIC
373 +#endif
374 +
375 +#ifdef CONFIG_RD_LZMA
376 +#include <linux/decompress_unlzma.h>
377 +#undef STATIC
378 +#endif
379 +
380 #include "do_mounts.h"
382 #define BUILD_CRAMDISK
383 @@ -30,7 +40,15 @@ static int __init ramdisk_start_setup(ch
384 }
385 __setup("ramdisk_start=", ramdisk_start_setup);
387 +#ifdef CONFIG_RD_GZIP
388 static int __init crd_load(int in_fd, int out_fd);
389 +#endif
390 +#ifdef CONFIG_RD_BZIP2
391 +static int __init crd_load_bzip2(int in_fd, int out_fd);
392 +#endif
393 +#ifdef CONFIG_RD_LZMA
394 +static int __init crd_load_lzma(int in_fd, int out_fd);
395 +#endif
397 /*
398 * This routine tries to find a RAM disk image to load, and returns the
399 @@ -46,7 +64,7 @@ static int __init crd_load(int in_fd, in
400 * gzip
401 */
402 static int __init
403 -identify_ramdisk_image(int fd, int start_block)
404 +identify_ramdisk_image(int fd, int start_block, int *ztype)
405 {
406 const int size = 512;
407 struct minix_super_block *minixsb;
408 @@ -72,6 +90,7 @@ identify_ramdisk_image(int fd, int start
409 sys_lseek(fd, start_block * BLOCK_SIZE, 0);
410 sys_read(fd, buf, size);
412 +#ifdef CONFIG_RD_GZIP
413 /*
414 * If it matches the gzip magic numbers, return -1
415 */
416 @@ -79,9 +98,40 @@ identify_ramdisk_image(int fd, int start
417 printk(KERN_NOTICE
418 "RAMDISK: Compressed image found at block %d\n",
419 start_block);
420 + *ztype = 0;
421 + nblocks = 0;
422 + goto done;
423 + }
424 +#endif
425 +
426 +#ifdef CONFIG_RD_BZIP2
427 + /*
428 + * If it matches the bzip magic numbers, return -1
429 + */
430 + if (buf[0] == 0x42 && (buf[1] == 0x5a)) {
431 + printk(KERN_NOTICE
432 + "RAMDISK: Bzipped image found at block %d\n",
433 + start_block);
434 + *ztype = 1;
435 + nblocks = 0;
436 + goto done;
437 + }
438 +#endif
439 +
440 +#ifdef CONFIG_RD_LZMA
441 + /*
442 + * If it matches the lzma magic numbers, return -1
443 + */
444 + if (buf[0] == 0x5d && (buf[1] == 0x00)) {
445 + printk(KERN_NOTICE
446 + "RAMDISK: Lzma image found at block %d\n",
447 + start_block);
448 + *ztype = 2;
449 nblocks = 0;
450 goto done;
451 }
452 +#endif
453 +
455 /* romfs is at block zero too */
456 if (romfsb->word0 == ROMSB_WORD0 &&
457 @@ -145,6 +195,7 @@ int __init rd_load_image(char *from)
458 int nblocks, i, disk;
459 char *buf = NULL;
460 unsigned short rotate = 0;
461 + int ztype=-1;
462 #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
463 char rotator[4] = { '|' , '/' , '-' , '\\' };
464 #endif
465 @@ -157,14 +208,38 @@ int __init rd_load_image(char *from)
466 if (in_fd < 0)
467 goto noclose_input;
469 - nblocks = identify_ramdisk_image(in_fd, rd_image_start);
470 + nblocks = identify_ramdisk_image(in_fd, rd_image_start, &ztype);
471 if (nblocks < 0)
472 goto done;
474 if (nblocks == 0) {
475 #ifdef BUILD_CRAMDISK
476 - if (crd_load(in_fd, out_fd) == 0)
477 - goto successful_load;
478 + switch(ztype) {
479 +
480 +#ifdef CONFIG_RD_GZIP
481 + case 0:
482 + if (crd_load(in_fd, out_fd) == 0)
483 + goto successful_load;
484 + break;
485 +#endif
486 +
487 +#ifdef CONFIG_RD_BZIP2
488 + case 1:
489 + if (crd_load_bzip2(in_fd, out_fd) == 0)
490 + goto successful_load;
491 + break;
492 +#endif
493 +
494 +#ifdef CONFIG_RD_LZMA
495 + case 2:
496 + if (crd_load_lzma(in_fd, out_fd) == 0)
497 + goto successful_load;
498 + break;
499 +#endif
500 +
501 + default:
502 + break;
503 + }
504 #else
505 printk(KERN_NOTICE
506 "RAMDISK: Kernel does not support compressed "
507 @@ -269,6 +344,7 @@ int __init rd_load_disk(int n)
509 #ifdef BUILD_CRAMDISK
511 +#ifdef CONFIG_RD_GZIP
512 /*
513 * gzip declarations
514 */
515 @@ -296,8 +372,11 @@ static unsigned outcnt; /* bytes in out
516 static int exit_code;
517 static int unzip_error;
518 static long bytes_out;
519 +#endif
520 +
521 static int crd_infd, crd_outfd;
523 +#ifdef CONFIG_RD_GZIP
524 #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
526 /* Diagnostic functions (stubbed out) */
527 @@ -359,7 +438,22 @@ static int __init fill_inbuf(void)
529 return inbuf[0];
530 }
531 +#endif
532 +
533 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
534 +static int __init compr_fill(void *buf, unsigned int len)
535 +{
536 + int r = sys_read(crd_infd, buf, len);
537 + if(r < 0) {
538 + printk(KERN_ERR "RAMDISK: error while reading compressed data");
539 + } else if(r == 0) {
540 + printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
541 + }
542 + return r;
543 +}
544 +#endif
546 +#ifdef CONFIG_RD_GZIP
547 /* ===========================================================================
548 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
549 * (Used for the decompressed data only.)
550 @@ -385,7 +479,24 @@ static void __init flush_window(void)
551 bytes_out += (ulg)outcnt;
552 outcnt = 0;
553 }
554 +#endif
555 +
556 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
557 +static int __init compr_flush(void *window, unsigned int outcnt) {
558 + static int progressDots=0;
559 + int written = sys_write(crd_outfd, window, outcnt);
560 + if (written != outcnt) {
561 + printk(KERN_ERR "RAMDISK: incomplete write (%d != %d)\n",
562 + written, outcnt);
563 + }
564 + progressDots = (progressDots+1)%10;
565 + if(!progressDots)
566 + printk(".");
567 + return outcnt;
568 +}
569 +#endif
571 +#ifdef CONFIG_RD_GZIP
572 static void __init error(char *x)
573 {
574 printk(KERN_ERR "%s\n", x);
575 @@ -425,5 +536,43 @@ static int __init crd_load(int in_fd, in
576 kfree(window);
577 return result;
578 }
579 +#endif
580 +
581 +#if (defined CONFIG_RD_BZIP2 || defined CONFIG_RD_LZMA)
582 +static int __init crd_load_compr(int in_fd, int out_fd, int size,
583 + int (*deco)(char *,int,
584 + int(*fill)(void*,unsigned int),
585 + int(*flush)(void*,unsigned int),
586 + int *))
587 +{
588 + int result;
589 + char *inbuf = kmalloc(size, GFP_KERNEL);
590 + crd_infd = in_fd;
591 + crd_outfd = out_fd;
592 + if (inbuf == 0) {
593 + printk(KERN_ERR "RAMDISK: Couldn't allocate decompression buffer\n");
594 + return -1;
595 + }
596 + result=deco(inbuf, 0, compr_fill, compr_flush, NULL);
597 + kfree(inbuf);
598 + printk("\n");
599 + return result;
600 +}
601 +#endif
602 +
603 +#ifdef CONFIG_RD_BZIP2
604 +static int __init crd_load_bzip2(int in_fd, int out_fd)
605 +{
606 + return crd_load_compr(in_fd, out_fd, BZIP2_IOBUF_SIZE, bunzip2);
607 +}
608 +#endif
609 +
610 +#ifdef CONFIG_RD_LZMA
611 +static int __init crd_load_lzma(int in_fd, int out_fd)
612 +{
613 + return crd_load_compr(in_fd, out_fd, LZMA_IOBUF_SIZE, unlzma);
614 +}
615 +
616 +#endif
618 #endif /* BUILD_CRAMDISK */
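The do_mounts_rd.c changes above extend identify_ramdisk_image() with a ztype out-parameter so that rd_load_image() can dispatch to crd_load(), crd_load_bzip2() or crd_load_lzma(). Condensed into a single illustrative helper (not code from the patch; the gzip check lives in the pre-existing context that the hunk leaves in place), the magic-byte detection amounts to:

/* Sketch only: the ztype values assigned by identify_ramdisk_image(),
 * keyed off the first two bytes of the ramdisk image. */
static int example_ztype(const unsigned char *buf)
{
	if (buf[0] == 0x1f && buf[1] == 0x8b)	/* gzip magic */
		return 0;			/* handled by crd_load() */
	if (buf[0] == 0x42 && buf[1] == 0x5a)	/* "BZ", bzip2 magic */
		return 1;			/* handled by crd_load_bzip2() */
	if (buf[0] == 0x5d && buf[1] == 0x00)	/* usual lzma header start */
		return 2;			/* handled by crd_load_lzma() */
	return -1;				/* not a recognized compressed image */
}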
620 --- linux-2.6.25.5/init/initramfs.c
621 +++ linux-2.6.25.5/init/initramfs.c
622 @@ -367,6 +367,18 @@
623 }
624 }
626 +#ifdef CONFIG_RD_BZIP2
627 +#include <linux/decompress_bunzip2.h>
628 +#undef STATIC
629 +
630 +#endif
631 +
632 +#ifdef CONFIG_RD_LZMA
633 +#include <linux/decompress_unlzma.h>
634 +#undef STATIC
635 +
636 +#endif
637 +
638 /*
639 * gzip declarations
640 */
641 @@ -441,6 +453,29 @@
642 outcnt = 0;
643 }
645 +#include <linux/initrd.h>
646 +#ifdef CONFIG_RD_LZMA
647 +#define INITRD_PAGE ((PAGE_SIZE > 1024*1024) ? PAGE_SIZE : 1024*1024)
648 +static int fill_offset, fill_total;
649 +static int fill_buffer(void *buffer, unsigned size)
650 +{
651 + int max = initrd_end - initrd_start - fill_offset;
652 + if (size < max) max = size;
653 + memcpy(buffer, (void *)(initrd_start + fill_offset), max);
654 + fill_offset += max;
655 + fill_total += max;
656 + if (fill_offset >= INITRD_PAGE) {
657 + unsigned rem = fill_offset % INITRD_PAGE;
658 + unsigned end = initrd_start + fill_offset - rem;
659 + free_initrd_mem(initrd_start, end);
660 + printk(".");
661 + initrd_start = end;
662 + fill_offset = rem;
663 + }
664 + return max;
665 +}
666 +#endif
667 +
668 static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
669 {
670 int written;
671 @@ -455,6 +490,9 @@
672 this_header = 0;
673 message = NULL;
674 while (!message && len) {
675 +#ifdef CONFIG_RD_LZMA
676 + int status;
677 +#endif
678 loff_t saved_offset = this_header;
679 if (*buf == '0' && !(this_header & 3)) {
680 state = Start;
681 @@ -477,9 +515,42 @@
682 bytes_out = 0;
683 crc = (ulg)0xffffffffL; /* shift register contents */
684 makecrc();
685 - gunzip();
686 + if(!gunzip() && message == NULL)
687 + goto ok;
688 +
689 +#ifdef CONFIG_RD_BZIP2
690 + message = NULL; /* Zero out message, or else cpio will
691 + think an error has already occurred */
692 + if(!bunzip2(buf, len, NULL, flush_buffer, &inptr) &&
693 + message == NULL) {
694 + goto ok;
695 + }
696 +#endif
697 +
698 +#ifdef CONFIG_RD_LZMA
699 + message = NULL; /* Zero out message, or else cpio will
700 + think an error has already occurred */
701 + status = -1;
702 + if(buf == (char *) initrd_start) {
703 + char *work_buffer = malloc(LZMA_IOBUF_SIZE);
704 + if (work_buffer) {
705 + fill_total = fill_offset = 0;
706 + fill_buffer(work_buffer, LZMA_IOBUF_SIZE);
707 + status = unlzma(work_buffer, LZMA_IOBUF_SIZE,
708 + fill_buffer, flush_buffer, NULL);
709 + inptr = fill_total;
710 + free(work_buffer);
711 + }
712 + }
713 + else status = unlzma(buf,len, NULL, flush_buffer, &inptr);
714 + if (status == 0 && message == NULL) {
715 + goto ok;
716 + }
717 +#endif
718 + ok:
719 +
720 if (state != Reset)
721 - error("junk in gzipped archive");
722 + error("junk in compressed archive");
723 this_header = saved_offset + inptr;
724 buf += inptr;
725 len -= inptr;
726 @@ -545,7 +616,7 @@
727 if (err)
728 panic(err);
729 if (initrd_start) {
730 -#ifdef CONFIG_BLK_DEV_RAM
731 +#ifdef NOT_IN_SLITAZ_CONFIG_BLK_DEV_RAM
732 int fd;
733 printk(KERN_INFO "checking if image is initramfs...");
734 err = unpack_to_rootfs((char *)initrd_start,
736 --- linux-2.6.25.5/init/Kconfig
737 +++ linux-2.6.25.5/init/Kconfig
738 @@ -100,6 +100,56 @@
740 which is done within the script "scripts/setlocalversion".)
742 +choice
743 + prompt "Kernel compression mode"
744 + default KERNEL_GZIP
745 + help
746 + The linux kernel is a kind of self-extracting executable.
747 + Several compression algorithms are available, which differ
748 + in efficiency, compression and decompression speed.
749 + Compression speed is only relevant when building a kernel.
750 + Decompression speed is relevant at each boot.
751 +
752 + If you have any problems with bzip2 or lzma compressed
753 + kernels, mail me (Alain Knaff) <alain@knaff.lu>. (An older
754 + version of this functionality (bzip2 only), for 2.4, was
755 + supplied by Christian Ludwig)
756 +
757 + High compression options are mostly useful for users who
758 + are low on disk space (embedded systems), but for whom RAM
759 + size matters less.
760 +
761 + If in doubt, select 'gzip'.
762 +
763 +config KERNEL_GZIP
764 + bool "Gzip"
765 + help
766 + The old and tried gzip compression. Its compression ratio is
767 + the poorest among the 3 choices; however its speed (both
768 + compression and decompression) is the fastest.
769 +
770 +config KERNEL_BZIP2
771 + bool "Bzip2"
772 + help
773 + Its compression ratio and speed are intermediate.
774 + Decompression speed is slowest among the 3.
775 + The kernel size is about 10 per cent smaller with bzip2,
776 + in comparison to gzip.
777 + Bzip2 uses a large amount of memory. For modern kernels
778 + you will need at least 8MB RAM or more for booting.
779 +
780 +config KERNEL_LZMA
781 + bool "LZMA"
782 + help
783 + The most recent compression algorithm.
784 + Its ratio is best, decompression speed is between the other
785 + 2. Compression is slowest.
786 + The kernel size is about 33 per cent smaller with lzma,
787 + in comparison to gzip.
788 +
789 +endchoice
790 +
791 +
792 config SWAP
793 bool "Support for paging of anonymous memory (swap)"
794 depends on MMU && BLOCK
796 --- linux-2.6.25.5/lib/decompress_bunzip2.c
797 +++ linux-2.6.25.5/lib/decompress_bunzip2.c
798 @@ -0,0 +1,645 @@
799 +/* vi: set sw=4 ts=4: */
800 +/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
801 +
802 + Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
803 + which also acknowledges contributions by Mike Burrows, David Wheeler,
804 + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
805 + Robert Sedgewick, and Jon L. Bentley.
806 +
807 + This code is licensed under the LGPLv2:
808 + LGPL (http://www.gnu.org/copyleft/lgpl.html)
809 +*/
810 +
811 +/*
812 + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org).
813 +
814 + More efficient reading of Huffman codes, a streamlined read_bunzip()
815 + function, and various other tweaks. In (limited) tests, approximately
816 + 20% faster than bzcat on x86 and about 10% faster on arm.
817 +
818 + Note that about 2/3 of the time is spent in read_bunzip() reversing
819 + the Burrows-Wheeler transformation. Much of that time is delay
820 + resulting from cache misses.
821 +
822 + I would ask that anyone benefiting from this work, especially those
823 + using it in commercial products, consider making a donation to my local
824 + non-profit hospice organization in the name of the woman I loved, who
825 + passed away Feb. 12, 2003.
826 +
827 + In memory of Toni W. Hagan
828 +
829 + Hospice of Acadiana, Inc.
830 + 2600 Johnston St., Suite 200
831 + Lafayette, LA 70503-3240
832 +
833 + Phone (337) 232-1234 or 1-800-738-2226
834 + Fax (337) 232-1297
835 +
836 + http://www.hospiceacadiana.com/
837 +
838 + Manuel
839 + */
840 +
841 +/*
842 + Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
843 +*/
844 +
845 +
846 +#ifndef STATIC
847 +
848 +#include <linux/kernel.h>
849 +#include <linux/fs.h>
850 +#include <linux/string.h>
851 +
852 +#ifdef TEST
853 +#include "test.h"
854 +#else
855 +#include <linux/vmalloc.h>
856 +#endif
857 +
858 +static void __init *large_malloc(size_t size)
859 +{
860 + return vmalloc(size);
861 +}
862 +
863 +static void __init large_free(void *where)
864 +{
865 + vfree(where);
866 +}
867 +
868 +#ifndef TEST
869 +static void __init *malloc(size_t size)
870 +{
871 + return kmalloc(size, GFP_KERNEL);
872 +}
873 +
874 +static void __init free(void *where)
875 +{
876 + kfree(where);
877 +}
878 +
879 +static void __init error(char *x)
880 +{
881 + printk(KERN_ERR "%s\n", x);
882 +}
883 +#endif
884 +
885 +#define STATIC /**/
886 +
887 +#endif
888 +
889 +#include <linux/decompress_bunzip2.h>
890 +
891 +
892 +/* Constants for Huffman coding */
893 +#define MAX_GROUPS 6
894 +#define GROUP_SIZE 50 /* 64 would have been more efficient */
895 +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */
896 +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */
897 +#define SYMBOL_RUNA 0
898 +#define SYMBOL_RUNB 1
899 +
900 +/* Status return values */
901 +#define RETVAL_OK 0
902 +#define RETVAL_LAST_BLOCK (-1)
903 +#define RETVAL_NOT_BZIP_DATA (-2)
904 +#define RETVAL_UNEXPECTED_INPUT_EOF (-3)
905 +#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4)
906 +#define RETVAL_DATA_ERROR (-5)
907 +#define RETVAL_OUT_OF_MEMORY (-6)
908 +#define RETVAL_OBSOLETE_INPUT (-7)
909 +
910 +
911 +/* This is what we know about each Huffman coding group */
912 +struct group_data {
913 + /* We have an extra slot at the end of limit[] for a sentinel value. */
914 + int limit[MAX_HUFCODE_BITS+1],base[MAX_HUFCODE_BITS],permute[MAX_SYMBOLS];
915 + int minLen, maxLen;
916 +};
917 +
918 +/* Structure holding all the housekeeping data, including IO buffers and
919 + memory that persists between calls to bunzip */
920 +typedef struct {
921 + /* State for interrupting output loop */
922 + int writeCopies,writePos,writeRunCountdown,writeCount,writeCurrent;
923 + /* I/O tracking data (file handles, buffers, positions, etc.) */
924 + int (*fill)(void*,unsigned int);
925 + int inbufCount,inbufPos /*,outbufPos*/;
926 + unsigned char *inbuf /*,*outbuf*/;
927 + unsigned int inbufBitCount, inbufBits;
928 + /* The CRC values stored in the block header and calculated from the data */
929 + unsigned int crc32Table[256],headerCRC, totalCRC, writeCRC;
930 + /* Intermediate buffer and its size (in bytes) */
931 + unsigned int *dbuf, dbufSize;
932 + /* These things are a bit too big to go on the stack */
933 + unsigned char selectors[32768]; /* nSelectors=15 bits */
934 + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */
935 + int io_error; /* non-zero if we have IO error */
936 +} bunzip_data;
937 +
938 +
939 +/* Return the next nnn bits of input. All reads from the compressed input
940 + are done through this function. All reads are big endian */
941 +static unsigned int get_bits(bunzip_data *bd, char bits_wanted)
942 +{
943 + unsigned int bits=0;
944 +
945 + /* If we need to get more data from the byte buffer, do so. (Loop getting
946 + one byte at a time to enforce endianness and avoid unaligned access.) */
947 + while (bd->inbufBitCount<bits_wanted) {
948 + /* If we need to read more data from file into byte buffer, do so */
949 + if(bd->inbufPos==bd->inbufCount) {
950 + if(bd->io_error)
951 + return 0;
952 + if((bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE)) <= 0) {
953 + bd->io_error=RETVAL_UNEXPECTED_INPUT_EOF;
954 + return 0;
955 + }
956 + bd->inbufPos=0;
957 + }
958 + /* Avoid 32-bit overflow (dump bit buffer to top of output) */
959 + if(bd->inbufBitCount>=24) {
960 + bits=bd->inbufBits&((1<<bd->inbufBitCount)-1);
961 + bits_wanted-=bd->inbufBitCount;
962 + bits<<=bits_wanted;
963 + bd->inbufBitCount=0;
964 + }
965 + /* Grab next 8 bits of input from buffer. */
966 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
967 + bd->inbufBitCount+=8;
968 + }
969 + /* Calculate result */
970 + bd->inbufBitCount-=bits_wanted;
971 + bits|=(bd->inbufBits>>bd->inbufBitCount)&((1<<bits_wanted)-1);
972 +
973 + return bits;
974 +}
975 +
976 +/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */
977 +
978 +static int get_next_block(bunzip_data *bd)
979 +{
980 + struct group_data *hufGroup=NULL;
981 + int *base=NULL;
982 + int *limit=NULL;
983 + int dbufCount,nextSym,dbufSize,groupCount,selector,
984 + i,j,k,t,runPos,symCount,symTotal,nSelectors,byteCount[256];
985 + unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
986 + unsigned int *dbuf,origPtr;
987 +
988 + dbuf=bd->dbuf;
989 + dbufSize=bd->dbufSize;
990 + selectors=bd->selectors;
991 +
992 + /* Read in header signature and CRC, then validate signature.
993 + (last block signature means CRC is for whole file, return now) */
994 + i = get_bits(bd,24);
995 + j = get_bits(bd,24);
996 + bd->headerCRC=get_bits(bd,32);
997 + if ((i == 0x177245) && (j == 0x385090)) return RETVAL_LAST_BLOCK;
998 + if ((i != 0x314159) || (j != 0x265359)) return RETVAL_NOT_BZIP_DATA;
999 + /* We can add support for blockRandomised if anybody complains. There was
1000 + some code for this in busybox 1.0.0-pre3, but nobody ever noticed that
1001 + it didn't actually work. */
1002 + if(get_bits(bd,1)) return RETVAL_OBSOLETE_INPUT;
1003 + if((origPtr=get_bits(bd,24)) > dbufSize) return RETVAL_DATA_ERROR;
1004 + /* mapping table: if some byte values are never used (encoding things
1005 + like ascii text), the compression code removes the gaps to have fewer
1006 + symbols to deal with, and writes a sparse bitfield indicating which
1007 + values were present. We make a translation table to convert the symbols
1008 + back to the corresponding bytes. */
1009 + t=get_bits(bd, 16);
1010 + symTotal=0;
1011 + for (i=0;i<16;i++) {
1012 + if(t&(1<<(15-i))) {
1013 + k=get_bits(bd,16);
1014 + for(j=0;j<16;j++)
1015 + if(k&(1<<(15-j))) symToByte[symTotal++]=(16*i)+j;
1016 + }
1017 + }
1018 + /* How many different Huffman coding groups does this block use? */
1019 + groupCount=get_bits(bd,3);
1020 + if (groupCount<2 || groupCount>MAX_GROUPS) return RETVAL_DATA_ERROR;
1021 + /* nSelectors: Every GROUP_SIZE many symbols we select a new Huffman coding
1022 + group. Read in the group selector list, which is stored as MTF encoded
1023 + bit runs. (MTF=Move To Front, as each value is used it's moved to the
1024 + start of the list.) */
1025 + if(!(nSelectors=get_bits(bd, 15))) return RETVAL_DATA_ERROR;
1026 + for(i=0; i<groupCount; i++) mtfSymbol[i] = i;
1027 + for(i=0; i<nSelectors; i++) {
1028 + /* Get next value */
1029 + for(j=0;get_bits(bd,1);j++) if (j>=groupCount) return RETVAL_DATA_ERROR;
1030 + /* Decode MTF to get the next selector */
1031 + uc = mtfSymbol[j];
1032 + for(;j;j--) mtfSymbol[j] = mtfSymbol[j-1];
1033 + mtfSymbol[0]=selectors[i]=uc;
1034 + }
1035 + /* Read the Huffman coding tables for each group, which code for symTotal
1036 + literal symbols, plus two run symbols (RUNA, RUNB) */
1037 + symCount=symTotal+2;
1038 + for (j=0; j<groupCount; j++) {
1039 + unsigned char length[MAX_SYMBOLS],temp[MAX_HUFCODE_BITS+1];
1040 + int minLen, maxLen, pp;
1041 + /* Read Huffman code lengths for each symbol. They're stored in
1042 + a way similar to mtf; record a starting value for the first symbol,
1043 + and an offset from the previous value for every symbol after that.
1044 + (Subtracting 1 before the loop and then adding it back at the end is
1045 + an optimization that makes the test inside the loop simpler: symbol
1046 + length 0 becomes negative, so an unsigned inequality catches it.) */
1047 + t=get_bits(bd, 5)-1;
1048 + for (i = 0; i < symCount; i++) {
1049 + for(;;) {
1050 + if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
1051 + return RETVAL_DATA_ERROR;
1052 + /* If first bit is 0, stop. Else second bit indicates whether
1053 + to increment or decrement the value. Optimization: grab 2
1054 + bits and unget the second if the first was 0. */
1055 + k = get_bits(bd,2);
1056 + if (k < 2) {
1057 + bd->inbufBitCount++;
1058 + break;
1059 + }
1060 + /* Add one if second bit 1, else subtract 1. Avoids if/else */
1061 + t+=(((k+1)&2)-1);
1062 + }
1063 + /* Correct for the initial -1, to get the final symbol length */
1064 + length[i]=t+1;
1065 + }
1066 + /* Find largest and smallest lengths in this group */
1067 + minLen=maxLen=length[0];
1068 + for(i = 1; i < symCount; i++) {
1069 + if(length[i] > maxLen) maxLen = length[i];
1070 + else if(length[i] < minLen) minLen = length[i];
1071 + }
1072 + /* Calculate permute[], base[], and limit[] tables from length[].
1073 + *
1074 + * permute[] is the lookup table for converting Huffman coded symbols
1075 + * into decoded symbols. base[] is the amount to subtract from the
1076 + * value of a Huffman symbol of a given length when using permute[].
1077 + *
1078 + * limit[] indicates the largest numerical value a symbol with a given
1079 + * number of bits can have. This is how the Huffman codes can vary in
1080 + * length: each code with a value>limit[length] needs another bit.
1081 + */
1082 + hufGroup=bd->groups+j;
1083 + hufGroup->minLen = minLen;
1084 + hufGroup->maxLen = maxLen;
1085 + /* Note that minLen can't be smaller than 1, so we adjust the base
1086 + and limit array pointers so we're not always wasting the first
1087 + entry. We do this again when using them (during symbol decoding).*/
1088 + base=hufGroup->base-1;
1089 + limit=hufGroup->limit-1;
1090 + /* Calculate permute[]. Concurrently, initialize temp[] and limit[]. */
1091 + pp=0;
1092 + for(i=minLen;i<=maxLen;i++) {
1093 + temp[i]=limit[i]=0;
1094 + for(t=0;t<symCount;t++)
1095 + if(length[t]==i) hufGroup->permute[pp++] = t;
1096 + }
1097 + /* Count symbols coded for at each bit length */
1098 + for (i=0;i<symCount;i++) temp[length[i]]++;
1099 + /* Calculate limit[] (the largest symbol-coding value at each bit
1100 + * length, which is (previous limit<<1)+symbols at this level), and
1101 + * base[] (number of symbols to ignore at each bit length, which is
1102 + * limit minus the cumulative count of symbols coded for already). */
1103 + pp=t=0;
1104 + for (i=minLen; i<maxLen; i++) {
1105 + pp+=temp[i];
1106 + /* We read the largest possible symbol size and then unget bits
1107 + after determining how many we need, and those extra bits could
1108 + be set to anything. (They're noise from future symbols.) At
1109 + each level we're really only interested in the first few bits,
1110 + so here we set all the trailing to-be-ignored bits to 1 so they
1111 + don't affect the value>limit[length] comparison. */
1112 + limit[i]= (pp << (maxLen - i)) - 1;
1113 + pp<<=1;
1114 + base[i+1]=pp-(t+=temp[i]);
1115 + }
1116 + limit[maxLen+1] = INT_MAX; /* Sentinel value for reading next sym. */
1117 + limit[maxLen]=pp+temp[maxLen]-1;
1118 + base[minLen]=0;
1119 + }
1120 + /* We've finished reading and digesting the block header. Now read this
1121 + block's Huffman coded symbols from the file and undo the Huffman coding
1122 + and run length encoding, saving the result into dbuf[dbufCount++]=uc */
1124 + /* Initialize symbol occurrence counters and symbol Move To Front table */
1125 + for(i=0;i<256;i++) {
1126 + byteCount[i] = 0;
1127 + mtfSymbol[i]=(unsigned char)i;
1128 + }
1129 + /* Loop through compressed symbols. */
1130 + runPos=dbufCount=symCount=selector=0;
1131 + for(;;) {
1132 + /* Determine which Huffman coding group to use. */
1133 + if(!(symCount--)) {
1134 + symCount=GROUP_SIZE-1;
1135 + if(selector>=nSelectors) return RETVAL_DATA_ERROR;
1136 + hufGroup=bd->groups+selectors[selector++];
1137 + base=hufGroup->base-1;
1138 + limit=hufGroup->limit-1;
1139 + }
1140 + /* Read next Huffman-coded symbol. */
1141 + /* Note: It is far cheaper to read maxLen bits and back up than it is
1142 + to read minLen bits and then an additional bit at a time, testing
1143 + as we go. Because there is a trailing last block (with file CRC),
1144 + there is no danger of the overread causing an unexpected EOF for a
1145 + valid compressed file. As a further optimization, we do the read
1146 + inline (falling back to a call to get_bits if the buffer runs
1147 + dry). The following (up to got_huff_bits:) is equivalent to
1148 + j=get_bits(bd,hufGroup->maxLen);
1149 + */
1150 + while (bd->inbufBitCount<hufGroup->maxLen) {
1151 + if(bd->inbufPos==bd->inbufCount) {
1152 + j = get_bits(bd,hufGroup->maxLen);
1153 + goto got_huff_bits;
1154 + }
1155 + bd->inbufBits=(bd->inbufBits<<8)|bd->inbuf[bd->inbufPos++];
1156 + bd->inbufBitCount+=8;
1157 + };
1158 + bd->inbufBitCount-=hufGroup->maxLen;
1159 + j = (bd->inbufBits>>bd->inbufBitCount)&((1<<hufGroup->maxLen)-1);
1160 +got_huff_bits:
1161 + /* Figure out how many bits are in next symbol and unget extras */
1162 + i=hufGroup->minLen;
1163 + while(j>limit[i]) ++i;
1164 + bd->inbufBitCount += (hufGroup->maxLen - i);
1165 + /* Huffman decode value to get nextSym (with bounds checking) */
1166 + if ((i > hufGroup->maxLen)
1167 + || (((unsigned)(j=(j>>(hufGroup->maxLen-i))-base[i]))
1168 + >= MAX_SYMBOLS))
1169 + return RETVAL_DATA_ERROR;
1170 + nextSym = hufGroup->permute[j];
1171 + /* We have now decoded the symbol, which indicates either a new literal
1172 + byte, or a repeated run of the most recent literal byte. First,
1173 + check if nextSym indicates a repeated run, and if so loop collecting
1174 + how many times to repeat the last literal. */
1175 + if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
1176 + /* If this is the start of a new run, zero out counter */
1177 + if(!runPos) {
1178 + runPos = 1;
1179 + t = 0;
1180 + }
1181 + /* Neat trick that saves 1 symbol: instead of or-ing 0 or 1 at
1182 + each bit position, add 1 or 2 instead. For example,
1183 + 1011 is 1<<0 + 1<<1 + 2<<2. 1010 is 2<<0 + 2<<1 + 1<<2.
1184 + You can make any bit pattern that way using 1 less symbol than
1185 + the basic or 0/1 method (except all bits 0, which would use no
1186 + symbols, but a run of length 0 doesn't mean anything in this
1187 + context). Thus space is saved. */
1188 + t += (runPos << nextSym); /* +runPos if RUNA; +2*runPos if RUNB */
1189 + runPos <<= 1;
1190 + continue;
1191 + }
1192 + /* When we hit the first non-run symbol after a run, we now know
1193 + how many times to repeat the last literal, so append that many
1194 + copies to our buffer of decoded symbols (dbuf) now. (The last
1195 + literal used is the one at the head of the mtfSymbol array.) */
1196 + if(runPos) {
1197 + runPos=0;
1198 + if(dbufCount+t>=dbufSize) return RETVAL_DATA_ERROR;
1200 + uc = symToByte[mtfSymbol[0]];
1201 + byteCount[uc] += t;
1202 + while(t--) dbuf[dbufCount++]=uc;
1203 + }
1204 + /* Is this the terminating symbol? */
1205 + if(nextSym>symTotal) break;
1206 + /* At this point, nextSym indicates a new literal character. Subtract
1207 + one to get the position in the MTF array at which this literal is
1208 + currently to be found. (Note that the result can't be -1 or 0,
1209 + because 0 and 1 are RUNA and RUNB. But another instance of the
1210 + first symbol in the mtf array, position 0, would have been handled
1211 + as part of a run above. Therefore 1 unused mtf position minus
1212 + 2 non-literal nextSym values equals -1.) */
1213 + if(dbufCount>=dbufSize) return RETVAL_DATA_ERROR;
1214 + i = nextSym - 1;
1215 + uc = mtfSymbol[i];
1216 + /* Adjust the MTF array. Since we typically expect to move only a
1217 + * small number of symbols, and are bound by 256 in any case, using
1218 + * memmove here would typically be bigger and slower due to function
1219 + * call overhead and other assorted setup costs. */
1220 + do {
1221 + mtfSymbol[i] = mtfSymbol[i-1];
1222 + } while (--i);
1223 + mtfSymbol[0] = uc;
1224 + uc=symToByte[uc];
1225 + /* We have our literal byte. Save it into dbuf. */
1226 + byteCount[uc]++;
1227 + dbuf[dbufCount++] = (unsigned int)uc;
1228 + }
1229 + /* At this point, we've read all the Huffman-coded symbols (and repeated
1230 + runs) for this block from the input stream, and decoded them into the
1231 + intermediate buffer. There are dbufCount many decoded bytes in dbuf[].
1232 + Now undo the Burrows-Wheeler transform on dbuf.
1233 + See http://dogma.net/markn/articles/bwt/bwt.htm
1234 + */
1235 + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
1236 + j=0;
1237 + for(i=0;i<256;i++) {
1238 + k=j+byteCount[i];
1239 + byteCount[i] = j;
1240 + j=k;
1241 + }
1242 + /* Figure out what order dbuf would be in if we sorted it. */
1243 + for (i=0;i<dbufCount;i++) {
1244 + uc=(unsigned char)(dbuf[i] & 0xff);
1245 + dbuf[byteCount[uc]] |= (i << 8);
1246 + byteCount[uc]++;
1247 + }
1248 + /* Decode first byte by hand to initialize "previous" byte. Note that it
1249 + doesn't get output, and if the first three characters are identical
1250 + it doesn't qualify as a run (hence writeRunCountdown=5). */
1251 + if(dbufCount) {
1252 + if(origPtr>=dbufCount) return RETVAL_DATA_ERROR;
1253 + bd->writePos=dbuf[origPtr];
1254 + bd->writeCurrent=(unsigned char)(bd->writePos&0xff);
1255 + bd->writePos>>=8;
1256 + bd->writeRunCountdown=5;
1257 + }
1258 + bd->writeCount=dbufCount;
1260 + return RETVAL_OK;
1261 +}
1263 +/* Undo the Burrows-Wheeler transform on the intermediate buffer to
1264 + produce output. Up to len bytes of decompressed data are written to
1265 + outbuf per call. The return value is the number of bytes written, or
1266 + an error (all errors are negative numbers); bunzip2() below calls this
1267 + repeatedly until the return value drops to zero or below.
1268 +*/
1270 +static int read_bunzip(bunzip_data *bd, char *outbuf, int len)
1271 +{
1272 + const unsigned int *dbuf;
1273 + int pos,xcurrent,previous,gotcount;
1275 + /* If last read was short due to end of file, return last block now */
1276 + if(bd->writeCount<0) return bd->writeCount;
1278 + gotcount = 0;
1279 + dbuf=bd->dbuf;
1280 + pos=bd->writePos;
1281 + xcurrent=bd->writeCurrent;
1283 + /* We will always have pending decoded data to write into the output
1284 + buffer unless this is the very first call (in which case we haven't
1285 + Huffman-decoded a block into the intermediate buffer yet). */
1287 + if (bd->writeCopies) {
1288 + /* Inside the loop, writeCopies means extra copies (beyond 1) */
1289 + --bd->writeCopies;
1290 + /* Loop outputting bytes */
1291 + for(;;) {
1292 + /* If the output buffer is full, snapshot state and return */
1293 + if(gotcount >= len) {
1294 + bd->writePos=pos;
1295 + bd->writeCurrent=xcurrent;
1296 + bd->writeCopies++;
1297 + return len;
1298 + }
1299 + /* Write next byte into output buffer, updating CRC */
1300 + outbuf[gotcount++] = xcurrent;
1301 + bd->writeCRC=(((bd->writeCRC)<<8)
1302 + ^bd->crc32Table[((bd->writeCRC)>>24)^xcurrent]);
1303 + /* Loop now if we're outputting multiple copies of this byte */
1304 + if (bd->writeCopies) {
1305 + --bd->writeCopies;
1306 + continue;
1307 + }
1308 +decode_next_byte:
1309 + if (!bd->writeCount--) break;
1310 + /* Follow sequence vector to undo Burrows-Wheeler transform */
1311 + previous=xcurrent;
1312 + pos=dbuf[pos];
1313 + xcurrent=pos&0xff;
1314 + pos>>=8;
1315 + /* After 3 consecutive copies of the same byte, the 4th is a repeat
1316 + count. We count down from 4 instead
1317 + * of counting up because testing for non-zero is faster */
1318 + if(--bd->writeRunCountdown) {
1319 + if(xcurrent!=previous) bd->writeRunCountdown=4;
1320 + } else {
1321 + /* We have a repeated run, this byte indicates the count */
1322 + bd->writeCopies=xcurrent;
1323 + xcurrent=previous;
1324 + bd->writeRunCountdown=5;
1325 + /* Sometimes there are just 3 bytes (run length 0) */
1326 + if(!bd->writeCopies) goto decode_next_byte;
1327 + /* Subtract the 1 copy we'd output anyway to get extras */
1328 + --bd->writeCopies;
1329 + }
1330 + }
1331 + /* Decompression of this block completed successfully */
1332 + bd->writeCRC=~bd->writeCRC;
1333 + bd->totalCRC=((bd->totalCRC<<1) | (bd->totalCRC>>31)) ^ bd->writeCRC;
1334 + /* If this block had a CRC error, force file level CRC error. */
1335 + if(bd->writeCRC!=bd->headerCRC) {
1336 + bd->totalCRC=bd->headerCRC+1;
1337 + return RETVAL_LAST_BLOCK;
1338 + }
1339 + }
1341 + /* Refill the intermediate buffer by Huffman-decoding next block of input */
1342 + /* (previous is just a convenient unused temp variable here) */
1343 + previous=get_next_block(bd);
1344 + if(previous) {
1345 + bd->writeCount=previous;
1346 + return (previous!=RETVAL_LAST_BLOCK) ? previous : gotcount;
1347 + }
1348 + bd->writeCRC=0xffffffffUL;
1349 + pos=bd->writePos;
1350 + xcurrent=bd->writeCurrent;
1351 + goto decode_next_byte;
1352 +}
1354 +static int nofill(void *buf,unsigned int len) {
1355 + return -1;
1356 +}
1358 +/* Allocate the structure, read file header. If fill is NULL, inbuf must
1359 + contain the complete compressed stream (len bytes long); otherwise inbuf
1360 + is used as a scratch buffer that fill() refills as needed. */
1361 +static int start_bunzip(bunzip_data **bdp, void *inbuf, int len,
1362 + int (*fill)(void*,unsigned int))
1363 +{
1364 + bunzip_data *bd;
1365 + unsigned int i,j,c;
1366 + const unsigned int BZh0=(((unsigned int)'B')<<24)+(((unsigned int)'Z')<<16)
1367 + +(((unsigned int)'h')<<8)+(unsigned int)'0';
1369 + /* Figure out how much data to allocate */
1370 + i=sizeof(bunzip_data);
1372 + /* Allocate bunzip_data. Most fields initialize to zero. */
1373 + bd=*bdp=malloc(i);
1374 + memset(bd,0,sizeof(bunzip_data));
1375 + /* Setup input buffer */
1376 + bd->inbuf=inbuf;
1377 + bd->inbufCount=len;
1378 + if(fill != NULL)
1379 + bd->fill=fill;
1380 + else
1381 + bd->fill=nofill;
1383 + /* Init the CRC32 table (big endian) */
1384 + for(i=0;i<256;i++) {
1385 + c=i<<24;
1386 + for(j=8;j;j--)
1387 + c=c&0x80000000 ? (c<<1)^0x04c11db7 : (c<<1);
1388 + bd->crc32Table[i]=c;
1389 + }
1391 + /* Ensure that file starts with "BZh['1'-'9']." */
1392 + i = get_bits(bd,32);
1393 + if (((unsigned int)(i-BZh0-1)) >= 9) return RETVAL_NOT_BZIP_DATA;
1395 + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of
1396 + uncompressed data. Allocate intermediate buffer for block. */
1397 + bd->dbufSize=100000*(i-BZh0);
1399 + bd->dbuf=large_malloc(bd->dbufSize * sizeof(int));
1400 + return RETVAL_OK;
1401 +}
1403 +/* Decompress the bzip2 stream described by inbuf/len/fill, passing the
1404 + output to writebb. (Stops at end of bzip2 data, not end of input.) */
1405 +STATIC int bunzip2(char *inbuf, int len,
1406 + int(*fill)(void*,unsigned int),
1407 + int(*writebb)(char*,unsigned int),
1408 + int *pos)
1409 +{
1410 + char *outbuf;
1411 + bunzip_data *bd;
1412 + int i;
1414 + outbuf=malloc(BZIP2_IOBUF_SIZE);
1415 + if(!(i=start_bunzip(&bd,inbuf,len,fill))) {
1416 + for(;;) {
1417 + if((i=read_bunzip(bd,outbuf,BZIP2_IOBUF_SIZE)) <= 0) break;
1418 + if(i!=writebb(outbuf,i)) {
1419 + i=RETVAL_UNEXPECTED_OUTPUT_EOF;
1420 + break;
1421 + }
1422 + }
1423 + }
1424 + /* Check CRC and release memory */
1425 + if(i==RETVAL_LAST_BLOCK) {
1426 + if (bd->headerCRC!=bd->totalCRC) {
1427 + error("Data integrity error when decompressing.");
1428 + } else {
1429 + i=RETVAL_OK;
1430 + }
1431 + }
1432 + else if (i==RETVAL_UNEXPECTED_OUTPUT_EOF) {
1433 + error("Compressed file ends unexpectedly");
1434 + }
1435 + if(bd->dbuf) large_free(bd->dbuf);
1436 + if(pos)
1437 + *pos = bd->inbufPos;
1438 + free(bd);
1439 + free(outbuf);
1441 + return i;
1442 +}
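bunzip2() returns RETVAL_OK (0) once the last block and the whole-stream CRC check out, and a negative RETVAL_* code otherwise; when pos is non-NULL it receives the input offset reached, which is what the init/initramfs.c hunk earlier in this patch uses (via &inptr) to step past one compressed member. A minimal in-memory call, illustrative only and reusing the hypothetical example_flush() callback sketched earlier:

/* Sketch only: whole bzip2 input already in memory, output collected by an
 * example_flush()-style callback, consumed input length reported back. */
static int example_bunzip2(char *src, int src_len, int *consumed)
{
	/* RETVAL_OK (0) on success, negative RETVAL_* on failure;
	 * *consumed tells how far into src the decompressor read. */
	return bunzip2(src, src_len, NULL, example_flush, consumed);
}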
1445 --- linux-2.6.25.5/lib/decompress_unlzma.c
1446 +++ linux-2.6.25.5/lib/decompress_unlzma.c
1447 @@ -0,0 +1,616 @@
1448 +/* Lzma decompressor for Linux kernel. Shamelessly snarfed
1449 + * from busybox 1.1.1
1450 + *
1451 + * Linux kernel adaptation
1452 + * Copyright (C) 2006 Alain <alain@knaff.lu>
1453 + *
1454 + * Based on small lzma deflate implementation/Small range coder
1455 + * implementation for lzma.
1456 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1457 + *
1458 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1459 + * Copyright (C) 1999-2005 Igor Pavlov
1460 + *
1461 + * Copyrights of the parts, see headers below.
1462 + *
1463 + *
1464 + * This program is free software; you can redistribute it and/or
1465 + * modify it under the terms of the GNU Lesser General Public
1466 + * License as published by the Free Software Foundation; either
1467 + * version 2.1 of the License, or (at your option) any later version.
1468 + *
1469 + * This program is distributed in the hope that it will be useful,
1470 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1471 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
1472 + * Lesser General Public License for more details.
1473 + *
1474 + * You should have received a copy of the GNU Lesser General Public
1475 + * License along with this library; if not, write to the Free Software
1476 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1477 + */
1479 +#ifndef STATIC
1481 +#include <linux/kernel.h>
1482 +#include <linux/fs.h>
1483 +#include <linux/string.h>
1485 +#ifdef TEST
1486 +#include "test.h"
1487 +#else
1488 +#include <linux/vmalloc.h>
1489 +#endif
1491 +static void __init *large_malloc(size_t size)
1492 +{
1493 + return vmalloc(size);
1494 +}
1496 +static void __init large_free(void *where)
1497 +{
1498 + vfree(where);
1499 +}
1501 +#ifndef TEST
1502 +static void __init error(char *x)
1503 +{
1504 + printk(KERN_ERR "%s\n", x);
1505 +}
1507 +#endif
1509 +#define STATIC /**/
1511 +#endif
1513 +#define CONFIG_FEATURE_LZMA_FAST
1514 +#include <linux/decompress_unlzma.h>
1516 +#define MIN(a,b) (((a)<(b))?(a):(b))
1518 +static long long read_int(unsigned char *ptr, int size)
1519 +{
1520 + int i;
1521 + long long ret=0;
1523 + for(i=0; i<size; i++) {
1524 + ret = (ret << 8) | ptr[size-i-1];
1525 + }
1526 + return ret;
1527 +}
1529 +#define ENDIAN_CONVERT(x) x=(typeof(x))read_int((unsigned char*)&x,sizeof(x))
1532 +/* Small range coder implementation for lzma.
1533 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1534 + *
1535 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1536 + * Copyright (c) 1999-2005 Igor Pavlov
1537 + */
1539 +#ifndef always_inline
1540 +# if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >0)
1541 +# define always_inline __attribute__((always_inline)) inline
1542 +# else
1543 +# define always_inline inline
1544 +# endif
1545 +#endif
1547 +#ifdef CONFIG_FEATURE_LZMA_FAST
1548 +# define speed_inline always_inline
1549 +#else
1550 +# define speed_inline
1551 +#endif
1554 +typedef struct {
1555 + int (*fill)(void*,unsigned int);
1556 + uint8_t *ptr;
1557 + uint8_t *buffer;
1558 + uint8_t *buffer_end;
1559 + int buffer_size;
1560 + uint32_t code;
1561 + uint32_t range;
1562 + uint32_t bound;
1563 +} rc_t;
1566 +#define RC_TOP_BITS 24
1567 +#define RC_MOVE_BITS 5
1568 +#define RC_MODEL_TOTAL_BITS 11
1571 +/* Called twice: once at startup and once in rc_normalize() */
1572 +static void rc_read(rc_t * rc)
1573 +{
1574 + if (!rc->buffer_size) return;
1575 + if (rc->fill) {
1576 + rc->buffer_size = rc->fill((char*)rc->buffer, LZMA_IOBUF_SIZE);
1577 + rc->ptr = rc->buffer;
1578 + rc->buffer_end = rc->buffer + rc->buffer_size;
1579 + if (rc->buffer_size > 0) return;
1580 + }
1581 + error("unexpected EOF");
1582 + rc->buffer_size = 0;
1583 +}
1585 +/* Called once */
1586 +static always_inline void rc_init(rc_t * rc, int (*fill)(void*,unsigned int),
1587 + char *buffer, int buffer_size)
1588 +{
1589 + rc->fill = fill;
1590 + rc->buffer = (uint8_t *)buffer;
1591 + rc->buffer_size = buffer_size;
1592 + rc->buffer_end = rc->buffer + rc->buffer_size;
1593 + rc->ptr = rc->buffer;
1595 + rc->code = 0;
1596 + rc->range = 0xFFFFFFFF;
1597 +}
1599 +static always_inline void rc_init_code(rc_t * rc)
1600 +{
1601 + int i;
1603 + for (i = 0; i < 5; i++) {
1604 + if (rc->ptr >= rc->buffer_end)
1605 + rc_read(rc);
1606 + rc->code = (rc->code << 8) | *rc->ptr++;
1607 + }
1608 +}
1610 +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
1611 +static void rc_do_normalize(rc_t * rc)
1612 +{
1613 + if (rc->ptr >= rc->buffer_end)
1614 + rc_read(rc);
1615 + rc->range <<= 8;
1616 + rc->code = (rc->code << 8) | *rc->ptr++;
1617 +}
1618 +static always_inline void rc_normalize(rc_t * rc)
1619 +{
1620 + if (rc->range < (1 << RC_TOP_BITS)) {
1621 + rc_do_normalize(rc);
1622 + }
1623 +}
1625 +/* Called 9 times */
1626 +/* Why rc_is_bit_0_helper exists?
1627 + * Because we want to always expose (rc->code < rc->bound) to optimizer
1628 + */
1629 +static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
1630 +{
1631 + rc_normalize(rc);
1632 + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
1633 + return rc->bound;
1634 +}
1635 +static always_inline int rc_is_bit_0(rc_t * rc, uint16_t * p)
1636 +{
1637 + uint32_t t = rc_is_bit_0_helper(rc, p);
1638 + return rc->code < t;
1639 +}
1641 +/* Called ~10 times, but very small, thus inlined */
1642 +static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
1643 +{
1644 + rc->range = rc->bound;
1645 + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
1646 +}
1647 +static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
1648 +{
1649 + rc->range -= rc->bound;
1650 + rc->code -= rc->bound;
1651 + *p -= *p >> RC_MOVE_BITS;
1652 +}
1654 +/* Called 4 times in unlzma loop */
1655 +static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
1656 +{
1657 + if (rc_is_bit_0(rc, p)) {
1658 + rc_update_bit_0(rc, p);
1659 + *symbol *= 2;
1660 + return 0;
1661 + } else {
1662 + rc_update_bit_1(rc, p);
1663 + *symbol = *symbol * 2 + 1;
1664 + return 1;
1665 + }
1666 +}
1668 +/* Called once */
1669 +static always_inline int rc_direct_bit(rc_t * rc)
1670 +{
1671 + rc_normalize(rc);
1672 + rc->range >>= 1;
1673 + if (rc->code >= rc->range) {
1674 + rc->code -= rc->range;
1675 + return 1;
1676 + }
1677 + return 0;
1678 +}
1680 +/* Called twice */
1681 +static speed_inline void
1682 +rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
1683 +{
1684 + int i = num_levels;
1686 + *symbol = 1;
1687 + while (i--)
1688 + rc_get_bit(rc, p + *symbol, symbol);
1689 + *symbol -= 1 << num_levels;
1690 +}
1693 +/*
1694 + * Small lzma deflate implementation.
1695 + * Copyright (C) 2006 Aurelien Jacobs <aurel@gnuage.org>
1696 + *
1697 + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
1698 + * Copyright (C) 1999-2005 Igor Pavlov
1699 + */
1702 +typedef struct {
1703 + uint8_t pos;
1704 + uint32_t dict_size;
1705 + uint64_t dst_size;
1706 +} __attribute__ ((packed)) lzma_header_t;
1709 +#define LZMA_BASE_SIZE 1846
1710 +#define LZMA_LIT_SIZE 768
1712 +#define LZMA_NUM_POS_BITS_MAX 4
1714 +#define LZMA_LEN_NUM_LOW_BITS 3
1715 +#define LZMA_LEN_NUM_MID_BITS 3
1716 +#define LZMA_LEN_NUM_HIGH_BITS 8
1718 +#define LZMA_LEN_CHOICE 0
1719 +#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
1720 +#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
1721 +#define LZMA_LEN_MID (LZMA_LEN_LOW \
1722 + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
1723 +#define LZMA_LEN_HIGH (LZMA_LEN_MID \
1724 + +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
1725 +#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
1727 +#define LZMA_NUM_STATES 12
1728 +#define LZMA_NUM_LIT_STATES 7
1730 +#define LZMA_START_POS_MODEL_INDEX 4
1731 +#define LZMA_END_POS_MODEL_INDEX 14
1732 +#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
1734 +#define LZMA_NUM_POS_SLOT_BITS 6
1735 +#define LZMA_NUM_LEN_TO_POS_STATES 4
1737 +#define LZMA_NUM_ALIGN_BITS 4
1739 +#define LZMA_MATCH_MIN_LEN 2
1741 +#define LZMA_IS_MATCH 0
1742 +#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES <<LZMA_NUM_POS_BITS_MAX))
1743 +#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
1744 +#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
1745 +#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
1746 +#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
1747 +#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
1748 + + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
1749 +#define LZMA_SPEC_POS (LZMA_POS_SLOT \
1750 + +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
1751 +#define LZMA_ALIGN (LZMA_SPEC_POS \
1752 + + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
1753 +#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
1754 +#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
1755 +#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
1758 +STATIC int unlzma(char *inbuf, int in_len,
1759 + int(*fill)(void*,unsigned int),
1760 + int(*writebb)(char*,unsigned int),
1761 + int *posp)
1762 +{
1763 + lzma_header_t header;
1764 + int lc, pb, lp;
1765 + uint32_t pos_state_mask;
1766 + uint32_t literal_pos_mask;
1767 + uint32_t pos;
1768 + uint16_t *p;
1769 + uint16_t *prob;
1770 + uint16_t *prob_lit;
1771 + int num_bits;
1772 + int num_probs;
1773 + rc_t rc;
1774 + int i, mi;
1775 + uint8_t *buffer;
1776 + uint8_t previous_byte = 0;
1777 + size_t buffer_pos = 0, global_pos = 0;
1778 + int len = 0;
1779 + int state = 0;
1780 + int bufsize;
1781 + uint32_t rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
1783 + rc_init(&rc, fill, inbuf, in_len);
1785 + header.dict_size = (uint32_t) -1L;
1786 + header.dst_size = (uint64_t) -1LL;
1787 + if (inbuf && in_len > 0 && inbuf[0] == 0) {
1788 + const int LZMA_LC = 3, LZMA_LP = 0, LZMA_PB = 2;
1789 + header.pos = (LZMA_PB * 45) + (LZMA_LP * 9) + LZMA_LC;
1790 + rc.ptr++;
1791 + }
1792 + else {
1793 + int hdrsize = sizeof(header);
1794 + if (inbuf && in_len > 12 &&
1795 + (1 + * (unsigned long *) &inbuf[9]) > 1U)
1796 + hdrsize = 5;
1797 + for (i = 0; i < hdrsize; i++) {
1798 + if (rc.ptr >= rc.buffer_end)
1799 + rc_read(&rc);
1800 + ((unsigned char *)&header)[i] = *rc.ptr++;
1801 + }
1802 + }
1804 + if (header.pos >= (9 * 5 * 5)) {
1805 + error("bad header");
1806 + return -1;
1807 + }
1809 + mi = header.pos / 9;
1810 + lc = header.pos % 9;
1811 + pb = mi / 5;
1812 + lp = mi % 5;
1813 + pos_state_mask = (1 << pb) - 1;
1814 + literal_pos_mask = (1 << lp) - 1;
1816 + ENDIAN_CONVERT(header.dict_size);
1817 + ENDIAN_CONVERT(header.dst_size);
1819 + if (header.dict_size == 0)
1820 + header.dict_size = 1;
1822 + bufsize = MIN(header.dst_size, header.dict_size);
1823 + buffer = (uint8_t *) posp;
1824 + if (writebb) buffer = large_malloc(bufsize);
1825 + if(buffer == NULL)
1826 + return -1;
1828 + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
1829 + p = large_malloc(num_probs * sizeof(*p));
1830 + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
1831 + for (i = 0; i < num_probs; i++)
1832 + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
1834 + rc_init_code(&rc);
1836 + while (global_pos + buffer_pos < header.dst_size) {
1837 + int pos_state = (buffer_pos + global_pos) & pos_state_mask;
1839 + prob =
1840 + p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
1841 + if (rc_is_bit_0(&rc, prob)) {
1842 + mi = 1;
1843 + rc_update_bit_0(&rc, prob);
1844 + prob = (p + LZMA_LITERAL + (LZMA_LIT_SIZE
1845 + * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
1846 + + (previous_byte >> (8 - lc)))));
1848 + if (state >= LZMA_NUM_LIT_STATES) {
1849 + int match_byte;
1851 + pos = buffer_pos - rep0;
1853 + while (pos >= header.dict_size)
1854 + pos += header.dict_size;
1855 + if(pos >= bufsize) {
1856 + goto fail;
1857 + }
1859 + match_byte = buffer[pos];
1860 + do {
1861 + int bit;
1863 + match_byte <<= 1;
1864 + bit = match_byte & 0x100;
1865 + prob_lit = prob + 0x100 + bit + mi;
1866 + if (rc_get_bit(&rc, prob_lit, &mi)) {
1867 + if (!bit)
1868 + break;
1869 + } else {
1870 + if (bit)
1871 + break;
1872 + }
1873 + } while (mi < 0x100);
1874 + }
1875 + while (mi < 0x100) {
1876 + prob_lit = prob + mi;
1877 + rc_get_bit(&rc, prob_lit, &mi);
1878 + }
1879 + previous_byte = (uint8_t) mi;
1880 + if (state < 4)
1881 + state = 0;
1882 + else if (state < 10)
1883 + state -= 3;
1884 + else
1885 + state -= 6;
1886 + goto store_previous_byte;
1887 + } else {
1888 + int offset;
1889 + uint16_t *prob_len;
1891 + rc_update_bit_1(&rc, prob);
1892 + prob = p + LZMA_IS_REP + state;
1893 + if (rc_is_bit_0(&rc, prob)) {
1894 + rc_update_bit_0(&rc, prob);
1895 + rep3 = rep2;
1896 + rep2 = rep1;
1897 + rep1 = rep0;
1898 + state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
1899 + prob = p + LZMA_LEN_CODER;
1900 + } else {
1901 + rc_update_bit_1(&rc, prob);
1902 + prob = p + LZMA_IS_REP_G0 + state;
1903 + if (rc_is_bit_0(&rc, prob)) {
1904 + rc_update_bit_0(&rc, prob);
1905 + prob = (p + LZMA_IS_REP_0_LONG
1906 + + (state << LZMA_NUM_POS_BITS_MAX) + pos_state);
1907 + if (rc_is_bit_0(&rc, prob)) {
1908 + rc_update_bit_0(&rc, prob);
1910 + state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
1911 + pos = buffer_pos - rep0;
1913 + while (pos >= header.dict_size)
1914 + pos += header.dict_size;
1915 + if(pos >= bufsize) {
1916 + goto fail;
1917 + }
1919 + previous_byte = buffer[pos];
1920 + store_previous_byte:
1921 + if (!rc.buffer_size) goto eof;
1922 + buffer[buffer_pos++] = previous_byte;
1923 + if (writebb && buffer_pos == header.dict_size) {
1924 + buffer_pos = 0;
1925 + global_pos += header.dict_size;
1926 + writebb((char*)buffer, header.dict_size);
1927 + }
1928 + continue;
1929 + } else {
1930 + rc_update_bit_1(&rc, prob);
1931 + }
1932 + } else {
1933 + uint32_t distance;
1935 + rc_update_bit_1(&rc, prob);
1936 + prob = p + LZMA_IS_REP_G1 + state;
1937 + if (rc_is_bit_0(&rc, prob)) {
1938 + rc_update_bit_0(&rc, prob);
1939 + distance = rep1;
1940 + } else {
1941 + rc_update_bit_1(&rc, prob);
1942 + prob = p + LZMA_IS_REP_G2 + state;
1943 + if (rc_is_bit_0(&rc, prob)) {
1944 + rc_update_bit_0(&rc, prob);
1945 + distance = rep2;
1946 + } else {
1947 + rc_update_bit_1(&rc, prob);
1948 + distance = rep3;
1949 + rep3 = rep2;
1950 + }
1951 + rep2 = rep1;
1952 + }
1953 + rep1 = rep0;
1954 + rep0 = distance;
1955 + }
1956 + state = state < LZMA_NUM_LIT_STATES ? 8 : 11;
1957 + prob = p + LZMA_REP_LEN_CODER;
1958 + }
1960 + prob_len = prob + LZMA_LEN_CHOICE;
1961 + if (rc_is_bit_0(&rc, prob_len)) {
1962 + rc_update_bit_0(&rc, prob_len);
1963 + prob_len = (prob + LZMA_LEN_LOW
1964 + + (pos_state << LZMA_LEN_NUM_LOW_BITS));
1965 + offset = 0;
1966 + num_bits = LZMA_LEN_NUM_LOW_BITS;
1967 + } else {
1968 + rc_update_bit_1(&rc, prob_len);
1969 + prob_len = prob + LZMA_LEN_CHOICE_2;
1970 + if (rc_is_bit_0(&rc, prob_len)) {
1971 + rc_update_bit_0(&rc, prob_len);
1972 + prob_len = (prob + LZMA_LEN_MID
1973 + + (pos_state << LZMA_LEN_NUM_MID_BITS));
1974 + offset = 1 << LZMA_LEN_NUM_LOW_BITS;
1975 + num_bits = LZMA_LEN_NUM_MID_BITS;
1976 + } else {
1977 + rc_update_bit_1(&rc, prob_len);
1978 + prob_len = prob + LZMA_LEN_HIGH;
1979 + offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
1980 + + (1 << LZMA_LEN_NUM_MID_BITS));
1981 + num_bits = LZMA_LEN_NUM_HIGH_BITS;
1982 + }
1983 + }
1984 + rc_bit_tree_decode(&rc, prob_len, num_bits, &len);
1985 + len += offset;
1987 + if (state < 4) {
1988 + int pos_slot;
1990 + state += LZMA_NUM_LIT_STATES;
1991 + prob =
1992 + p + LZMA_POS_SLOT +
1993 + ((len <
1994 + LZMA_NUM_LEN_TO_POS_STATES ? len :
1995 + LZMA_NUM_LEN_TO_POS_STATES - 1)
1996 + << LZMA_NUM_POS_SLOT_BITS);
1997 + rc_bit_tree_decode(&rc, prob, LZMA_NUM_POS_SLOT_BITS,
1998 + &pos_slot);
1999 + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
2000 + num_bits = (pos_slot >> 1) - 1;
2001 + rep0 = 2 | (pos_slot & 1);
2002 + if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
2003 + rep0 <<= num_bits;
2004 + prob = p + LZMA_SPEC_POS + rep0 - pos_slot - 1;
2005 + } else {
2006 + num_bits -= LZMA_NUM_ALIGN_BITS;
2007 + while (num_bits--)
2008 + rep0 = (rep0 << 1) | rc_direct_bit(&rc);
2009 + prob = p + LZMA_ALIGN;
2010 + rep0 <<= LZMA_NUM_ALIGN_BITS;
2011 + num_bits = LZMA_NUM_ALIGN_BITS;
2012 + }
2013 + i = 1;
2014 + mi = 1;
2015 + while (num_bits--) {
2016 + if (rc_get_bit(&rc, prob + mi, &mi))
2017 + rep0 |= i;
2018 + i <<= 1;
2019 + }
2020 + } else
2021 + rep0 = pos_slot;
2022 + if (++rep0 == 0)
2023 + break;
2024 + }
2026 + len += LZMA_MATCH_MIN_LEN;
2028 + if (!rc.buffer_size) goto eof;
2029 + do {
2030 + pos = buffer_pos - rep0;
2032 + while (pos >= header.dict_size)
2033 + pos += header.dict_size;
2034 + if(pos >= bufsize) {
2035 + goto fail;
2036 + }
2038 + previous_byte = buffer[pos];
2039 + buffer[buffer_pos++] = previous_byte;
2040 + if (writebb && buffer_pos == header.dict_size) {
2041 + buffer_pos = 0;
2042 + global_pos += header.dict_size;
2043 + writebb((char*)buffer, header.dict_size);
2044 + }
2045 + len--;
2046 + } while (len != 0 && (global_pos + buffer_pos) < header.dst_size);
2047 + }
2048 + }
2049 + eof:
2050 + if (writebb) {
2051 + writebb((char*)buffer, buffer_pos);
2052 + if(posp) {
2053 + *posp = rc.ptr-rc.buffer;
2054 + }
2055 + large_free(buffer);
2056 + }
2057 + large_free(p);
2058 + return 0;
2059 + fail:
2060 + if (writebb) large_free(buffer);
2061 + large_free(p);
2062 + return -1;
2063 +}
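
unlzma() supports two calling modes, visible from the buffer setup and the eof path above: with a writebb callback the output is flushed one dictionary-sized chunk at a time and *posp (if given) receives the number of compressed bytes consumed; with writebb == NULL the posp argument is reinterpreted as the output buffer itself and must provide MIN(dst_size, dict_size) writable bytes. A hypothetical caller, sketched only to show the two shapes; my_fill, my_flush and unlzma_demo are invented names, and the sketch assumes it is compiled in the same translation unit as the STATIC decoder:

static int my_fill(void *buf, unsigned int len)         /* refill input */
{
        return 0;               /* no more compressed data in this sketch */
}

static int my_flush(char *buf, unsigned int len)        /* consume output */
{
        return len;             /* pretend all len bytes were written out */
}

static void unlzma_demo(char *in, int in_len, char *out)
{
        int consumed;

        /* Streaming mode: output is pushed through writebb() one dictionary-
         * sized chunk at a time; *posp reports the input bytes consumed. */
        unlzma(in, in_len, my_fill, my_flush, &consumed);

        /* In-place mode: with writebb == NULL, posp is reinterpreted as the
         * output buffer ("buffer = (uint8_t *) posp" above) and must point
         * at MIN(dst_size, dict_size) writable bytes. */
        unlzma(in, in_len, my_fill, NULL, (int *) out);
}
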
2065 --- linux-2.6.25.5/lib/unlzma_syms.c
2066 +++ linux-2.6.25.5/lib/unlzma_syms.c
2067 @@ -0,0 +1,14 @@
2068 +/*
2069 + * linux/lib/unlzma_syms.c
2070 + *
2071 + * Exported symbols for the unlzma functionality.
2072 + *
2073 + */
2075 +#include <linux/module.h>
2076 +#include <linux/init.h>
2078 +#include <linux/decompress_unlzma.h>
2080 +EXPORT_SYMBOL(unlzma);
2081 +MODULE_LICENSE("GPL");
2083 --- linux-2.6.25.5/lib/Makefile
2084 +++ linux-2.6.25.5/lib/Makefile
2085 @@ -50,6 +50,9 @@ obj-$(CONFIG_CRC7) += crc7.o
2086 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
2087 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
2089 +obj-$(CONFIG_RD_BZIP2) += decompress_bunzip2.o
2090 +obj-$(CONFIG_RD_LZMA) += decompress_unlzma.o unlzma_syms.o
2092 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
2093 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
2094 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
2096 --- linux-2.6.25.5/scripts/Makefile.lib
2097 +++ linux-2.6.25.5/scripts/Makefile.lib
2098 @@ -172,4 +172,17 @@
2099 quiet_cmd_gzip = GZIP $@
2100 cmd_gzip = gzip -f -9 < $< > $@
2102 +# Append size
2103 +size_append=perl -e 'print(pack("i",(stat($$ARGV[0]))[7]));'
2105 +# Bzip2
2106 +# ---------------------------------------------------------------------------
2108 +quiet_cmd_bzip2 = BZIP2 $@
2109 +cmd_bzip2 = (bzip2 -9 < $< ; $(size_append) $<) > $@
2111 +# Lzma
2112 +# ---------------------------------------------------------------------------
2114 +quiet_cmd_lzma = LZMA $@
2115 +cmd_lzma = (lzma e $< -so ; $(size_append) $<) >$@
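
Both new rules append the uncompressed size after the compressed stream: perl's pack("i", ...) emits it as a native-endian integer (normally four bytes), so whatever consumes the payload can recover the original length from the trailing bytes. A minimal sketch of reading such a trailer, assuming a little-endian build host and a payload of at least four bytes (appended_size is an invented helper, not part of the patch):

/* Illustration only: recover the size that size_append put at the end. */
#include <stdint.h>
#include <string.h>

static uint32_t appended_size(const unsigned char *payload, unsigned int len)
{
        uint32_t size;

        /* assumes len >= 4 and the same byte order as the build host */
        memcpy(&size, payload + len - 4, sizeof(size));
        return size;
}

int main(void)
{
        unsigned char payload[8] = { 0, 0, 0, 0, 0x00, 0x10, 0x00, 0x00 };

        /* pretend the first 4 bytes are compressed data; trailer says 4096 */
        return appended_size(payload, sizeof(payload)) == 4096 ? 0 : 1;
}
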
2117 --- linux-2.6.25.5/arch/x86/mm/init_32.c
2118 +++ linux-2.6.25.5/arch/x86/mm/init_32.c
2119 @@ -788,7 +788,8 @@
2120 free_page(addr);
2121 totalram_pages++;
2123 - printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
2124 + if (what)
2125 + printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
2126 #endif
2129 @@ -802,6 +803,6 @@
2130 #ifdef CONFIG_BLK_DEV_INITRD
2131 void free_initrd_mem(unsigned long start, unsigned long end)
2133 - free_init_pages("initrd memory", start, end);
2134 + free_init_pages(NULL, start, end);
2136 #endif
2138 --- linux-2.6.25.5/init/initramfs.c
2139 +++ linux-2.6.25.5/init/initramfs.c
2140 @@ -404,7 +404,32 @@
2141 static unsigned outcnt; /* bytes in output buffer */
2142 static long bytes_out;
2144 -#define get_byte() (inptr < insize ? inbuf[inptr++] : -1)
2145 +#define INITRD_PAGE ((PAGE_SIZE > 1024*1024) ? PAGE_SIZE : 1024*1024)
2146 +static int fill_offset, fill_total;
2147 +#include <linux/initrd.h>
2148 +static void release_inbuf(int count)
2149 +{
2150 + if (fill_total < 0) return;
2151 + fill_offset += count;
2152 + fill_total += count;
2153 + if (fill_offset >= INITRD_PAGE) {
2154 + unsigned rem = fill_offset % INITRD_PAGE;
2155 + unsigned end = initrd_start + fill_offset - rem;
2156 + free_initrd_mem(initrd_start, end);
2157 + printk(".");
2158 + initrd_start = end;
2159 + fill_offset = rem;
2160 + }
2161 +}
2163 +static int get_byte(void)
2164 +{
2165 + int c;
2166 + if (inptr >= insize) return -1;
2167 + c = inbuf[inptr++];
2168 + release_inbuf(1);
2169 + return c;
2170 +}
2172 /* Diagnostic functions (stubbed out) */
2173 #define Assert(cond,msg)
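
release_inbuf() above lets initramfs unpacking free the initrd incrementally: once at least INITRD_PAGE bytes (1 MiB on x86, where PAGE_SIZE is smaller) have been consumed, whole chunks are returned with free_initrd_mem() and only the partial remainder is carried forward in fill_offset. That is presumably also why the init_32.c hunk earlier makes free_initrd_mem() silent, since it is now called once per chunk rather than once per boot. The following user-space simulation of the rounding (illustration only, with invented addresses and helper names) shows one chunk being released:

/* Illustration only: the chunked freeing done by release_inbuf(), simulated
 * without the kernel.  CHUNK stands in for INITRD_PAGE. */
#include <stdio.h>

#define CHUNK (1024 * 1024)

static unsigned long start = 0x1000000;         /* fake initrd_start */
static unsigned long offset;                    /* fake fill_offset  */

static void consume(unsigned long count)
{
        offset += count;
        if (offset >= CHUNK) {
                unsigned long rem = offset % CHUNK;

                printf("free [%#lx, %#lx)\n", start, start + offset - rem);
                start += offset - rem;          /* advance initrd_start */
                offset = rem;                   /* keep the partial chunk */
        }
}

int main(void)
{
        consume(512 * 1024);    /* nothing freed yet */
        consume(768 * 1024);    /* frees one 1 MiB chunk, keeps 256 KiB */
        return 0;
}
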
2174 @@ -453,25 +478,13 @@
2175 outcnt = 0;
2178 -#include <linux/initrd.h>
2179 #ifdef CONFIG_RD_LZMA
2180 -#define INITRD_PAGE ((PAGE_SIZE > 1024*1024) ? PAGE_SIZE : 1024*1024)
2181 -static int fill_offset, fill_total;
2182 static int fill_buffer(void *buffer, unsigned size)
2184 int max = initrd_end - initrd_start - fill_offset;
2185 if (size < max) max = size;
2186 memcpy(buffer, (void *)(initrd_start + fill_offset), max);
2187 - fill_offset += max;
2188 - fill_total += max;
2189 - if (fill_offset >= INITRD_PAGE) {
2190 - unsigned rem = fill_offset % INITRD_PAGE;
2191 - unsigned end = initrd_start + fill_offset - rem;
2192 - free_initrd_mem(initrd_start, end);
2193 - printk(".");
2194 - initrd_start = end;
2195 - fill_offset = rem;
2196 - }
2197 + release_inbuf(max);
2198 return max;
2200 #endif
2201 @@ -489,6 +502,8 @@
2202 state = Start;
2203 this_header = 0;
2204 message = NULL;
2205 + fill_total = fill_offset = 0;
2206 + if(buf != (char *) initrd_start) fill_total = -1;
2207 while (!message && len) {
2208 #ifdef CONFIG_RD_LZMA
2209 int status;
2210 @@ -497,6 +512,7 @@
2211 if (*buf == '0' && !(this_header & 3)) {
2212 state = Start;
2213 written = write_buffer(buf, len);
2214 + release_inbuf(written);
2215 buf += written;
2216 len -= written;
2217 continue;