wok-4.x view busybox/stuff/busybox-1.12.0-unlzma.u @ rev 3260

Update gphoto2 DEPENDS
author Rohit Joshi <jozee@slitaz.org>
date Mon Jun 01 10:36:31 2009 +0000 (2009-06-01)
parents
children
line source
1 --- busybox-1.12.0/archival/libunarchive/decompress_unlzma.c
2 +++ busybox-1.12.0/archival/libunarchive/decompress_unlzma.c
3 @@ -14,8 +14,10 @@
5 #if ENABLE_FEATURE_LZMA_FAST
6 # define speed_inline ALWAYS_INLINE
7 +# define size_inline
8 #else
9 # define speed_inline
10 +# define size_inline ALWAYS_INLINE
11 #endif
14 @@ -44,8 +46,8 @@
15 #define RC_MODEL_TOTAL_BITS 11
18 -/* Called twice: once at startup and once in rc_normalize() */
19 -static void rc_read(rc_t *rc)
20 +/* Called twice: once at startup (LZMA_FAST only) and once in rc_normalize() */
21 +static size_inline void rc_read(rc_t *rc)
22 {
23 int buffer_size = safe_read(rc->fd, RC_BUFFER, RC_BUFFER_SIZE);
24 if (buffer_size <= 0)
25 @@ -54,8 +56,17 @@
26 rc->buffer_end = RC_BUFFER + buffer_size;
27 }
29 +/* Called twice, but one callsite is in speed_inline'd rc_is_bit_1() */
30 +static void rc_do_normalize(rc_t *rc)
31 +{
32 + if (rc->ptr >= rc->buffer_end)
33 + rc_read(rc);
34 + rc->range <<= 8;
35 + rc->code = (rc->code << 8) | *rc->ptr++;
36 +}
37 +
38 /* Called once */
39 -static rc_t* rc_init(int fd) /*, int buffer_size) */
40 +static ALWAYS_INLINE rc_t* rc_init(int fd) /*, int buffer_size) */
41 {
42 int i;
43 rc_t *rc;
44 @@ -63,17 +74,18 @@
45 rc = xmalloc(sizeof(*rc) + RC_BUFFER_SIZE);
47 rc->fd = fd;
48 - /* rc->buffer_size = buffer_size; */
49 - rc->buffer_end = RC_BUFFER + RC_BUFFER_SIZE;
50 rc->ptr = rc->buffer_end;
52 - rc->code = 0;
53 - rc->range = 0xFFFFFFFF;
54 for (i = 0; i < 5; i++) {
55 +#if ENABLE_FEATURE_LZMA_FAST
56 if (rc->ptr >= rc->buffer_end)
57 rc_read(rc);
58 rc->code = (rc->code << 8) | *rc->ptr++;
59 +#else
60 + rc_do_normalize(rc);
61 +#endif
62 }
63 + rc->range = 0xFFFFFFFF;
64 return rc;
65 }
67 @@ -83,14 +95,6 @@
68 free(rc);
69 }
71 -/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
72 -static void rc_do_normalize(rc_t *rc)
73 -{
74 - if (rc->ptr >= rc->buffer_end)
75 - rc_read(rc);
76 - rc->range <<= 8;
77 - rc->code = (rc->code << 8) | *rc->ptr++;
78 -}
79 static ALWAYS_INLINE void rc_normalize(rc_t *rc)
80 {
81 if (rc->range < (1 << RC_TOP_BITS)) {
82 @@ -98,49 +102,30 @@
83 }
84 }
86 -/* rc_is_bit_0 is called 9 times */
87 -/* Why rc_is_bit_0_helper exists?
88 - * Because we want to always expose (rc->code < rc->bound) to optimizer.
89 - * Thus rc_is_bit_0 is always inlined, and rc_is_bit_0_helper is inlined
90 - * only if we compile for speed.
91 - */
92 -static speed_inline uint32_t rc_is_bit_0_helper(rc_t *rc, uint16_t *p)
93 +/* rc_is_bit_1 is called 9 times */
94 +static speed_inline int rc_is_bit_1(rc_t *rc, uint16_t *p)
95 {
96 rc_normalize(rc);
97 rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
98 - return rc->bound;
99 + if (rc->code < rc->bound) {
100 + rc->range = rc->bound;
101 + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
102 + return 0;
103 + }
104 + else {
105 + rc->range -= rc->bound;
106 + rc->code -= rc->bound;
107 + *p -= *p >> RC_MOVE_BITS;
108 + return 1;
109 + }
110 }
111 -static ALWAYS_INLINE int rc_is_bit_0(rc_t *rc, uint16_t *p)
112 -{
113 - uint32_t t = rc_is_bit_0_helper(rc, p);
114 - return rc->code < t;
115 -}
117 -/* Called ~10 times, but very small, thus inlined */
118 -static speed_inline void rc_update_bit_0(rc_t *rc, uint16_t *p)
119 -{
120 - rc->range = rc->bound;
121 - *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
122 -}
123 -static speed_inline void rc_update_bit_1(rc_t *rc, uint16_t *p)
124 -{
125 - rc->range -= rc->bound;
126 - rc->code -= rc->bound;
127 - *p -= *p >> RC_MOVE_BITS;
128 -}
129 -
130 /* Called 4 times in unlzma loop */
131 -static int rc_get_bit(rc_t *rc, uint16_t *p, int *symbol)
132 +static speed_inline int rc_get_bit(rc_t *rc, uint16_t *p, int *symbol)
133 {
134 - if (rc_is_bit_0(rc, p)) {
135 - rc_update_bit_0(rc, p);
136 - *symbol *= 2;
137 - return 0;
138 - } else {
139 - rc_update_bit_1(rc, p);
140 - *symbol = *symbol * 2 + 1;
141 - return 1;
142 - }
143 + int ret = rc_is_bit_1(rc, p);
144 + *symbol = *symbol * 2 + ret;
145 + return ret;
146 }
148 /* Called once */
149 @@ -266,13 +251,13 @@
150 header.dst_size = SWAP_LE64(header.dst_size);
152 if (header.dict_size == 0)
153 - header.dict_size = 1;
154 + header.dict_size++;
156 buffer = xmalloc(MIN(header.dst_size, header.dict_size));
158 num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
159 p = xmalloc(num_probs * sizeof(*p));
160 - num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
161 + num_probs += LZMA_LITERAL - LZMA_BASE_SIZE;
162 for (i = 0; i < num_probs; i++)
163 p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
165 @@ -282,9 +267,8 @@
166 int pos_state = (buffer_pos + global_pos) & pos_state_mask;
168 prob = p + LZMA_IS_MATCH + (state << LZMA_NUM_POS_BITS_MAX) + pos_state;
169 - if (rc_is_bit_0(rc, prob)) {
170 + if (!rc_is_bit_1(rc, prob)) {
171 mi = 1;
172 - rc_update_bit_0(rc, prob);
173 prob = (p + LZMA_LITERAL
174 + (LZMA_LIT_SIZE * ((((buffer_pos + global_pos) & literal_pos_mask) << lc)
175 + (previous_byte >> (8 - lc))
176 @@ -340,26 +324,21 @@
177 int offset;
178 uint16_t *prob_len;
180 - rc_update_bit_1(rc, prob);
181 prob = p + LZMA_IS_REP + state;
182 - if (rc_is_bit_0(rc, prob)) {
183 - rc_update_bit_0(rc, prob);
184 + if (!rc_is_bit_1(rc, prob)) {
185 rep3 = rep2;
186 rep2 = rep1;
187 rep1 = rep0;
188 state = state < LZMA_NUM_LIT_STATES ? 0 : 3;
189 prob = p + LZMA_LEN_CODER;
190 } else {
191 - rc_update_bit_1(rc, prob);
192 - prob = p + LZMA_IS_REP_G0 + state;
193 - if (rc_is_bit_0(rc, prob)) {
194 - rc_update_bit_0(rc, prob);
195 + prob += LZMA_IS_REP_G0 - LZMA_IS_REP;
196 + if (!rc_is_bit_1(rc, prob)) {
197 prob = (p + LZMA_IS_REP_0_LONG
198 + (state << LZMA_NUM_POS_BITS_MAX)
199 + pos_state
200 );
201 - if (rc_is_bit_0(rc, prob)) {
202 - rc_update_bit_0(rc, prob);
203 + if (!rc_is_bit_1(rc, prob)) {
205 state = state < LZMA_NUM_LIT_STATES ? 9 : 11;
206 #if ENABLE_FEATURE_LZMA_FAST
207 @@ -372,25 +351,16 @@
208 len = 1;
209 goto string;
210 #endif
211 - } else {
212 - rc_update_bit_1(rc, prob);
213 }
214 } else {
215 uint32_t distance;
217 - rc_update_bit_1(rc, prob);
218 - prob = p + LZMA_IS_REP_G1 + state;
219 - if (rc_is_bit_0(rc, prob)) {
220 - rc_update_bit_0(rc, prob);
221 - distance = rep1;
222 - } else {
223 - rc_update_bit_1(rc, prob);
224 - prob = p + LZMA_IS_REP_G2 + state;
225 - if (rc_is_bit_0(rc, prob)) {
226 - rc_update_bit_0(rc, prob);
227 - distance = rep2;
228 - } else {
229 - rc_update_bit_1(rc, prob);
230 + prob += LZMA_IS_REP_G1 - LZMA_IS_REP_G0;
231 + distance = rep1;
232 + if (rc_is_bit_1(rc, prob)) {
233 + prob += LZMA_IS_REP_G2 - LZMA_IS_REP_G1;
234 + distance = rep2;
235 + if (rc_is_bit_1(rc, prob)) {
236 distance = rep3;
237 rep3 = rep2;
238 }
239 @@ -404,24 +374,20 @@
240 }
242 prob_len = prob + LZMA_LEN_CHOICE;
243 - if (rc_is_bit_0(rc, prob_len)) {
244 - rc_update_bit_0(rc, prob_len);
245 - prob_len = (prob + LZMA_LEN_LOW
246 - + (pos_state << LZMA_LEN_NUM_LOW_BITS));
247 + if (!rc_is_bit_1(rc, prob_len)) {
248 + prob_len += LZMA_LEN_LOW - LZMA_LEN_CHOICE
249 + + (pos_state << LZMA_LEN_NUM_LOW_BITS);
250 offset = 0;
251 num_bits = LZMA_LEN_NUM_LOW_BITS;
252 } else {
253 - rc_update_bit_1(rc, prob_len);
254 - prob_len = prob + LZMA_LEN_CHOICE_2;
255 - if (rc_is_bit_0(rc, prob_len)) {
256 - rc_update_bit_0(rc, prob_len);
257 - prob_len = (prob + LZMA_LEN_MID
258 - + (pos_state << LZMA_LEN_NUM_MID_BITS));
259 + prob_len += LZMA_LEN_CHOICE_2 - LZMA_LEN_CHOICE;
260 + if (!rc_is_bit_1(rc, prob_len)) {
261 + prob_len += LZMA_LEN_MID - LZMA_LEN_CHOICE_2
262 + + (pos_state << LZMA_LEN_NUM_MID_BITS);
263 offset = 1 << LZMA_LEN_NUM_LOW_BITS;
264 num_bits = LZMA_LEN_NUM_MID_BITS;
265 } else {
266 - rc_update_bit_1(rc, prob_len);
267 - prob_len = prob + LZMA_LEN_HIGH;
268 + prob_len += LZMA_LEN_HIGH - LZMA_LEN_CHOICE_2;
269 offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
270 + (1 << LZMA_LEN_NUM_MID_BITS));
271 num_bits = LZMA_LEN_NUM_HIGH_BITS;
272 @@ -440,17 +406,18 @@
273 << LZMA_NUM_POS_SLOT_BITS);
274 rc_bit_tree_decode(rc, prob, LZMA_NUM_POS_SLOT_BITS,
275 &pos_slot);
276 + rep0 = pos_slot;
277 if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
278 num_bits = (pos_slot >> 1) - 1;
279 rep0 = 2 | (pos_slot & 1);
280 + prob = p + LZMA_ALIGN;
281 if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
282 rep0 <<= num_bits;
283 - prob = p + LZMA_SPEC_POS + rep0 - pos_slot - 1;
284 + prob += LZMA_SPEC_POS - LZMA_ALIGN - 1 + rep0 - pos_slot;
285 } else {
286 num_bits -= LZMA_NUM_ALIGN_BITS;
287 while (num_bits--)
288 rep0 = (rep0 << 1) | rc_direct_bit(rc);
289 - prob = p + LZMA_ALIGN;
290 rep0 <<= LZMA_NUM_ALIGN_BITS;
291 num_bits = LZMA_NUM_ALIGN_BITS;
292 }
293 @@ -461,8 +428,7 @@
294 rep0 |= i;
295 i <<= 1;
296 }
297 - } else
298 - rep0 = pos_slot;
299 + }
300 if (++rep0 == 0)
301 break;
302 }