wok view qemu/stuff/cloop.u @ rev 24736

updated libev and libev-dev (4.31 -> 4.33)
author Hans-Günter Theisgen
date Wed Mar 16 09:25:45 2022 +0100 (2022-03-16)
parents cea6e929d21e
line source
--- block/cloop.c
+++ block/cloop.c
@@ -25,33 +25,206 @@
#include "block/block_int.h"
#include "qemu/module.h"
#include <zlib.h>
+#include <lzma.h>
+
+#define CLOOP_COMPRESSOR_ZLIB 0x0
+#define CLOOP_COMPRESSOR_NONE 0x1
+#define CLOOP_COMPRESSOR_XZ 0x2
+#define CLOOP_COMPRESSOR_LZ4 0x3
+#define CLOOP_COMPRESSOR_LZO 0x4
+#define CLOOP_COMPRESSOR_ZSTD 0x5
+#define CLOOP_COMPRESSOR_LINK 0xF
+
+#define CLOOP_BLOCK_FLAGS(x) ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
+#define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU)

/* Maximum compressed block size */
#define MAX_BLOCK_SIZE (64 * 1024 * 1024)

+typedef struct cloop_tail {
+ uint32_t table_size;
+ uint32_t index_size;
+ uint32_t num_blocks;
+} cloop_tail;
+
+#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
+#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4)
+
+typedef struct block_info {
+ uint64_t offset; /* 64-bit offsets of compressed block */
+ uint32_t size; /* 32-bit compressed block size */
+ uint32_t flags; /* 32-bit compression flags */
+} block_info;
+
+static inline int build_index(struct block_info *offsets, unsigned long n,
+ unsigned long block_size, unsigned global_flags)
+{
+ uint32_t *ofs32 = (uint32_t *) offsets;
+ loff_t *ofs64 = (loff_t *) offsets;
+
+ /* v3 64bits bug: v1 assumed */
+ unsigned long v3_64=(n+1)/2;
+ loff_t prev;
+
+ if (ofs32[0] != 0 && ofs32[1] == 0) {
+ for (prev=le64_to_cpu(ofs64[v3_64]);
+ v3_64 > 0 && le64_to_cpu(ofs64[--v3_64]) < prev;
+ prev=le64_to_cpu(ofs64[v3_64]));
+ }
+
+ if (ofs32[0] == 0) {
+ if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
+ while (n--) {
+ offsets[n].offset = be64_to_cpu(offsets[n].offset);
+ offsets[n].size = be32_to_cpu(offsets[n].size);
+ offsets[n].flags = 0;
+ if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
+ return n+1;
+ }
+ }
+ else { /* V2.0/V4.0 */
+ loff_t last = CLOOP_BLOCK_OFFSET(be64_to_cpu(ofs64[n]));
+ uint32_t flags;
+ unsigned long i = n;
+
+ for (flags = 0; n-- ;) {
+ loff_t data = be64_to_cpu(ofs64[n]);
+
+ offsets[n].size = last -
+ (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
+ if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
+ return n+1;
+ last = offsets[n].offset;
+ offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
+ flags |= 1 << offsets[n].flags;
+ }
+ if (flags > 1) {
+ while (i--) {
+ if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
+ offsets[i] = offsets[offsets[i].offset];
+ }
+ }
+ }
+ }
+ }
+ else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
+ loff_t last = le64_to_cpu(ofs64[n]);
+ while (n--) {
+ offsets[n].size = last -
+ (offsets[n].offset = le64_to_cpu(ofs64[n]));
+ if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
+ return n+1;
+ last = offsets[n].offset;
+ offsets[n].flags = 0;
+ }
+ }
+ else if (be32_to_cpu(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
+ loff_t last = be32_to_cpu(ofs32[n]);
+ while (n--) {
+ offsets[n].size = last -
+ (offsets[n].offset = be32_to_cpu(ofs32[n]));
+ if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
+ return n+1;
+ last = offsets[n].offset;
+ offsets[n].flags = 0;
+ }
+ }
+ else { /* V3.0 */
+ unsigned long i;
+ loff_t j;
+
+ v3_64 = (ofs32[1] == 0) ? 2 : 1;
+ for (i = n; i-- > 0; ) {
+ offsets[i].size = be32_to_cpu(ofs32[i*v3_64]);
+ if ((offsets[i].size & 0x80000000) == 0 &&
+ offsets[i].size > 2 * MAX_BLOCK_SIZE)
+ return i+1;
+ }
+ for (i = 0, j = 128 + 4 + 4; i < n; i++) {
+ offsets[i].offset = j;
+ offsets[i].flags = global_flags;
+ if (offsets[i].size == 0xFFFFFFFF) {
+ offsets[i].flags = CLOOP_COMPRESSOR_NONE;
+ offsets[i].size = block_size;
+ }
+ if ((offsets[i].size & 0x80000000) == 0) {
+ j += offsets[i].size;
+ }
+ }
+ for (i = 0; i < n; i++) {
+ if (offsets[i].size & 0x80000000) {
+ offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
+ }
+ }
+ }
+ return 0;
+}
+
typedef struct BDRVCloopState {
CoMutex lock;
uint32_t block_size;
uint32_t n_blocks;
- uint64_t *offsets;
+ block_info *offsets;
uint32_t sectors_per_block;
uint32_t current_block;
uint8_t *compressed_block;
uint8_t *uncompressed_block;
z_stream zstream;
+ int global_flags;
} BDRVCloopState;

static int cloop_probe(const uint8_t *buf, int buf_size, const char *filename)
{
- const char *magic_version_2_0 = "#!/bin/sh\n"
- "#V2.0 Format\n"
+ static const uint8_t magic[] =
"modprobe cloop file=$0 && mount -r -t iso9660 /dev/cloop $1\n";
- int length = strlen(magic_version_2_0);
- if (length > buf_size) {
- length = buf_size;
+ int ret = 0, length = buf_size;
+ int i;
+
+ if (length > 127) {
+ length = 127;
+ }
+ for (i = 0; i < length - sizeof(magic) + 1; i++) {
+ if (buf[i] != magic[0]) continue;
+ if (memcmp(buf + i, magic, sizeof(magic) - 1)) continue;
+ ret = 2;
+ break;
}
- if (!memcmp(magic_version_2_0, buf, length)) {
- return 2;
+ return ret;
+}
+
+static uint32_t cloop_unpack(BDRVCloopState *s, int flag)
+{
+ int ret;
+ size_t src_pos;
+ size_t dest_pos;
+ uint64_t memlimit;
+ uint32_t outlen = s->zstream.avail_out;
+
+ switch (flag) {
+ case CLOOP_COMPRESSOR_ZLIB:
+ ret = inflateReset(&s->zstream);
+ if (ret != Z_OK) {
+ return 0;
+ }
+ ret = inflate(&s->zstream, Z_FINISH);
+ if (ret != Z_STREAM_END || s->zstream.total_out != outlen) {
+ return 0;
+ }
+ return outlen;
+ case CLOOP_COMPRESSOR_NONE:
+ memcpy(s->zstream.next_out, s->zstream.next_in, s->zstream.avail_in);
+ return s->zstream.avail_in;
+ case CLOOP_COMPRESSOR_XZ:
+ src_pos = 0;
+ dest_pos = 0;
+ memlimit = 32*1024*1024;
+ ret = lzma_stream_buffer_decode(&memlimit, 0, NULL, s->zstream.next_in, &src_pos,
+ s->zstream.avail_in, s->zstream.next_out, &dest_pos, s->zstream.total_out);
+
+ if(ret != LZMA_OK || s->zstream.avail_in != (int) src_pos) {
+ return 0;
+ }
+ return dest_pos;
}
return 0;
}
@@ -60,7 +233,7 @@
Error **errp)
{
BDRVCloopState *s = bs->opaque;
- uint32_t offsets_size, max_compressed_block_size = 1, i;
+ uint32_t offsets_size, max_compressed_block_size = 1;
int ret;

bs->read_only = 1;
@@ -91,79 +264,92 @@
MAX_BLOCK_SIZE / (1024 * 1024));
return -EINVAL;
}
-
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
return ret;
}
s->n_blocks = be32_to_cpu(s->n_blocks);

- /* read offsets */
- if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
- /* Prevent integer overflow */
- error_setg(errp, "n_blocks %u must be %zu or less",
- s->n_blocks,
- (UINT32_MAX - 1) / sizeof(uint64_t));
- return -EINVAL;
- }
- offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
- if (offsets_size > 512 * 1024 * 1024) {
- /* Prevent ridiculous offsets_size which causes memory allocation to
- * fail or overflows bdrv_pread() size. In practice the 512 MB
- * offsets[] limit supports 16 TB images at 256 KB block size.
- */
- error_setg(errp, "image requires too many offsets, "
- "try increasing block size");
- return -EINVAL;
- }
- s->offsets = g_malloc(offsets_size);
+ /* initialize zlib engine */
+ max_compressed_block_size = s->block_size + s->block_size/1000 + 12 + 4;
+ s->compressed_block = g_malloc(max_compressed_block_size + 1);
+ s->uncompressed_block = g_malloc(s->block_size);

- ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
- if (ret < 0) {
+ if (inflateInit(&s->zstream) != Z_OK) {
+ ret = -EINVAL;
goto fail;
}

- for (i = 0; i < s->n_blocks + 1; i++) {
- uint64_t size;
+ /* read offsets */
+ if (s->n_blocks + 1 == 0) {
+ cloop_tail tail;
+ int64_t end = bdrv_getlength(bs->file);
+ void *p;
+ uint32_t toclen, len;

- s->offsets[i] = be64_to_cpu(s->offsets[i]);
- if (i == 0) {
- continue;
+ ret = bdrv_pread(bs->file, end - sizeof(tail), &tail, sizeof(tail));
+ if (ret < 0) {
+ goto fail;
}

- if (s->offsets[i] < s->offsets[i - 1]) {
- error_setg(errp, "offsets not monotonically increasing at "
- "index %u, image file is corrupt", i);
- ret = -EINVAL;
- goto fail;
+ s->n_blocks = be32_to_cpu(tail.num_blocks);
+ offsets_size = s->n_blocks * sizeof(block_info);
+ if (offsets_size > 512 * 1024 * 1024) {
+ /* Prevent ridiculous offsets_size which causes memory allocation to
+ * fail or overflows bdrv_pread() size. In practice the 512 MB
+ * offsets[] limit supports 16 TB images at 256 KB block size.
+ */
+ error_setg(errp, "image requires too many offsets, "
+ "try increasing block size");
+ return -EINVAL;
}
+ len = be32_to_cpu(tail.table_size);
+ toclen = CLOOP3_INDEX_SIZE(be32_to_cpu(tail.index_size)) * s->n_blocks;
+ s->global_flags = CLOOP3_BLOCKS_FLAGS(be32_to_cpu(tail.index_size));

- size = s->offsets[i] - s->offsets[i - 1];
+ s->offsets = g_malloc(offsets_size);
+ p = g_malloc(len);

- /* Compressed blocks should be smaller than the uncompressed block size
- * but maybe compression performed poorly so the compressed block is
- * actually bigger. Clamp down on unrealistic values to prevent
- * ridiculous s->compressed_block allocation.
- */
- if (size > 2 * MAX_BLOCK_SIZE) {
- error_setg(errp, "invalid compressed block size at index %u, "
- "image file is corrupt", i);
+ ret = bdrv_pread(bs->file, end - sizeof(tail) - len, p, len);
+ if (ret < 0) {
+ goto fail;
+ }
+ s->zstream.next_in = p;
+ s->zstream.avail_in = len;
+ s->zstream.next_out = s->offsets;
+ s->zstream.avail_out = toclen;
+ if (cloop_unpack(s, s->global_flags) == 0) {
ret = -EINVAL;
goto fail;
}
+ g_free(p);
+ }
+ else {
+ offsets_size = s->n_blocks * sizeof(block_info);
+ if (offsets_size > 512 * 1024 * 1024) {
+ /* Prevent ridiculous offsets_size which causes memory allocation to
+ * fail or overflows bdrv_pread() size. In practice the 512 MB
+ * offsets[] limit supports 16 TB images at 256 KB block size.
+ */
+ error_setg(errp, "image requires too many offsets, "
+ "try increasing block size");
+ return -EINVAL;
+ }
+ s->offsets = g_malloc(offsets_size);

- if (size > max_compressed_block_size) {
- max_compressed_block_size = size;
+ ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
+ if (ret < 0) {
+ goto fail;
}
}
-
- /* initialize zlib engine */
- s->compressed_block = g_malloc(max_compressed_block_size + 1);
- s->uncompressed_block = g_malloc(s->block_size);
- if (inflateInit(&s->zstream) != Z_OK) {
+ ret = build_index(s->offsets, s->n_blocks, s->block_size, s->global_flags);
+ if (ret) {
+ error_setg(errp, "invalid compressed block size at index %u, "
+ "image file is corrupt", ret-1);
ret = -EINVAL;
goto fail;
}
+
s->current_block = s->n_blocks;

s->sectors_per_block = s->block_size/512;
@@ -184,10 +370,10 @@

if (s->current_block != block_num) {
int ret;
- uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
+ uint32_t bytes = s->offsets[block_num].size;

- ret = bdrv_pread(bs->file, s->offsets[block_num], s->compressed_block,
- bytes);
+ ret = bdrv_pread(bs->file, s->offsets[block_num].offset,
+ s->compressed_block, bytes);
if (ret != bytes) {
return -1;
}
@@ -196,12 +382,7 @@
s->zstream.avail_in = bytes;
s->zstream.next_out = s->uncompressed_block;
s->zstream.avail_out = s->block_size;
- ret = inflateReset(&s->zstream);
- if (ret != Z_OK) {
- return -1;
- }
- ret = inflate(&s->zstream, Z_FINISH);
- if (ret != Z_STREAM_END || s->zstream.total_out != s->block_size) {
+ if (cloop_unpack(s, s->offsets[block_num].flags) == 0) {
return -1;
}

--- block/Makefile.objs
+++ block/Makefile.objs
@@ -35,5 +35,5 @@
gluster.o-libs := $(GLUSTERFS_LIBS)
ssh.o-cflags := $(LIBSSH2_CFLAGS)
ssh.o-libs := $(LIBSSH2_LIBS)
-qcow.o-libs := -lz
+qcow.o-libs := -lz -llzma
linux-aio.o-libs := -laio
--- block/cloop.c
+++ block/cloop.c
@@ -48,7 +48,6 @@
} cloop_tail;

#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
-#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4)

typedef struct block_info {
uint64_t offset; /* 64-bit offsets of compressed block */
@@ -57,7 +56,7 @@
} block_info;

static inline int build_index(struct block_info *offsets, unsigned long n,
- unsigned long block_size, unsigned global_flags)
+ unsigned long block_size)
{
uint32_t *ofs32 = (uint32_t *) offsets;
loff_t *ofs64 = (loff_t *) offsets;
@@ -118,42 +117,44 @@
offsets[n].flags = 0;
}
}
- else if (be32_to_cpu(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
- loff_t last = be32_to_cpu(ofs32[n]);
- while (n--) {
- offsets[n].size = last -
- (offsets[n].offset = be32_to_cpu(ofs32[n]));
- if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
- return n+1;
- last = offsets[n].offset;
- offsets[n].flags = 0;
- }
- }
- else { /* V3.0 */
+ else { /* V3.0 or V0.68 */
unsigned long i;
loff_t j;

- v3_64 = (ofs32[1] == 0) ? 2 : 1;
+ for (i = 0; i < n && be32_to_cpu(ofs32[i]) < be32_to_cpu(ofs32[i+1]); i++);
+ if (i == n && be32_to_cpu(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
+ loff_t last = be32_to_cpu(ofs32[n]);
+ while (n--) {
+ offsets[n].size = last -
+ (offsets[n].offset = be32_to_cpu(ofs32[n]));
+ if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
+ return n+1;
+ last = offsets[n].offset;
+ offsets[n].flags = 0;
+ }
+ return 0;
+ }
+
+ v3_64 = (ofs32[1] == 0);
for (i = n; i-- > 0; ) {
- offsets[i].size = be32_to_cpu(ofs32[i*v3_64]);
- if ((offsets[i].size & 0x80000000) == 0 &&
- offsets[i].size > 2 * MAX_BLOCK_SIZE)
+ offsets[i].size = be32_to_cpu(ofs32[i << v3_64]);
+ if (offsets[i].size == 0xFFFFFFFF) {
+ offsets[i].size = 0x10000000 | block_size;
+ }
+ offsets[i].flags = (offsets[i].size >> 28);
+ offsets[i].size &= 0x0FFFFFFF;
+ if (offsets[i].size > 2 * MAX_BLOCK_SIZE)
return i+1;
}
for (i = 0, j = 128 + 4 + 4; i < n; i++) {
offsets[i].offset = j;
- offsets[i].flags = global_flags;
- if (offsets[i].size == 0xFFFFFFFF) {
- offsets[i].flags = CLOOP_COMPRESSOR_NONE;
- offsets[i].size = block_size;
- }
- if ((offsets[i].size & 0x80000000) == 0) {
+ if (offsets[i].flags < 8) {
j += offsets[i].size;
}
}
for (i = 0; i < n; i++) {
- if (offsets[i].size & 0x80000000) {
- offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
+ if (offsets[i].flags >= 8) {
+ offsets[i] = offsets[offsets[i].size];
}
}
}
@@ -170,7 +171,6 @@
uint8_t *compressed_block;
uint8_t *uncompressed_block;
z_stream zstream;
- int global_flags;
} BDRVCloopState;

static int cloop_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -305,7 +305,6 @@
}
len = be32_to_cpu(tail.table_size);
toclen = CLOOP3_INDEX_SIZE(be32_to_cpu(tail.index_size)) * s->n_blocks;
- s->global_flags = CLOOP3_BLOCKS_FLAGS(be32_to_cpu(tail.index_size));

s->offsets = g_malloc(offsets_size);
p = g_malloc(len);
@@ -316,9 +315,9 @@
}
s->zstream.next_in = p;
s->zstream.avail_in = len;
- s->zstream.next_out = s->offsets;
+ s->zstream.next_out = (void *) s->offsets;
s->zstream.avail_out = toclen;
- if (cloop_unpack(s, s->global_flags) == 0) {
+ if (cloop_unpack(s, CLOOP_COMPRESSOR_ZLIB) == 0) {
ret = -EINVAL;
goto fail;
}
@@ -342,7 +341,7 @@
goto fail;
}
}
- ret = build_index(s->offsets, s->n_blocks, s->block_size, s->global_flags);
+ ret = build_index(s->offsets, s->n_blocks, s->block_size);
if (ret) {
error_setg(errp, "invalid compressed block size at index %u, "
"image file is corrupt", ret-1);