wok view qemu/stuff/cloop.u @ rev 23680

qemu: partial cloop v4 support
author Pascal Bellard <pascal.bellard@slitaz.org>
date Sun Apr 26 15:17:58 2020 +0000 (2020-04-26)
parents 74ce1799eee7
children f29529667ea4
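
This changeset extends the qemu 2.0.2 cloop block driver beyond the original v2 format: build_index() below recognises the v0.68/v1/v2/v3/v4 index layouts and normalises them all into an array of block_info entries, and cloop_unpack() dispatches on a per-block compressor id (zlib, uncompressed, or xz so far). In the v2/v4 layout each raw index entry is a big-endian 64-bit word whose low 60 bits hold the file offset of the compressed block and whose top nibble holds the compressor id. The snippet below is only an illustration of that encoding, not part of the patch; it reuses the masks the patch defines, and decode_entry() is a hypothetical helper name:

    #include <stdint.h>

    /* Same masks as the patch's CLOOP_BLOCK_* macros. */
    #define CLOOP_BLOCK_FLAGS(x)  ((unsigned int)(((x) & 0xf000000000000000ULL) >> 60))
    #define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffULL)

    /* Illustration only: split one big-endian v2/v4 index entry into the
     * file offset of the compressed block and its compressor id
     * (0 = zlib, 1 = none, 2 = xz, ... as in CLOOP_COMPRESSOR_*). */
    static void decode_entry(const uint8_t raw[8], uint64_t *offset, unsigned *flags)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++) {   /* big-endian to host order */
            v = (v << 8) | raw[i];
        }
        *offset = CLOOP_BLOCK_OFFSET(v);
        *flags  = CLOOP_BLOCK_FLAGS(v);
    }
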
line source
1 --- source/qemu-2.0.2/block/cloop.c 2014-08-18 18:03:24.000000000 +0200
2 +++ cloop.c 2020-04-26 16:58:23.306636715 +0200
3 @@ -25,33 +25,206 @@
4 #include "block/block_int.h"
5 #include "qemu/module.h"
6 #include <zlib.h>
7 +#include <lzma.h>
8 +
9 +#define CLOOP_COMPRESSOR_ZLIB 0x0
10 +#define CLOOP_COMPRESSOR_NONE 0x1
11 +#define CLOOP_COMPRESSOR_XZ 0x2
12 +#define CLOOP_COMPRESSOR_LZ4 0x3
13 +#define CLOOP_COMPRESSOR_LZO 0x4
14 +#define CLOOP_COMPRESSOR_ZSTD 0x5
15 +#define CLOOP_COMPRESSOR_LINK 0xF
16 +
17 +#define CLOOP_BLOCK_FLAGS(x) ((unsigned int)(((x) & 0xf000000000000000LLU) >> 60))
18 +#define CLOOP_BLOCK_OFFSET(x) ((x) & 0x0fffffffffffffffLLU)
20 /* Maximum compressed block size */
21 #define MAX_BLOCK_SIZE (64 * 1024 * 1024)
23 +typedef struct cloop_tail {
24 + uint32_t table_size;
25 + uint32_t index_size;
26 + uint32_t num_blocks;
27 +} cloop_tail;
28 +
29 +#define CLOOP3_INDEX_SIZE(x) ((unsigned int)((x) & 0xF))
30 +#define CLOOP3_BLOCKS_FLAGS(x) ((unsigned int)((x) & 0x70) >> 4)
31 +
32 +typedef struct block_info {
33 + uint64_t offset; /* 64-bit offsets of compressed block */
34 + uint32_t size; /* 32-bit compressed block size */
35 + uint32_t flags; /* 32-bit compression flags */
36 +} block_info;
37 +
38 +static inline int build_index(struct block_info *offsets, unsigned long n,
39 + unsigned long block_size, unsigned global_flags)
40 +{
41 + uint32_t *ofs32 = (uint32_t *) offsets;
42 + loff_t *ofs64 = (loff_t *) offsets;
43 +
44 + /* v3 64bits bug: v1 assumed */
45 + unsigned long v3_64;
46 + loff_t prev;
47 +
48 + if (ofs32[0] != 0 && ofs32[1] == 0) {
49 + for (v3_64=(n+1)/2, prev=le64_to_cpu(ofs64[v3_64]);
50 + v3_64 > 0 && le64_to_cpu(ofs64[--v3_64]) < prev;
51 + prev=le64_to_cpu(ofs64[v3_64]));
52 + }
53 +
54 + if (ofs32[0] == 0) {
55 + if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
56 + while (n--) {
57 + offsets[n].offset = be64_to_cpu(offsets[n].offset);
58 + offsets[n].size = ntohl(offsets[n].size);
59 + offsets[n].flags = 0;
60 + if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
61 + return n+1;
62 + }
63 + }
64 + else { /* V2.0/V4.0 */
65 + loff_t last = CLOOP_BLOCK_OFFSET(be64_to_cpu(ofs64[n]));
66 + uint32_t flags;
67 + unsigned long i = n;
68 +
69 + for (flags = 0; n-- ;) {
70 + loff_t data = be64_to_cpu(ofs64[n]);
71 +
72 + offsets[n].size = last -
73 + (offsets[n].offset = CLOOP_BLOCK_OFFSET(data));
74 + if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
75 + return n+1;
76 + last = offsets[n].offset;
77 + offsets[n].flags = CLOOP_BLOCK_FLAGS(data);
78 + flags |= 1 << offsets[n].flags;
79 + }
80 + if (flags > 1) {
81 + while (i--) {
82 + if (offsets[i].flags == CLOOP_COMPRESSOR_LINK) {
83 + offsets[i] = offsets[offsets[i].offset];
84 + }
85 + }
86 + }
87 + }
88 + }
89 + else if (ofs32[1] == 0 && v3_64 == 0) { /* V1.0 */
90 + loff_t last = le64_to_cpu(ofs64[n]);
91 + while (n--) {
92 + offsets[n].size = last -
93 + (offsets[n].offset = le64_to_cpu(ofs64[n]));
94 + if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
95 + return n+1;
96 + last = offsets[n].offset;
97 + offsets[n].flags = 0;
98 + }
99 + }
100 + else if (ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
101 + loff_t last = ntohl(ofs32[n]);
102 + while (n--) {
103 + offsets[n].size = last -
104 + (offsets[n].offset = ntohl(ofs32[n]));
105 + if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
106 + return n+1;
107 + last = offsets[n].offset;
108 + offsets[n].flags = 0;
109 + }
110 + }
111 + else { /* V3.0 */
112 + unsigned long i;
113 + loff_t j;
114 +
115 + v3_64 = (ofs32[1] == 0) ? 2 : 1;
116 + for (i = n; i-- > 0; ) {
117 + offsets[i].size = ntohl(ofs32[i*v3_64]);
118 + if ((offsets[i].size & 0x80000000) == 0 &&
119 + offsets[i].size > 2 * MAX_BLOCK_SIZE)
120 + return i+1;
121 + }
122 + for (i = 0, j = 128 + 4 + 4; i < n; i++) {
123 + offsets[i].offset = j;
124 + offsets[i].flags = global_flags;
125 + if (offsets[i].size == 0xFFFFFFFF) {
126 + offsets[i].flags = CLOOP_COMPRESSOR_NONE;
127 + offsets[i].size = block_size;
128 + }
129 + if ((offsets[i].size & 0x80000000) == 0) {
130 + j += offsets[i].size;
131 + }
132 + }
133 + for (i = 0; i < n; i++) {
134 + if (offsets[i].size & 0x80000000) {
135 + offsets[i] = offsets[offsets[i].size & 0x7FFFFFFF];
136 + }
137 + }
138 + }
139 + return 0;
140 +}
141 +
142 typedef struct BDRVCloopState {
143 CoMutex lock;
144 uint32_t block_size;
145 uint32_t n_blocks;
146 - uint64_t *offsets;
147 + block_info *offsets;
148 uint32_t sectors_per_block;
149 uint32_t current_block;
150 uint8_t *compressed_block;
151 uint8_t *uncompressed_block;
152 z_stream zstream;
153 + int global_flags;
154 } BDRVCloopState;
156 static int cloop_probe(const uint8_t *buf, int buf_size, const char *filename)
157 {
158 - const char *magic_version_2_0 = "#!/bin/sh\n"
159 - "#V2.0 Format\n"
160 + static const uint8_t magic[] =
161 "modprobe cloop file=$0 && mount -r -t iso9660 /dev/cloop $1\n";
162 - int length = strlen(magic_version_2_0);
163 - if (length > buf_size) {
164 - length = buf_size;
165 + int i, ret = 0, length = buf_size;
166 + uint8_t c;
167 +
168 + if (length > 127) {
169 + length = 127;
170 + }
171 + for (i = 0; i < length - sizeof(magic) + 1; i++) {
172 + if (buf[i] != magic[0]) continue;
173 + if (strncmp(buf + i, magic, sizeof(magic) - 1)) continue;
174 + ret = 2;
175 + break;
176 }
177 - if (!memcmp(magic_version_2_0, buf, length)) {
178 - return 2;
179 + return ret;
180 +}
181 +
182 +static uint32_t cloop_unpack(BDRVCloopState *s, int flag)
183 +{
184 + int ret;
185 + size_t src_pos;
186 + size_t dest_pos;
187 + uint64_t memlimit;
188 + uint32_t outlen = s->zstream.avail_out;
189 +
190 + switch (flag) {
191 + case CLOOP_COMPRESSOR_ZLIB:
192 + ret = inflateReset(&s->zstream);
193 + if (ret != Z_OK) {
194 + return 0;
195 + }
196 + ret = inflate(&s->zstream, Z_FINISH);
197 + if (ret != Z_STREAM_END || s->zstream.total_out != outlen) {
198 + return 0;
199 + }
200 + return outlen;
201 + case CLOOP_COMPRESSOR_NONE:
202 + memcpy(s->zstream.next_out, s->zstream.next_in, s->zstream.avail_in);
203 + return s->zstream.avail_in;
204 + case CLOOP_COMPRESSOR_XZ:
205 + src_pos = 0;
206 + dest_pos = 0;
207 + memlimit = 32*1024*1024;
208 + ret = lzma_stream_buffer_decode(&memlimit, 0, NULL, s->zstream.next_in, &src_pos,
209 + s->zstream.avail_in, s->zstream.next_out, &dest_pos, s->zstream.avail_out);
210 +
211 + if(ret != LZMA_OK || s->zstream.avail_in != (int) src_pos) {
212 + return 0;
213 + }
214 + return dest_pos;
215 }
216 return 0;
217 }
218 @@ -91,79 +264,92 @@
219 MAX_BLOCK_SIZE / (1024 * 1024));
220 return -EINVAL;
221 }
222 -
223 ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
224 if (ret < 0) {
225 return ret;
226 }
227 s->n_blocks = be32_to_cpu(s->n_blocks);
229 - /* read offsets */
230 - if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
231 - /* Prevent integer overflow */
232 - error_setg(errp, "n_blocks %u must be %zu or less",
233 - s->n_blocks,
234 - (UINT32_MAX - 1) / sizeof(uint64_t));
235 - return -EINVAL;
236 - }
237 - offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
238 - if (offsets_size > 512 * 1024 * 1024) {
239 - /* Prevent ridiculous offsets_size which causes memory allocation to
240 - * fail or overflows bdrv_pread() size. In practice the 512 MB
241 - * offsets[] limit supports 16 TB images at 256 KB block size.
242 - */
243 - error_setg(errp, "image requires too many offsets, "
244 - "try increasing block size");
245 - return -EINVAL;
246 - }
247 - s->offsets = g_malloc(offsets_size);
248 + /* initialize zlib engine */
249 + max_compressed_block_size = s->block_size + s->block_size/1000 + 12 + 4;
250 + s->compressed_block = g_malloc(max_compressed_block_size + 1);
251 + s->uncompressed_block = g_malloc(s->block_size);
253 - ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
254 - if (ret < 0) {
255 + if (inflateInit(&s->zstream) != Z_OK) {
256 + ret = -EINVAL;
257 goto fail;
258 }
260 - for (i = 0; i < s->n_blocks + 1; i++) {
261 - uint64_t size;
262 + /* read offsets */
263 + if (s->n_blocks + 1 == 0) {
264 + cloop_tail tail;
265 + int64_t end = bdrv_getlength(bs->file);
266 + void *p;
267 + uint32_t toclen, len;
269 - s->offsets[i] = be64_to_cpu(s->offsets[i]);
270 - if (i == 0) {
271 - continue;
272 + ret = bdrv_pread(bs->file, end - sizeof(tail), &tail, sizeof(tail));
273 + if (ret < 0) {
274 + goto fail;
275 }
277 - if (s->offsets[i] < s->offsets[i - 1]) {
278 - error_setg(errp, "offsets not monotonically increasing at "
279 - "index %u, image file is corrupt", i);
280 - ret = -EINVAL;
281 - goto fail;
282 + s->n_blocks = be32_to_cpu(tail.num_blocks);
283 + offsets_size = s->n_blocks * sizeof(block_info);
284 + if (offsets_size > 512 * 1024 * 1024) {
285 + /* Prevent ridiculous offsets_size which causes memory allocation to
286 + * fail or overflows bdrv_pread() size. In practice the 512 MB
287 + * offsets[] limit supports 16 TB images at 256 KB block size.
288 + */
289 + error_setg(errp, "image requires too many offsets, "
290 + "try increasing block size");
291 + return -EINVAL;
292 }
293 + len = be32_to_cpu(tail.table_size);
294 + toclen = CLOOP3_INDEX_SIZE(be32_to_cpu(tail.index_size)) * s->n_blocks;
295 + s->global_flags = CLOOP3_BLOCKS_FLAGS(be32_to_cpu(tail.index_size));
297 - size = s->offsets[i] - s->offsets[i - 1];
298 + s->offsets = g_malloc(offsets_size);
299 + p = g_malloc(len);
301 - /* Compressed blocks should be smaller than the uncompressed block size
302 - * but maybe compression performed poorly so the compressed block is
303 - * actually bigger. Clamp down on unrealistic values to prevent
304 - * ridiculous s->compressed_block allocation.
305 - */
306 - if (size > 2 * MAX_BLOCK_SIZE) {
307 - error_setg(errp, "invalid compressed block size at index %u, "
308 - "image file is corrupt", i);
309 + ret = bdrv_pread(bs->file, end - sizeof(tail) - len, p, len);
310 + if (ret < 0) {
311 + goto fail;
312 + }
313 + s->zstream.next_in = p;
314 + s->zstream.avail_in = len;
315 + s->zstream.next_out = s->offsets;
316 + s->zstream.avail_out = toclen;
317 + if (cloop_unpack(s, s->global_flags) == 0) {
318 ret = -EINVAL;
319 goto fail;
320 }
321 + g_free(p);
322 + }
323 + else {
324 + offsets_size = s->n_blocks * sizeof(block_info);
325 + if (offsets_size > 512 * 1024 * 1024) {
326 + /* Prevent ridiculous offsets_size which causes memory allocation to
327 + * fail or overflows bdrv_pread() size. In practice the 512 MB
328 + * offsets[] limit supports 16 TB images at 256 KB block size.
329 + */
330 + error_setg(errp, "image requires too many offsets, "
331 + "try increasing block size");
332 + return -EINVAL;
333 + }
334 + s->offsets = g_malloc(offsets_size);
336 - if (size > max_compressed_block_size) {
337 - max_compressed_block_size = size;
338 + ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
339 + if (ret < 0) {
340 + goto fail;
341 }
342 }
343 -
344 - /* initialize zlib engine */
345 - s->compressed_block = g_malloc(max_compressed_block_size + 1);
346 - s->uncompressed_block = g_malloc(s->block_size);
347 - if (inflateInit(&s->zstream) != Z_OK) {
348 + ret = build_index(s->offsets, s->n_blocks, s->block_size, s->global_flags);
349 + if (ret) {
350 + error_setg(errp, "invalid compressed block size at index %u, "
351 + "image file is corrupt", ret-1);
352 ret = -EINVAL;
353 goto fail;
354 }
355 +
356 s->current_block = s->n_blocks;
358 s->sectors_per_block = s->block_size/512;
359 @@ -184,10 +370,10 @@
361 if (s->current_block != block_num) {
362 int ret;
363 - uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
364 + uint32_t bytes = s->offsets[block_num].size;
366 - ret = bdrv_pread(bs->file, s->offsets[block_num], s->compressed_block,
367 - bytes);
368 + ret = bdrv_pread(bs->file, s->offsets[block_num].offset,
369 + s->compressed_block, bytes);
370 if (ret != bytes) {
371 return -1;
372 }
373 @@ -196,12 +382,7 @@
374 s->zstream.avail_in = bytes;
375 s->zstream.next_out = s->uncompressed_block;
376 s->zstream.avail_out = s->block_size;
377 - ret = inflateReset(&s->zstream);
378 - if (ret != Z_OK) {
379 - return -1;
380 - }
381 - ret = inflate(&s->zstream, Z_FINISH);
382 - if (ret != Z_STREAM_END || s->zstream.total_out != s->block_size) {
383 + if (cloop_unpack(s, s->offsets[block_num].flags) == 0) {
384 return -1;
385 }
387 --- source/qemu-2.0.2/block/Makefile.objs 2014-08-18 18:03:24.000000000 +0200
388 +++ Makefile.objs 2020-04-26 16:32:51.010232389 +0200
389 @@ -35,5 +35,5 @@
390 gluster.o-libs := $(GLUSTERFS_LIBS)
391 ssh.o-cflags := $(LIBSSH2_CFLAGS)
392 ssh.o-libs := $(LIBSSH2_LIBS)
393 -qcow.o-libs := -lz
394 +qcow.o-libs := -lz -llzma
395 linux-aio.o-libs := -laio
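
For reference, the new CLOOP_COMPRESSOR_XZ branch in cloop_unpack() relies on liblzma's single-call decoder, which is why the Makefile.objs hunk above adds -llzma next to the existing -lz. Below is a minimal standalone sketch of that call; the helper name xz_decode_block is made up for illustration, whereas in the patch the call is made inline on the zstream buffers:

    #include <lzma.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the single-shot xz decode used by the CLOOP_COMPRESSOR_XZ
     * case: returns the number of bytes written to out, or 0 on failure. */
    static size_t xz_decode_block(const uint8_t *in, size_t in_size,
                                  uint8_t *out, size_t out_size)
    {
        uint64_t memlimit = 32 * 1024 * 1024;   /* same 32 MB cap as the patch */
        size_t in_pos = 0, out_pos = 0;

        lzma_ret ret = lzma_stream_buffer_decode(&memlimit, 0, NULL,
                                                 in, &in_pos, in_size,
                                                 out, &out_pos, out_size);
        if (ret != LZMA_OK || in_pos != in_size) {
            return 0;
        }
        return out_pos;
    }

Such a translation unit compiles with a plain cc -c and links against -llzma, matching the library the Makefile change pulls in.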