wok-next diff qemu/stuff/patches/cloop.u @ rev 20661

Unification of the patch system
author Aleksej Bobylev <al.bobylev@gmail.com>
date Thu May 10 21:12:00 2018 +0300 (2018-05-10)
parents qemu-light/stuff/cloop.u@5b64ca8fb7e1
children
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/qemu/stuff/patches/cloop.u	Thu May 10 21:12:00 2018 +0300
     1.3 @@ -0,0 +1,284 @@
     1.4 +--- block/cloop.c
     1.5 ++++ block/cloop.c
     1.6 +@@ -29,11 +29,90 @@
     1.7 + /* Maximum compressed block size */
     1.8 + #define MAX_BLOCK_SIZE (64 * 1024 * 1024)
     1.9 + 
    1.10 ++typedef struct cloop_tail {
    1.11 ++	uint32_t table_size;
    1.12 ++	uint32_t index_size;
    1.13 ++	uint32_t num_blocks;
    1.14 ++} cloop_tail;
    1.15 ++
    1.16 ++typedef struct block_info {
    1.17 ++	uint64_t offset;	/* 64-bit offsets of compressed block */
    1.18 ++	uint32_t size;		/* 32-bit compressed block size */
    1.19 ++	uint32_t optidx;	/* 32-bit index number */
    1.20 ++} block_info;
    1.21 ++
    1.22 ++static inline int build_index(block_info *offsets, unsigned long n)
    1.23 ++{
    1.24 ++	uint32_t *ofs32 = (uint32_t *) offsets;
    1.25 ++	uint64_t *ofs64 = (uint64_t *) offsets;
    1.26 ++	
    1.27 ++	if (ofs32[0] == 0) {
    1.28 ++		if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */
    1.29 ++			while (n--) {
    1.30 ++				offsets[n].offset = be64_to_cpu(offsets[n].offset);
    1.31 ++				offsets[n].size = ntohl(offsets[n].size);
    1.32 ++        			if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
    1.33 ++        				return n+1;
    1.34 ++			}
    1.35 ++		}
    1.36 ++		else { /* V2.0 */
    1.37 ++			uint64_t last = be64_to_cpu(ofs64[n - 1]);
    1.38 ++			while (n--) {
    1.39 ++				offsets[n].size = last - 
    1.40 ++					(offsets[n].offset = be64_to_cpu(ofs64[n])); 
    1.41 ++        			if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
    1.42 ++        				return n+1;
    1.43 ++				last = offsets[n].offset;
    1.44 ++			}
    1.45 ++		}
    1.46 ++	}
    1.47 ++	else if (ofs32[1] == 0) { /* V1.0 */
    1.48 ++		uint64_t last = le64_to_cpu(ofs64[n - 1]);
    1.49 ++		while (n--) {
    1.50 ++			offsets[n].size = last - 
    1.51 ++				(offsets[n].offset = le64_to_cpu(ofs64[n])); 
    1.52 ++        		if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
    1.53 ++        			return n+1;
    1.54 ++			last = offsets[n].offset;
    1.55 ++		}
    1.56 ++	}
    1.57 ++	else if (ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */
    1.58 ++		uint64_t last = ntohl(ofs32[n - 1]);
    1.59 ++		while (n--) {
    1.60 ++			offsets[n].size = last - 
    1.61 ++				(offsets[n].offset = ntohl(ofs32[n])); 
    1.62 ++        		if (offsets[n].size > 2 * MAX_BLOCK_SIZE)
    1.63 ++        			return n+1;
    1.64 ++			last = offsets[n].offset;
    1.65 ++		}
    1.66 ++	}
    1.67 ++	else { /* V3.0 */
    1.68 ++		unsigned long i;
    1.69 ++		uint64_t j;
    1.70 ++		
    1.71 ++		for (i = n; i-- > 0; ) {
    1.72 ++			offsets[i].size = ntohl(ofs32[i]); 
    1.73 ++        		if (offsets[i].size > 2 * MAX_BLOCK_SIZE)
    1.74 ++        			return i+1;
    1.75 ++		}
    1.76 ++		for (i = 0, j = 128 + 4 + 4; i < n; i++) {
    1.77 ++			offsets[i].offset = j;
    1.78 ++			if (offsets[i].size & 0x80000000) {
    1.79 ++				unsigned long k = offsets[i].size & 0x7FFFFFFF;
    1.80 ++				offsets[i].offset = offsets[k].offset;
    1.81 ++				offsets[i].size = offsets[k].size;
    1.82 ++			}
    1.83 ++			else j += offsets[i].size;
    1.84 ++		}
    1.85 ++	}
    1.86 ++	return 0;
    1.87 ++}
    1.88 ++
    1.89 + typedef struct BDRVCloopState {
    1.90 +     CoMutex lock;
    1.91 +     uint32_t block_size;
    1.92 +     uint32_t n_blocks;
    1.93 +-    uint64_t *offsets;
    1.94 ++    block_info *offsets;
    1.95 +     uint32_t sectors_per_block;
    1.96 +     uint32_t current_block;
    1.97 +     uint8_t *compressed_block;
    1.98 +@@ -43,17 +117,21 @@
    1.99 + 
   1.100 + static int cloop_probe(const uint8_t *buf, int buf_size, const char *filename)
   1.101 + {
   1.102 +-    const char *magic_version_2_0 = "#!/bin/sh\n"
   1.103 +-        "#V2.0 Format\n"
   1.104 ++    static const uint8_t magic[] =
   1.105 +         "modprobe cloop file=$0 && mount -r -t iso9660 /dev/cloop $1\n";
   1.106 +-    int length = strlen(magic_version_2_0);
   1.107 +-    if (length > buf_size) {
   1.108 +-        length = buf_size;
   1.109 ++    int i, ret = 0, length = buf_size;
   1.110 ++    uint8_t c;
   1.111 ++
   1.112 ++    if (length > 127) {
   1.113 ++        length = 127;
   1.114 +     }
   1.115 +-    if (!memcmp(magic_version_2_0, buf, length)) {
   1.116 +-        return 2;
   1.117 ++    for (i = 0; i < length - sizeof(magic) + 1; i++) {
   1.118 ++	if (buf[i] != magic[0]) continue;
   1.119 ++	if (strncmp(buf + i, magic, sizeof(magic) - 1)) continue;
   1.120 ++	ret = 2;
   1.121 ++	break;
   1.122 +     }
   1.123 +-    return 0;
   1.124 ++    return ret;
   1.125 + }
   1.126 + 
   1.127 + static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
   1.128 +@@ -91,79 +169,97 @@
   1.129 +                    MAX_BLOCK_SIZE / (1024 * 1024));
   1.130 +         return -EINVAL;
   1.131 +     }
   1.132 +-
   1.133 +     ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
   1.134 +     if (ret < 0) {
   1.135 +         return ret;
   1.136 +     }
   1.137 +     s->n_blocks = be32_to_cpu(s->n_blocks);
   1.138 + 
   1.139 +-    /* read offsets */
   1.140 +-    if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
   1.141 +-        /* Prevent integer overflow */
   1.142 +-        error_setg(errp, "n_blocks %u must be %zu or less",
   1.143 +-                   s->n_blocks,
   1.144 +-                   (UINT32_MAX - 1) / sizeof(uint64_t));
   1.145 +-        return -EINVAL;
   1.146 +-    }
   1.147 +-    offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
   1.148 +-    if (offsets_size > 512 * 1024 * 1024) {
   1.149 +-        /* Prevent ridiculous offsets_size which causes memory allocation to
   1.150 +-         * fail or overflows bdrv_pread() size.  In practice the 512 MB
   1.151 +-         * offsets[] limit supports 16 TB images at 256 KB block size.
   1.152 +-         */
   1.153 +-        error_setg(errp, "image requires too many offsets, "
   1.154 +-                   "try increasing block size");
   1.155 +-        return -EINVAL;
   1.156 +-    }
   1.157 +-    s->offsets = g_malloc(offsets_size);
   1.158 ++    /* initialize zlib engine */
   1.159 ++    max_compressed_block_size =  s->block_size + s->block_size/1000 + 12 + 4;
   1.160 ++    s->compressed_block = g_malloc(max_compressed_block_size + 1);
   1.161 ++    s->uncompressed_block = g_malloc(s->block_size);
   1.162 + 
   1.163 +-    ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
   1.164 +-    if (ret < 0) {
   1.165 ++    if (inflateInit(&s->zstream) != Z_OK) {
   1.166 ++        ret = -EINVAL;
   1.167 +         goto fail;
   1.168 +     }
   1.169 + 
   1.170 +-    for (i = 0; i < s->n_blocks + 1; i++) {
   1.171 +-        uint64_t size;
   1.172 ++    /* read offsets */
   1.173 ++    if (s->n_blocks + 1 == 0) {
   1.174 ++        cloop_tail tail;
   1.175 ++        int64_t end = bdrv_getlength(bs->file);
   1.176 ++	void *p;
   1.177 ++	uint32_t toclen, len; 
   1.178 + 
   1.179 +-        s->offsets[i] = be64_to_cpu(s->offsets[i]);
   1.180 +-        if (i == 0) {
   1.181 +-            continue;
   1.182 ++        ret = bdrv_pread(bs->file, end - sizeof(tail), &tail, sizeof(tail));
   1.183 ++        if (ret < 0) {
   1.184 ++            goto fail;
   1.185 +         }
   1.186 + 
   1.187 +-        if (s->offsets[i] < s->offsets[i - 1]) {
   1.188 +-            error_setg(errp, "offsets not monotonically increasing at "
   1.189 +-                       "index %u, image file is corrupt", i);
   1.190 +-            ret = -EINVAL;
   1.191 +-            goto fail;
   1.192 ++        s->n_blocks = be32_to_cpu(tail.num_blocks);
   1.193 ++        offsets_size = s->n_blocks * sizeof(block_info);
   1.194 ++        if (offsets_size > 512 * 1024 * 1024) {
   1.195 ++            /* Prevent ridiculous offsets_size which causes memory allocation to
   1.196 ++             * fail or overflows bdrv_pread() size.  In practice the 512 MB
   1.197 ++             * offsets[] limit supports 16 TB images at 256 KB block size.
   1.198 ++             */
   1.199 ++            error_setg(errp, "image requires too many offsets, "
   1.200 ++                       "try increasing block size");
   1.201 ++            return -EINVAL;
   1.202 +         }
   1.203 ++	len = be32_to_cpu(tail.table_size);
   1.204 ++	toclen = (be32_to_cpu(tail.index_size) & 255) * s->n_blocks;
   1.205 + 
   1.206 +-        size = s->offsets[i] - s->offsets[i - 1];
   1.207 ++        s->offsets = g_malloc(offsets_size);
   1.208 ++	p = g_malloc(len);
   1.209 + 
   1.210 +-        /* Compressed blocks should be smaller than the uncompressed block size
   1.211 +-         * but maybe compression performed poorly so the compressed block is
   1.212 +-         * actually bigger.  Clamp down on unrealistic values to prevent
   1.213 +-         * ridiculous s->compressed_block allocation.
   1.214 +-         */
   1.215 +-        if (size > 2 * MAX_BLOCK_SIZE) {
   1.216 +-            error_setg(errp, "invalid compressed block size at index %u, "
   1.217 +-                       "image file is corrupt", i);
   1.218 ++        ret = bdrv_pread(bs->file, end - sizeof(tail) - len, p, len);
   1.219 ++        if (ret < 0) {
   1.220 ++            goto fail;
   1.221 ++        }
   1.222 ++        s->zstream.next_in = p;
   1.223 ++        s->zstream.avail_in = len;
   1.224 ++        s->zstream.next_out = s->offsets;
   1.225 ++        s->zstream.avail_out = toclen;
   1.226 ++        ret = inflateReset(&s->zstream);
   1.227 ++        if (ret != Z_OK) {
   1.228 +             ret = -EINVAL;
   1.229 +             goto fail;
   1.230 +         }
   1.231 +-
   1.232 +-        if (size > max_compressed_block_size) {
   1.233 +-            max_compressed_block_size = size;
   1.234 ++        ret = inflate(&s->zstream, Z_FINISH);
   1.235 ++        if (ret != Z_STREAM_END || s->zstream.total_out != toclen) {
   1.236 ++            ret = -EINVAL;
   1.237 ++            goto fail;
   1.238 +         }
   1.239 ++	g_free(p);
   1.240 +     }
   1.241 ++    else {
   1.242 ++        offsets_size = s->n_blocks * sizeof(block_info);
   1.243 ++        if (offsets_size > 512 * 1024 * 1024) {
   1.244 ++            /* Prevent ridiculous offsets_size which causes memory allocation to
   1.245 ++             * fail or overflows bdrv_pread() size.  In practice the 512 MB
   1.246 ++             * offsets[] limit supports 16 TB images at 256 KB block size.
   1.247 ++             */
   1.248 ++            error_setg(errp, "image requires too many offsets, "
   1.249 ++                       "try increasing block size");
   1.250 ++            return -EINVAL;
   1.251 ++        }
   1.252 ++        s->offsets = g_malloc(offsets_size);
   1.253 + 
   1.254 +-    /* initialize zlib engine */
   1.255 +-    s->compressed_block = g_malloc(max_compressed_block_size + 1);
   1.256 +-    s->uncompressed_block = g_malloc(s->block_size);
   1.257 +-    if (inflateInit(&s->zstream) != Z_OK) {
   1.258 ++        ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
   1.259 ++        if (ret < 0) {
   1.260 ++            goto fail;
   1.261 ++        }
   1.262 ++    }
   1.263 ++    ret = build_index(s->offsets, s->n_blocks);
   1.264 ++    if (ret) {
   1.265 ++        error_setg(errp, "invalid compressed block size at index %u, "
   1.266 ++                   "image file is corrupt", ret-1);
   1.267 +         ret = -EINVAL;
   1.268 +         goto fail;
   1.269 +     }
   1.270 ++
   1.271 +     s->current_block = s->n_blocks;
   1.272 + 
   1.273 +     s->sectors_per_block = s->block_size/512;
   1.274 +@@ -184,10 +280,10 @@
   1.275 + 
   1.276 +     if (s->current_block != block_num) {
   1.277 +         int ret;
   1.278 +-        uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
   1.279 ++        uint32_t bytes = s->offsets[block_num].size;
   1.280 + 
   1.281 +-        ret = bdrv_pread(bs->file, s->offsets[block_num], s->compressed_block,
   1.282 +-                         bytes);
   1.283 ++        ret = bdrv_pread(bs->file, s->offsets[block_num].offset,
   1.284 ++			 s->compressed_block, bytes);
   1.285 +         if (ret != bytes) {
   1.286 +             return -1;
   1.287 +         }
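The heart of the patch above is the new build_index() helper, which normalizes the several historical cloop index layouts (ACCELERATED KNOPPIX V1.0, V2.0, V1.0, V0.68, V3.0) into an array of (offset, size) pairs that the read path can use directly instead of a plain table of 64-bit offsets. The standalone sketch below illustrates only the simplest case: a V2.0-style table of n + 1 big-endian absolute offsets, where block i spans [off[i], off[i+1]). The names build_index_v2() and be64_read(), the sample table and the main() driver are illustrative assumptions and not part of the patch; the real build_index() converts the table in place inside the buffer read from the image and additionally handles the little-endian, V0.68 and V3.0 layouts as well as the zlib-compressed tail index.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BLOCK_SIZE (64 * 1024 * 1024)

    /* Same shape as the block_info struct introduced by the patch. */
    typedef struct block_info {
        uint64_t offset;    /* start of the compressed block in the image */
        uint32_t size;      /* compressed size of the block */
        uint32_t optidx;
    } block_info;

    /* Read a big-endian 64-bit value from a raw byte buffer
     * (portable stand-in for be64_to_cpu() on a loaded table). */
    static uint64_t be64_read(const uint8_t *p)
    {
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++) {
            v = (v << 8) | p[i];
        }
        return v;
    }

    /* V2.0-style index: the image stores n + 1 big-endian absolute offsets
     * and block i occupies [off[i], off[i+1]).  Returns 0 on success, or
     * i + 1 when block i has an implausible size, mirroring build_index()'s
     * convention so the caller can report which index is corrupt. */
    static int build_index_v2(const uint8_t *table, block_info *out, unsigned long n)
    {
        unsigned long i;

        for (i = 0; i < n; i++) {
            uint64_t start = be64_read(table + 8 * i);
            uint64_t end   = be64_read(table + 8 * (i + 1));

            if (end < start || end - start > 2 * MAX_BLOCK_SIZE) {
                return i + 1;
            }
            out[i].offset = start;
            out[i].size   = (uint32_t)(end - start);
        }
        return 0;
    }

    int main(void)
    {
        /* Hypothetical table for a 2-block image: the first block starts
         * right after the 136-byte header and the 24-byte offset table
         * (offset 0xA0) and holds 0x1000 compressed bytes; the second
         * holds 0x800. */
        static const uint8_t table[3 * 8] = {
            0, 0, 0, 0, 0, 0, 0x00, 0xA0,
            0, 0, 0, 0, 0, 0, 0x10, 0xA0,
            0, 0, 0, 0, 0, 0, 0x18, 0xA0,
        };
        block_info blocks[2];

        if (build_index_v2(table, blocks, 2) == 0) {
            printf("block 0: offset=%llu size=%u\n",
                   (unsigned long long)blocks[0].offset, blocks[0].size);
            printf("block 1: offset=%llu size=%u\n",
                   (unsigned long long)blocks[1].offset, blocks[1].size);
        }
        return 0;
    }

With this (offset, size) representation in place, cloop_read_block() in the patched driver no longer needs the "difference of adjacent offsets" computation, which is why the last hunk replaces s->offsets[block_num + 1] - s->offsets[block_num] with s->offsets[block_num].size.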