wok-next rev 20146
Up qemu-light (2.0.2)
author | Pascal Bellard <pascal.bellard@slitaz.org> |
---|---|
date | Wed Nov 01 11:50:08 2017 +0100 (2017-11-01) |
parents | 4673755e4f14 |
children | 2b3f4f48a34b |
files | postgresql/receipt qemu-light/receipt qemu-light/stuff/cloop.u |
line diff
1.1 --- a/postgresql/receipt Wed Nov 01 11:33:53 2017 +0100 1.2 +++ b/postgresql/receipt Wed Nov 01 11:50:08 2017 +0100 1.3 @@ -13,7 +13,7 @@ 1.4 WGET_URL="http://ftp.postgresql.org/pub/source/v$VERSION/$TARBALL" 1.5 1.6 DEPENDS="postgresql-client slitaz-base-files" 1.7 -BUILD_DEPENDS="readline-dev zlib-dev perl python-dev" 1.8 +BUILD_DEPENDS="readline-dev zlib-dev perl-dev python-dev" 1.9 SPLIT="libpostgresqlclient postgresql-client postgresql-dev postgresql-doc" 1.10 1.11 # Rules to configure and make the package.
2.1 --- a/qemu-light/receipt Wed Nov 01 11:33:53 2017 +0100 2.2 +++ b/qemu-light/receipt Wed Nov 01 11:50:08 2017 +0100 2.3 @@ -1,7 +1,7 @@ 2.4 # SliTaz package receipt. 2.5 2.6 PACKAGE="qemu-light" 2.7 -VERSION="1.2.0" 2.8 +VERSION="2.0.2" 2.9 CATEGORY="misc" 2.10 SHORT_DESC="Light Qemu i386-softmmu target (without xen, vde, bluez, blobs, tls)." 2.11 MAINTAINER="pankso@slitaz.org" 2.12 @@ -14,16 +14,27 @@ 2.13 CONFLICT="qemu" 2.14 2.15 DEPENDS="alsa-lib libsdl util-linux-uuid" 2.16 -BUILD_DEPENDS="perl alsa-lib-dev libsdl-dev util-linux-uuid-dev python \ 2.17 -glib-dev zlib-dev" 2.18 +BUILD_DEPENDS="gettext perl alsa-lib-dev libsdl-dev util-linux-uuid-dev python \ 2.19 +glib-dev zlib-dev autoconf automake libtool bison flex" 2.20 2.21 # Rules to configure and make the package. 2.22 compile_rules() 2.23 { 2.24 - cd $src 2.25 + mkdir -p $DESTDIR/usr/share/qemu/ia32 2.26 + 2.27 + [ -s $SOURCES_REPOSITORY/$UEFIZIP ] || wget -P $SOURCES_REPOSITORY \ 2.28 + http://netassist.dl.sourceforge.net/project/edk2/OVMF/$UEFIZIP 2.29 + unzip $SOURCES_REPOSITORY/$UEFIZIP OVMF.fd -d $DESTDIR/usr/share/qemu/ia32 2.30 + 2.31 + patch -p0 < $stuff/cloop.u 2.32 + 2.33 TARGET="i386-softmmu" 2.34 + 2.35 + export LDFLAGS="$LDFLAGS -Wl,--copy-dt-needed-entries" 2.36 + #--cross-prefix= --host-cc= 2.37 ./configure \ 2.38 --prefix=/usr \ 2.39 + --sysconfdir=/etc \ 2.40 --disable-xen \ 2.41 --disable-vnc-sasl \ 2.42 --disable-vnc-tls \
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/qemu-light/stuff/cloop.u Wed Nov 01 11:50:08 2017 +0100 3.3 @@ -0,0 +1,284 @@ 3.4 +--- block/cloop.c 3.5 ++++ block/cloop.c 3.6 +@@ -29,11 +29,90 @@ 3.7 + /* Maximum compressed block size */ 3.8 + #define MAX_BLOCK_SIZE (64 * 1024 * 1024) 3.9 + 3.10 ++typedef struct cloop_tail { 3.11 ++ uint32_t table_size; 3.12 ++ uint32_t index_size; 3.13 ++ uint32_t num_blocks; 3.14 ++} cloop_tail; 3.15 ++ 3.16 ++typedef struct block_info { 3.17 ++ uint64_t offset; /* 64-bit offsets of compressed block */ 3.18 ++ uint32_t size; /* 32-bit compressed block size */ 3.19 ++ uint32_t optidx; /* 32-bit index number */ 3.20 ++} block_info; 3.21 ++ 3.22 ++static inline int build_index(block_info *offsets, unsigned long n) 3.23 ++{ 3.24 ++ uint32_t *ofs32 = (uint32_t *) offsets; 3.25 ++ uint64_t *ofs64 = (uint64_t *) offsets; 3.26 ++ 3.27 ++ if (ofs32[0] == 0) { 3.28 ++ if (ofs32[2]) { /* ACCELERATED KNOPPIX V1.0 */ 3.29 ++ while (n--) { 3.30 ++ offsets[n].offset = be64_to_cpu(offsets[n].offset); 3.31 ++ offsets[n].size = ntohl(offsets[n].size); 3.32 ++ if (offsets[n].size > 2 * MAX_BLOCK_SIZE) 3.33 ++ return n+1; 3.34 ++ } 3.35 ++ } 3.36 ++ else { /* V2.0 */ 3.37 ++ uint64_t last = be64_to_cpu(ofs64[n - 1]); 3.38 ++ while (n--) { 3.39 ++ offsets[n].size = last - 3.40 ++ (offsets[n].offset = be64_to_cpu(ofs64[n])); 3.41 ++ if (offsets[n].size > 2 * MAX_BLOCK_SIZE) 3.42 ++ return n+1; 3.43 ++ last = offsets[n].offset; 3.44 ++ } 3.45 ++ } 3.46 ++ } 3.47 ++ else if (ofs32[1] == 0) { /* V1.0 */ 3.48 ++ uint64_t last = le64_to_cpu(ofs64[n - 1]); 3.49 ++ while (n--) { 3.50 ++ offsets[n].size = last - 3.51 ++ (offsets[n].offset = le64_to_cpu(ofs64[n])); 3.52 ++ if (offsets[n].size > 2 * MAX_BLOCK_SIZE) 3.53 ++ return n+1; 3.54 ++ last = offsets[n].offset; 3.55 ++ } 3.56 ++ } 3.57 ++ else if (ntohl(ofs32[0]) == (4*n) + 0x8C) { /* V0.68 */ 3.58 ++ uint64_t last = ntohl(ofs32[n - 1]); 3.59 ++ while (n--) { 3.60 ++ offsets[n].size = last - 
3.61 ++ (offsets[n].offset = ntohl(ofs32[n])); 3.62 ++ if (offsets[n].size > 2 * MAX_BLOCK_SIZE) 3.63 ++ return n+1; 3.64 ++ last = offsets[n].offset; 3.65 ++ } 3.66 ++ } 3.67 ++ else { /* V3.0 */ 3.68 ++ unsigned long i; 3.69 ++ uint64_t j; 3.70 ++ 3.71 ++ for (i = n; i-- > 0; ) { 3.72 ++ offsets[i].size = ntohl(ofs32[i]); 3.73 ++ if (offsets[i].size > 2 * MAX_BLOCK_SIZE) 3.74 ++ return i+1; 3.75 ++ } 3.76 ++ for (i = 0, j = 128 + 4 + 4; i < n; i++) { 3.77 ++ offsets[i].offset = j; 3.78 ++ if (offsets[i].size & 0x80000000) { 3.79 ++ unsigned long k = offsets[i].size & 0x7FFFFFFF; 3.80 ++ offsets[i].offset = offsets[k].offset; 3.81 ++ offsets[i].size = offsets[k].size; 3.82 ++ } 3.83 ++ else j += offsets[i].size; 3.84 ++ } 3.85 ++ } 3.86 ++ return 0; 3.87 ++} 3.88 ++ 3.89 + typedef struct BDRVCloopState { 3.90 + CoMutex lock; 3.91 + uint32_t block_size; 3.92 + uint32_t n_blocks; 3.93 +- uint64_t *offsets; 3.94 ++ block_info *offsets; 3.95 + uint32_t sectors_per_block; 3.96 + uint32_t current_block; 3.97 + uint8_t *compressed_block; 3.98 +@@ -43,17 +117,21 @@ 3.99 + 3.100 + static int cloop_probe(const uint8_t *buf, int buf_size, const char *filename) 3.101 + { 3.102 +- const char *magic_version_2_0 = "#!/bin/sh\n" 3.103 +- "#V2.0 Format\n" 3.104 ++ static const uint8_t magic[] = 3.105 + "modprobe cloop file=$0 && mount -r -t iso9660 /dev/cloop $1\n"; 3.106 +- int length = strlen(magic_version_2_0); 3.107 +- if (length > buf_size) { 3.108 +- length = buf_size; 3.109 ++ int i, ret = 0, length = buf_size; 3.110 ++ uint8_t c; 3.111 ++ 3.112 ++ if (length > 127) { 3.113 ++ length = 127; 3.114 + } 3.115 +- if (!memcmp(magic_version_2_0, buf, length)) { 3.116 +- return 2; 3.117 ++ for (i = 0; i < length - sizeof(magic) + 1; i++) { 3.118 ++ if (buf[i] != magic[0]) continue; 3.119 ++ if (strncmp(buf + i, magic, sizeof(magic) - 1)) continue; 3.120 ++ ret = 2; 3.121 ++ break; 3.122 + } 3.123 +- return 0; 3.124 ++ return ret; 3.125 + } 3.126 + 3.127 + static int 
cloop_open(BlockDriverState *bs, QDict *options, int flags, 3.128 +@@ -91,79 +169,97 @@ 3.129 + MAX_BLOCK_SIZE / (1024 * 1024)); 3.130 + return -EINVAL; 3.131 + } 3.132 +- 3.133 + ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4); 3.134 + if (ret < 0) { 3.135 + return ret; 3.136 + } 3.137 + s->n_blocks = be32_to_cpu(s->n_blocks); 3.138 + 3.139 +- /* read offsets */ 3.140 +- if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) { 3.141 +- /* Prevent integer overflow */ 3.142 +- error_setg(errp, "n_blocks %u must be %zu or less", 3.143 +- s->n_blocks, 3.144 +- (UINT32_MAX - 1) / sizeof(uint64_t)); 3.145 +- return -EINVAL; 3.146 +- } 3.147 +- offsets_size = (s->n_blocks + 1) * sizeof(uint64_t); 3.148 +- if (offsets_size > 512 * 1024 * 1024) { 3.149 +- /* Prevent ridiculous offsets_size which causes memory allocation to 3.150 +- * fail or overflows bdrv_pread() size. In practice the 512 MB 3.151 +- * offsets[] limit supports 16 TB images at 256 KB block size. 3.152 +- */ 3.153 +- error_setg(errp, "image requires too many offsets, " 3.154 +- "try increasing block size"); 3.155 +- return -EINVAL; 3.156 +- } 3.157 +- s->offsets = g_malloc(offsets_size); 3.158 ++ /* initialize zlib engine */ 3.159 ++ max_compressed_block_size = s->block_size + s->block_size/1000 + 12 + 4; 3.160 ++ s->compressed_block = g_malloc(max_compressed_block_size + 1); 3.161 ++ s->uncompressed_block = g_malloc(s->block_size); 3.162 + 3.163 +- ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size); 3.164 +- if (ret < 0) { 3.165 ++ if (inflateInit(&s->zstream) != Z_OK) { 3.166 ++ ret = -EINVAL; 3.167 + goto fail; 3.168 + } 3.169 + 3.170 +- for (i = 0; i < s->n_blocks + 1; i++) { 3.171 +- uint64_t size; 3.172 ++ /* read offsets */ 3.173 ++ if (s->n_blocks + 1 == 0) { 3.174 ++ cloop_tail tail; 3.175 ++ int64_t end = bdrv_getlength(bs->file); 3.176 ++ void *p; 3.177 ++ uint32_t toclen, len; 3.178 + 3.179 +- s->offsets[i] = be64_to_cpu(s->offsets[i]); 3.180 +- if (i == 0) { 3.181 +- 
continue; 3.182 ++ ret = bdrv_pread(bs->file, end - sizeof(tail), &tail, sizeof(tail)); 3.183 ++ if (ret < 0) { 3.184 ++ goto fail; 3.185 + } 3.186 + 3.187 +- if (s->offsets[i] < s->offsets[i - 1]) { 3.188 +- error_setg(errp, "offsets not monotonically increasing at " 3.189 +- "index %u, image file is corrupt", i); 3.190 +- ret = -EINVAL; 3.191 +- goto fail; 3.192 ++ s->n_blocks = be32_to_cpu(tail.num_blocks); 3.193 ++ offsets_size = s->n_blocks * sizeof(block_info); 3.194 ++ if (offsets_size > 512 * 1024 * 1024) { 3.195 ++ /* Prevent ridiculous offsets_size which causes memory allocation to 3.196 ++ * fail or overflows bdrv_pread() size. In practice the 512 MB 3.197 ++ * offsets[] limit supports 16 TB images at 256 KB block size. 3.198 ++ */ 3.199 ++ error_setg(errp, "image requires too many offsets, " 3.200 ++ "try increasing block size"); 3.201 ++ return -EINVAL; 3.202 + } 3.203 ++ len = be32_to_cpu(tail.table_size); 3.204 ++ toclen = (be32_to_cpu(tail.index_size) & 255) * s->n_blocks; 3.205 + 3.206 +- size = s->offsets[i] - s->offsets[i - 1]; 3.207 ++ s->offsets = g_malloc(offsets_size); 3.208 ++ p = g_malloc(len); 3.209 + 3.210 +- /* Compressed blocks should be smaller than the uncompressed block size 3.211 +- * but maybe compression performed poorly so the compressed block is 3.212 +- * actually bigger. Clamp down on unrealistic values to prevent 3.213 +- * ridiculous s->compressed_block allocation. 
3.214 +- */ 3.215 +- if (size > 2 * MAX_BLOCK_SIZE) { 3.216 +- error_setg(errp, "invalid compressed block size at index %u, " 3.217 +- "image file is corrupt", i); 3.218 ++ ret = bdrv_pread(bs->file, end - sizeof(tail) - len, p, len); 3.219 ++ if (ret < 0) { 3.220 ++ goto fail; 3.221 ++ } 3.222 ++ s->zstream.next_in = p; 3.223 ++ s->zstream.avail_in = len; 3.224 ++ s->zstream.next_out = s->offsets; 3.225 ++ s->zstream.avail_out = toclen; 3.226 ++ ret = inflateReset(&s->zstream); 3.227 ++ if (ret != Z_OK) { 3.228 + ret = -EINVAL; 3.229 + goto fail; 3.230 + } 3.231 +- 3.232 +- if (size > max_compressed_block_size) { 3.233 +- max_compressed_block_size = size; 3.234 ++ ret = inflate(&s->zstream, Z_FINISH); 3.235 ++ if (ret != Z_STREAM_END || s->zstream.total_out != toclen) { 3.236 ++ ret = -EINVAL; 3.237 ++ goto fail; 3.238 + } 3.239 ++ g_free(p); 3.240 + } 3.241 ++ else { 3.242 ++ offsets_size = s->n_blocks * sizeof(block_info); 3.243 ++ if (offsets_size > 512 * 1024 * 1024) { 3.244 ++ /* Prevent ridiculous offsets_size which causes memory allocation to 3.245 ++ * fail or overflows bdrv_pread() size. In practice the 512 MB 3.246 ++ * offsets[] limit supports 16 TB images at 256 KB block size. 
3.247 ++ */ 3.248 ++ error_setg(errp, "image requires too many offsets, " 3.249 ++ "try increasing block size"); 3.250 ++ return -EINVAL; 3.251 ++ } 3.252 ++ s->offsets = g_malloc(offsets_size); 3.253 + 3.254 +- /* initialize zlib engine */ 3.255 +- s->compressed_block = g_malloc(max_compressed_block_size + 1); 3.256 +- s->uncompressed_block = g_malloc(s->block_size); 3.257 +- if (inflateInit(&s->zstream) != Z_OK) { 3.258 ++ ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size); 3.259 ++ if (ret < 0) { 3.260 ++ goto fail; 3.261 ++ } 3.262 ++ } 3.263 ++ ret = build_index(s->offsets, s->n_blocks); 3.264 ++ if (ret) { 3.265 ++ error_setg(errp, "invalid compressed block size at index %u, " 3.266 ++ "image file is corrupt", ret-1); 3.267 + ret = -EINVAL; 3.268 + goto fail; 3.269 + } 3.270 ++ 3.271 + s->current_block = s->n_blocks; 3.272 + 3.273 + s->sectors_per_block = s->block_size/512; 3.274 +@@ -184,10 +280,10 @@ 3.275 + 3.276 + if (s->current_block != block_num) { 3.277 + int ret; 3.278 +- uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num]; 3.279 ++ uint32_t bytes = s->offsets[block_num].size; 3.280 + 3.281 +- ret = bdrv_pread(bs->file, s->offsets[block_num], s->compressed_block, 3.282 +- bytes); 3.283 ++ ret = bdrv_pread(bs->file, s->offsets[block_num].offset, 3.284 ++ s->compressed_block, bytes); 3.285 + if (ret != bytes) { 3.286 + return -1; 3.287 + }