wok-6.x rev 25543
linux-cloop: update cloop.u
| author | Pascal Bellard <pascal.bellard@slitaz.org> |
|---|---|
| date | Sat Mar 11 12:33:53 2023 +0000 (20 months ago) |
| parents | e6abe3abd527 |
| children | acebe5bceb4f |
| files | linux-cloop/stuff/cloop.u linux64-cloop/stuff/cloop.u |
line diff
--- a/linux-cloop/stuff/cloop.u	Sat Mar 11 09:15:29 2023 +0000
+++ b/linux-cloop/stuff/cloop.u	Sat Mar 11 12:33:53 2023 +0000
@@ -327,26 +327,23 @@
 xz_dec_reset(clo->xzdecoderstate);
 err = xz_dec_run(clo->xzdecoderstate, &clo->xz_buffer);
 if (err == XZ_STREAM_END || err == XZ_OK)
-@@ -309,16 +304,12 @@
+@@ -309,9 +304,12 @@
 while (buf_done < buf_len)
 {
 size_t size = buf_len - buf_done, size_read;
-- mm_segment_t old_fs;
- /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
- /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
--
- // mutex_lock(&clo->clo_rq_mutex);
-- old_fs = get_fs();
-- set_fs(KERNEL_DS);
+- size_read = kernel_read(f, buf + buf_done, size, &pos);
+- // mutex_unlock(&clo->clo_rq_mutex);
++ /* kernel_read() only supports 32 bit offsets, so we use vfs_read() instead. */
++ /* int size_read = kernel_read(f, pos, buf + buf_done, size); */
+ mm_segment_t old_fs = get_fs();
+ set_fs(get_ds());
- size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
- set_fs(old_fs);
-- // mutex_unlock(&clo->clo_rq_mutex);
++ size_read = vfs_read(f, (void __user *)(buf + buf_done), size, &pos);
++ set_fs(old_fs);

 if(size_read <= 0)
 {
-@@ -358,8 +349,8 @@
+@@ -351,8 +349,8 @@
 return i;
 }

@@ -357,7 +354,7 @@

 /* Load one compressed block from the file. */
 if(compressed_block_offset > 0 && compressed_block_len >= 0) /* sanity check */
-@@ -369,12 +360,12 @@
+@@ -362,12 +360,12 @@
 if (n!= compressed_block_len)
 {
 printk(KERN_ERR "%s: error while reading %lu bytes @ %llu from file %s\n",
@@ -372,7 +369,7 @@
 return -1;
 }

-@@ -382,14 +373,16 @@
+@@ -375,14 +373,16 @@
 if(++clo->current_bufnum >= clo->num_buffered_blocks) clo->current_bufnum = 0;

 /* Do the uncompression */
@@ -392,7 +389,7 @@
 clo->buffered_blocknum[clo->current_bufnum] = -1;
 return -1;
 }
-@@ -397,107 +390,146 @@
+@@ -390,107 +390,146 @@
 return clo->current_bufnum;
 }

-static blk_status_t cloop_handle_request(struct cloop_device *clo, struct request *req)
+ unsigned long len = bvec.bv_len;
+ char *to_ptr = kmap(bvec.bv_page) + bvec.bv_offset;
+ while(len > 0)
+ {
+ u_int32_t length_in_buffer;
+ loff_t block_offset = offset;
+ u_int32_t offset_in_buffer;
+ err = wait_event_interruptible(clo->clo_event, !list_empty(&clo->clo_list) ||
+ kthread_should_stop());
+ if(unlikely(err))
 {
- ret = BLK_STS_IOERR;
- break; /* invalid data, leave inner loop */
+ DEBUGP(KERN_ERR "cloop thread activated on error!? Continuing.\n");
+ continue;
 }
}

/* Read header, flags and offsets from already opened file */
-@@ -508,7 +540,7 @@
+@@ -501,7 +540,7 @@
 char *bbuf=NULL;
 unsigned int bbuf_size = 0;
 const unsigned int header_size = sizeof(struct cloop_head);
 loff_t fs_read_position = 0, header_pos[2];
 int isblkdev, bytes_read, error = 0;
 if (clo->suspended) return error;
-@@ -521,7 +560,7 @@
+@@ -521,7 +560,7 @@
 }
 clo->backing_file = file;
 clo->backing_inode= inode ;
- clo->underlying_total_size = (isblkdev) ? file->f_mapping->host->i_size : inode->i_size;
+ clo->underlying_total_size = (isblkdev) ? inode->i_bdev->bd_inode->i_size : inode->i_size;
 if(clo->underlying_total_size < header_size)
 {
 printk(KERN_ERR "%s: %llu bytes (must be >= %u bytes)\n",
-@@ -531,7 +570,7 @@
+@@ -531,7 +570,7 @@
 }
 if(isblkdev)
 {
- struct request_queue *q = bdev_get_queue(I_BDEV(file->f_mapping->host));
+ struct request_queue *q = bdev_get_queue(inode->i_bdev);
 blk_queue_max_hw_sectors(clo->clo_queue, queue_max_hw_sectors(q)); /* Renamed in 2.6.34 */
 blk_queue_max_segments(clo->clo_queue, queue_max_segments(q)); /* Renamed in 2.6.34 */
 /* blk_queue_max_hw_segments(clo->clo_queue, queue_max_hw_segments(q)); */ /* Removed in 2.6.34 */
-@@ -540,7 +579,7 @@
+@@ -540,7 +579,7 @@
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
 blk_queue_merge_bvec(clo->clo_queue, q->merge_bvec_fn);
 #endif
- clo->underlying_blksize = block_size(I_BDEV(file->f_mapping->host));
+ clo->underlying_blksize = block_size(inode->i_bdev);
 }
 else
 clo->underlying_blksize = PAGE_SIZE;
-@@ -574,29 +613,19 @@
+@@ -574,29 +613,19 @@
 goto error_release;
 }
 memcpy(&clo->head, bbuf, header_size);
 cloop_name, cloop_name);
 error=-EBADF; goto error_release;
 }
-@@ -613,67 +635,133 @@
+@@ -606,67 +635,133 @@
 cloop_name, clo->head.block_size);
 error=-EBADF; goto error_release;
 }
+ }
+ /* Read them offsets! */
+ if(clo->header_first)
+ {
+ total_bytes = total_offsets * sizeof(struct block_info);
+ fs_read_position = sizeof(struct cloop_head);
+ }
+ else
 {
- printk(KERN_ERR "%s: Bad file %s, read() %lu bytes @ %llu returned %d.\n",
- cloop_name, clo->underlying_filename, (unsigned long)clo->underlying_blksize, fs_read_position, (int)bytes_read);
- error=-EBADF;
- goto error_release;
+ total_bytes = total_offsets * sizeof(loff_t);
+ fs_read_position = clo->underlying_total_size - sizeof(struct cloop_head) - total_bytes;
 }
- /* remember where to read the next blk from file */
- fs_read_position += bytes_read;
- bytes_read / sizeof(loff_t));
- DEBUGP(KERN_INFO "cloop: parsing %d offsets %d to %d\n", num_readable, offsets_read, offsets_read+num_readable-1);
- for (i=0,offset=0; i<num_readable; i++)
+ for(n=0;n<total_bytes;)
 {
- loff_t tmp = be64_to_cpu( *(loff_t*) (bbuf+offset) );
- if (i%50==0) DEBUGP(KERN_INFO "cloop: offset %03d: %llu\n", offsets_read, tmp);
- if(offsets_read > 0)
+ size_t bytes_readable;
+ bytes_readable = MIN(bbuf_size, clo->underlying_total_size - fs_read_position);
+ if(bytes_readable <= 0) break; /* Done */
 clo->num_buffered_blocks = (buffers > 0 && clo->head.block_size >= 512) ?
 (buffers / clo->head.block_size) : 1;
 clo->buffered_blocknum = cloop_malloc(clo->num_buffered_blocks * sizeof (u_int32_t));
-@@ -705,36 +793,14 @@
+@@ -698,36 +793,14 @@
 cloop_name, clo->largest_block);
 error=-ENOMEM; goto error_release_free_buffer;
 }
 if(preload > 0)
 {
 clo->preload_array_size = ((preload<=clo->head.num_blocks)?preload:clo->head.num_blocks);
-@@ -780,6 +846,7 @@
+@@ -773,6 +846,7 @@
 clo->preload_array_size = clo->preload_size = 0;
 }
 }
 /* Uncheck */
 return error;
 error_release_free_all:
-@@ -794,9 +861,13 @@
+@@ -787,9 +861,13 @@
 }
 if (clo->buffered_blocknum) { cloop_free(clo->buffered_blocknum, sizeof(int)*clo->num_buffered_blocks); clo->buffered_blocknum=NULL; }
 error_release_free:
 if(bbuf) cloop_free(bbuf, clo->underlying_blksize);
 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
 clo->backing_file=NULL;
-@@ -829,6 +900,7 @@
+@@ -809,7 +887,7 @@
 file = fget(arg); /* get filp struct from ioctl arg fd */
 if(!file) return -EBADF;
 error=cloop_set_file(cloop_num,file);
- set_disk_ro(clo->clo_disk, true);
+ set_device_ro(bdev, 1);
 if(error) fput(file);
 return error;
 }
-@@ -822,6 +900,7 @@
+@@ -822,6 +900,7 @@
 if(clo->refcnt > 1) /* we needed one fd for the ioctl */
 return -EBUSY;
 if(filp==NULL) return -EINVAL;
 if(filp!=initial_file)
 fput(filp);
 else
-@@ -839,7 +911,7 @@
+@@ -832,7 +911,7 @@
 clo->backing_file = NULL;
 clo->backing_inode = NULL;
 if(clo->underlying_filename) { kfree(clo->underlying_filename); clo->underlying_filename=NULL; }
 if(clo->preload_cache)
 {
 int i;
-@@ -1054,15 +1126,15 @@
+@@ -1047,15 +1126,15 @@
 case LOOP_CLR_FD: /* Change arg */
 case LOOP_GET_STATUS64: /* Change arg */
 case LOOP_SET_STATUS64: /* Change arg */
 }
 #endif

-@@ -1093,7 +1165,7 @@
+@@ -1086,7 +1165,7 @@
 cloop_dev[cloop_num]->refcnt-=1;
 }

 {
 owner: THIS_MODULE,
 open: cloop_open,
-@@ -1105,12 +1177,6 @@
+@@ -1098,12 +1177,6 @@
 /* locked_ioctl ceased to exist in 2.6.36 */
 };

 static int cloop_register_blkdev(int major_nr)
 {
 return register_blkdev(major_nr, cloop_name);
-@@ -1124,37 +1190,33 @@
+@@ -1117,73 +1190,45 @@

 static int cloop_alloc(int cloop_num)
 {
- struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));
- int error = -ENOMEM;
+ struct cloop_device *clo = (struct cloop_device *) cloop_malloc(sizeof(struct cloop_device));;
 if(clo == NULL) goto error_out;
 cloop_dev[cloop_num] = clo;
- clo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- clo->tag_set.driver_data = clo;
- if(blk_mq_alloc_tag_set(&clo->tag_set)) goto error_out_free_clo;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
- clo->clo_queue = blk_mq_init_queue(&clo->tag_set);
- if(IS_ERR(clo->clo_queue))
+ clo->clo_thread = NULL;
- goto error_out_free_tags;
+ goto error_out;
 }
- clo->clo_queue->queuedata = clo;
- clo->clo_disk = alloc_disk(1);
-#else
- clo->clo_disk = blk_mq_alloc_disk(&clo->tag_set, NULL);
+ clo->clo_queue->queuedata = clo;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, clo->clo_queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, clo->clo_queue);
 #endif
+ clo->clo_disk = alloc_disk(1);
 if(!clo->clo_disk)
 {
 printk(KERN_ERR "%s: Unable to alloc disk[%d]\n", cloop_name, cloop_num);
- goto error_out_free_queue;
+ goto error_disk;
 }
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)
- clo->clo_disk->queue = clo->clo_queue;
-#else
- clo->clo_disk->minors = 1;
- clo->clo_queue = clo->clo_disk->queue;
-#endif
- clo->clo_queue->queuedata = clo;
- blk_queue_max_hw_sectors(clo->clo_queue, BLK_DEF_MAX_SECTORS);
- spin_lock_init(&clo->queue_lock);
- mutex_init(&clo->clo_ctl_mutex);
- mutex_init(&clo->clo_rq_mutex);
 clo->clo_disk->major = cloop_major;
 clo->clo_disk->first_minor = cloop_num;
 clo->clo_disk->fops = &clo_fops;
+ clo->clo_disk->queue = clo->clo_queue;
 clo->clo_disk->private_data = clo;
 sprintf(clo->clo_disk->disk_name, "%s%d", cloop_name, cloop_num);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
- error = add_disk(clo->clo_disk);
- if (error)
- goto error_out_free_disk;
-#endif
+ add_disk(clo->clo_disk);
 return 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
-error_out_free_disk:
- blk_cleanup_disk(clo->clo_disk);
-#endif
-error_out_free_queue:
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0)
+error_disk:
 blk_cleanup_queue(clo->clo_queue);
-error_out_free_tags:
-#endif
- blk_mq_free_tag_set(&clo->tag_set);
-error_out_free_clo:
- cloop_free(clo, sizeof(struct cloop_device));
error_out:
- return error;
+ return -ENOMEM;
}

 static void cloop_dealloc(int cloop_num)
-@@ -1191,13 +1236,8 @@
 struct cloop_device *clo = cloop_dev[cloop_num];
 if(clo == NULL) return;
 del_gendisk(clo->clo_disk);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,15,0)
- blk_cleanup_disk(clo->clo_disk);
-#else
 blk_cleanup_queue(clo->clo_queue);
 put_disk(clo->clo_disk);
-#endif
- blk_mq_free_tag_set(&clo->tag_set);
 cloop_free(clo, sizeof(struct cloop_device));
 cloop_dev[cloop_num] = NULL;
 }
-@@ -1286,3 +1326,8 @@
 /* The cloop init and exit function registration (especially needed for Kernel 2.6) */
 module_init(cloop_init);
 module_exit(cloop_exit);
+
+#include <linux/vermagic.h>
+#include <linux/compiler.h>
+
+MODULE_INFO(vermagic, VERMAGIC_STRING);
--- cloop_suspend.c
+++ cloop_suspend.c
@@ -14,6 +14,7 @@
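For context on the request-handling model this revision of cloop.u returns to: instead of blk-mq's queue_rq callback, requests are queued on clo->clo_list and a dedicated kernel thread sleeps on clo->clo_event until work arrives or the thread is told to stop. The sketch below is not code from the driver; it is a minimal, self-contained illustration of that kthread/wait-queue pattern built only from standard kernel APIs (kthread_run, wait_event_interruptible, kthread_should_stop), with invented demo_* names.

```c
/*
 * Minimal sketch of the kthread + wait-queue request model that cloop.u
 * reverts to. Not the cloop driver itself: every demo_* identifier is
 * hypothetical; only the kernel APIs used are real.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/err.h>

struct demo_item {
	struct list_head list;
	int payload;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_event);
static struct task_struct *demo_thread;

/* Worker: the same wait_event_interruptible()/list_empty() loop idiom
 * visible in the patched cloop thread. */
static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		int err = wait_event_interruptible(demo_event,
				!list_empty(&demo_list) || kthread_should_stop());
		if (err)	/* woken without work (signal): just retry */
			continue;
		for (;;) {
			struct demo_item *it;

			spin_lock_irq(&demo_lock);
			if (list_empty(&demo_list)) {
				spin_unlock_irq(&demo_lock);
				break;
			}
			it = list_first_entry(&demo_list, struct demo_item, list);
			list_del(&it->list);
			spin_unlock_irq(&demo_lock);

			pr_info("demo: handled item %d\n", it->payload);
			kfree(it);
		}
	}
	return 0;
}

/* Producer: queue one item and wake the worker, as the request function
 * does with clo->clo_list / clo->clo_event in the patch above. */
static void demo_queue(int payload)
{
	struct demo_item *it = kmalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return;
	it->payload = payload;
	spin_lock_irq(&demo_lock);
	list_add_tail(&it->list, &demo_list);
	spin_unlock_irq(&demo_lock);
	wake_up(&demo_event);
}

static int __init demo_init(void)
{
	demo_thread = kthread_run(demo_thread_fn, NULL, "demo_thread");
	if (IS_ERR(demo_thread))
		return PTR_ERR(demo_thread);
	demo_queue(42);
	return 0;
}

static void __exit demo_exit(void)
{
	/* kthread_stop() sets the stop flag and wakes the sleeping worker,
	 * so the kthread_should_stop() part of the wait condition lets it
	 * exit cleanly. */
	kthread_stop(demo_thread);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Including kthread_should_stop() in the wait condition is the key design point: it is what allows the thread to be woken and torn down at module unload without a separate signalling path, which is why the same test appears next to !list_empty(&clo->clo_list) in the patch.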