2 * Copyright (C) 1994-1997 Claus-Justus Heine
4 This program is free software; you can redistribute it and/or
5 modify it under the terms of the GNU General Public License as
6 published by the Free Software Foundation; either version 2, or (at
7 your option) any later version.
9 This program is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; see the file COPYING. If not, write to
16 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
20 * This file implements a "generic" interface between the *
21 * zftape-driver and a compression-algorithm. The *
22 * compression-algorithm currently used is a LZ77. I use the *
23 * implementation lzrw3 by Ross N. Williams (Renaissance *
24 * Software). The compression program itself is in the file
25 * lzrw3.c * and lzrw3.h. To adopt another compression algorithm
26 * the functions * zft_compress() and zft_uncompress() must be
27 * changed * appropriately. See below.
/* RCS/CVS keyword strings identifying this revision of the compressor
 * module.  Deliberately non-static so the strings are visible in the
 * compiled object (old ftape convention for version tracking). */
30 char zftc_src[] ="$Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.c,v $";
31 char zftc_rev[] = "$Revision: 1.1.6.1 $";
32 char zftc_dat[] = "$Date: 1997/11/16 15:15:56 $";
34 #include <linux/version.h>
35 #include <linux/errno.h>
37 #include <linux/module.h>
39 #include <linux/zftape.h>
41 #include <asm/uaccess.h>
43 #include "../zftape/zftape-init.h"
44 #include "../zftape/zftape-eof.h"
45 #include "../zftape/zftape-ctl.h"
46 #include "../zftape/zftape-write.h"
47 #include "../zftape/zftape-read.h"
48 #include "../zftape/zftape-rw.h"
49 #include "../compressor/zftape-compress.h"
50 #include "../zftape/zftape-vtbl.h"
51 #include "../compressor/lzrw3.h"
57 /* I handle the allocation of this buffer as a special case, because
58 * it's size varies depending on the tape length inserted.
63 static int keep_module_locked = 1; /* non-zero while the compressor is in use; consulted by the module unload logic */
65 static void *zftc_wrk_mem = NULL; /* lzrw3 working memory, CMPR_WRK_MEM_SIZE bytes (see zft_allocate_cmpr_mem()) */
66 static __u8 *zftc_buf = NULL; /* holds one (un)compressed cluster, blk_sz + CMPR_OVERRUN bytes */
67 static void *zftc_scratch_buf = NULL; /* staging buffer for user data, blk_sz + CMPR_OVERRUN bytes */
69 /* compression statistics
71 static unsigned int zftc_wr_uncompressed = 0; /* total bytes fed to zft_compress() */
72 static unsigned int zftc_wr_compressed = 0; /* total bytes produced by zft_compress() */
73 static unsigned int zftc_rd_uncompressed = 0; /* total bytes produced by zft_uncompress() */
74 static unsigned int zftc_rd_compressed = 0; /* total bytes fed to zft_uncompress() */
/* Forward declarations of the compressor entry points that are hooked
 * into zftape (via the cmpr_ops dispatch table near the end of the
 * file). */
77 static int zftc_write(int *write_cnt,
78 __u8 *dst_buf, const int seg_sz,
79 const __u8 *src_buf, const int req_len,
80 const zft_position *pos, const zft_volinfo *volume);
81 static int zftc_read(int *read_cnt,
82 __u8 *dst_buf, const int to_do,
83 const __u8 *src_buf, const int seg_sz,
84 const zft_position *pos, const zft_volinfo *volume);
/* NOTE(review): the zftc_seek() prototype below appears truncated in
 * this copy -- its final parameter line (a `__u8 *buf', judging from
 * the definition further down) is missing. */
85 static int zftc_seek(unsigned int new_block_pos,
86 zft_position *pos, const zft_volinfo *volume,
88 static void zftc_lock (void);
89 static void zftc_reset (void);
90 static void zftc_cleanup(void);
91 static void zftc_stats (void);
93 /* compressed segment. This conforms to QIC-80-MC, Revision K.
95 * Rev. K applies to tapes with `fixed length format' which is
96 * indicated by format code 2,3 and 5. See below for format code 4 and 6
98 * 2 bytes: offset of compression segment structure
99 * 29k > offset >= 29k-18: data from previous segment ends in this
100 * segment and no compressed block starts
102 * offset == 0: data from previous segment occupies entire
103 * segment and continues in next segment
104 * n bytes: remainder from previous segment
107 * 4 bytes: file set byte offset
108 * Post Rev. K and QIC-3020/3020:
109 * 8 bytes: file set byte offset
110 * 2 bytes: byte count N (amount of data following)
111 * bit 15 is set if data is compressed, bit 15 is not
112 * set if data is uncompressed
113 * N bytes: data (as much as specified in the byte count)
114 * 2 bytes: byte count N_1 of next cluster
115 * N_1 bytes: data of next cluster
116 * 2 bytes: byte count N_2 of next cluster
119 * Note that the `N' byte count accounts only for the bytes that are in the
120 * current segment if the cluster spans to the next segment.
/* Per-cluster bookkeeping for the compressed cluster currently being
 * assembled (write path) or consumed (read/seek paths).
 * NOTE(review): the enclosing `typedef struct ... } cmpr_info;' lines
 * are missing from this copy of the file; only the members and the
 * shared instance `cseg' are visible. */
125 int cmpr_pos; /* actual position in compression buffer */
126 int cmpr_sz; /* what is left in the compression buffer
127 * when copying the compressed data to the
130 unsigned int first_block; /* location of header information in
133 unsigned int count; /* amount of data of current block
134 * contained in current segment
136 unsigned int offset; /* offset in current segment */
137 unsigned int spans:1; /* might continue in next segment */
138 unsigned int uncmpr; /* 0x8000 if this block contains
141 __s64 foffs; /* file set byte offset, same as in
142 * compression map segment
146 static cmpr_info cseg; /* static data. Must be kept uptodate and shared by
147 * read, write and seek functions
/* Debug helper: dump every field of a cmpr_info through the ftape
 * TRACE machinery at the given trace `level'.  `msg' is prepended to
 * the dump; `info' is a pointer to a cmpr_info. */
150 #define DUMP_CMPR_INFO(level, msg, info) \
151 TRACE(level, msg "\n" \
152 KERN_INFO "cmpr_pos : %d\n" \
153 KERN_INFO "cmpr_sz : %d\n" \
154 KERN_INFO "first_block: %d\n" \
155 KERN_INFO "count : %d\n" \
156 KERN_INFO "offset : %d\n" \
157 KERN_INFO "spans : %d\n" \
158 KERN_INFO "uncmpr : 0x%04x\n" \
159 KERN_INFO "foffs : " LL_X, \
160 (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block, \
161 (info)->count, (info)->offset, (info)->spans == 1, \
162 (info)->uncmpr, LL((info)->foffs))
164 /* dispatch compression segment info, return error code
166 * afterwards, cseg->offset points to start of data of the NEXT
167 * compressed block, and cseg->count contains the amount of data
168 * left in the actual compressed block. cseg->spans is set to 1 if
169 * the block is continued in the following segment. Otherwise it is
/* Parse the compression-segment header at the start of `buff' and fill
 * in `cinfo' (first_block, count, offset and -- when enough room is
 * left in the segment -- the file set byte offset `foffs').
 * Aborts with -EIO on corrupted or inconsistent header data; the
 * success-path return is on a line missing from this copy of the file.
 * NOTE(review): several lines (braces, TRACE_EXIT) are missing here;
 * the comments below describe only what is visible. */
172 static int get_cseg (cmpr_info *cinfo, const __u8 *buff,
173 const unsigned int seg_sz,
174 const zft_volinfo *volume)
176 TRACE_FUN(ft_t_flow);
178 cinfo->first_block = GET2(buff, 0);
179 if (cinfo->first_block == 0) { /* data spans to next segment */
180 cinfo->count = seg_sz - sizeof(__u16);
181 cinfo->offset = seg_sz;
183 } else { /* cluster definitely ends in this segment */
184 if (cinfo->first_block > seg_sz) {
/* header points past the end of the segment -- corrupt */
186 TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
187 KERN_INFO "segment size: %d\n"
188 KERN_INFO "first block : %d",
189 seg_sz, cinfo->first_block);
191 cinfo->count = cinfo->first_block - sizeof(__u16);
192 cinfo->offset = cinfo->first_block;
195 /* now get the offset the first block should have in the
196 * uncompressed data stream.
198 * For this magic `18' refer to CRF-3 standard or QIC-80MC,
201 if ((seg_sz - cinfo->offset) > 18) {
202 if (volume->qic113) { /* > revision K */
203 TRACE(ft_t_data_flow, "New QIC-113 compliance")
/* QIC-113: 64-bit file set byte offset */;
204 cinfo->foffs = GET8(buff, cinfo->offset);
205 cinfo->offset += sizeof(__s64);
207 TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
/* pre-QIC-113: only a 32-bit offset on tape */
208 cinfo->foffs = (__s64)GET4(buff, cinfo->offset);
209 cinfo->offset += sizeof(__u32);
/* sanity check: the recorded offset must lie inside the volume */
212 if (cinfo->foffs > volume->size) {
213 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
214 KERN_INFO "offset in current volume: %d\n"
215 KERN_INFO "size of current volume : %d",
216 (int)(cinfo->foffs>>10), (int)(volume->size>>10));
/* a cluster may never exceed one uncompressed block */
218 if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
219 TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
220 KERN_INFO "block size : %d\n"
221 KERN_INFO "data record: %d",
222 volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
224 DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
228 /* This one is called, when a new cluster starts in same segment.
230 * Note: if this is the first cluster in the current segment, we must
231 * not check whether there are more than 18 bytes available because
232 * this has already been done in get_cseg() and there may be less
233 * than 18 bytes available due to header information.
/* Advance `cluster' to the next compressed cluster inside the current
 * segment: read its 2-byte count word (bit 15 = uncompressed flag) and
 * decide whether the cluster spans into the following segment.
 * `finish' != 0 means this is the last segment of the volume, so
 * nothing can span beyond it.
 * NOTE(review): lines are missing from this copy (braces, the
 * cluster->count = 0 path); comments describe only visible code. */
236 static void get_next_cluster(cmpr_info *cluster, const __u8 *buff,
237 const int seg_sz, const int finish)
239 TRACE_FUN(ft_t_flow);
241 if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
242 cluster->count = GET2(buff, cluster->offset);
/* bit 15 of the count marks an uncompressed cluster */
243 cluster->uncmpr = cluster->count & 0x8000;
244 cluster->count -= cluster->uncmpr;
245 cluster->offset += sizeof(__u16);
247 if ((cluster->offset + cluster->count) < seg_sz) {
249 } else if (cluster->offset + cluster->count == seg_sz) {
/* cluster ends exactly at the segment boundary: it only
 * spans if this is not the last segment of the volume */
250 cluster->spans = !finish;
252 /* either an error or a volume written by an
253 * old version. If this is a data error, then we'll
256 TRACE(ft_t_data_flow, "Either error or old volume");
/* clamp the count to what is actually left in the segment */
258 cluster->count = seg_sz - cluster->offset;
265 DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
/* Pin this module in memory while the compressor is active.  Uses the
 * pre-2.4 kernel module refcount idiom: MOD_INC_USE_COUNT plus the
 * keep_module_locked flag (checked by can_unload(), per the comment). */
269 static void zftc_lock(void)
271 MOD_INC_USE_COUNT; /* sets MOD_VISITED and MOD_USED_ONCE,
272 * locking is done with can_unload()
274 keep_module_locked = 1;
277 /* this function is needed for zftape_reset_position in zftape-io.c
/* Discard all cached cluster state (the shared `cseg') and allow the
 * module to be unloaded again. */
279 static void zftc_reset(void)
281 TRACE_FUN(ft_t_flow);
283 memset((void *)&cseg, '\0', sizeof(cseg));
285 keep_module_locked = 0;
289 static int cmpr_mem_initialized = 0; /* non-zero once the three buffers below are allocated */
290 static unsigned int alloc_blksz = 0; /* block size the buffers were sized for */
/* (Re)allocate the compressor buffers for block size `blksz'.  A no-op
 * when already initialized for the same block size.  The TRACE_CATCH
 * wrappers propagate allocation failures (error-handling continuation
 * lines are missing from this copy). */
292 static int zft_allocate_cmpr_mem(unsigned int blksz)
294 TRACE_FUN(ft_t_flow);
296 if (cmpr_mem_initialized && blksz == alloc_blksz) {
299 TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
301 TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
304 TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
306 cmpr_mem_initialized = 1;
/* Free all compressor buffers and reset the allocation bookkeeping so
 * a later zft_allocate_cmpr_mem() starts from scratch. */
310 static void zftc_cleanup(void)
312 TRACE_FUN(ft_t_flow);
314 zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
315 zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
316 zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
317 cmpr_mem_initialized = alloc_blksz = 0;
321 /*****************************************************************************
323 * The following two functions "ftape_compress()" and *
324 * "ftape_uncompress()" are the interface to the actual compression *
325 * algorithm (i.e. they are calling the "compress()" function from *
326 * the lzrw3 package for now). These routines could quite easily be *
327 * changed to adopt another compression algorithm instead of lzrw3, *
328 * which currently is used. *
330 *****************************************************************************/
332 /* called by zft_compress_write() to perform the compression. Must
333 * return the size of the compressed data.
335 * NOTE: The size of the compressed data should not exceed the size of
336 * the uncompressed data. Most compression algorithms have means
337 * to store data unchanged if the "compressed" data amount would
338 * exceed the original one. Mostly this is done by storing some
339 * flag-bytes in front of the compressed data to indicate if it
340 * is compressed or not. Thus the worst compression result
341 * length is the original length plus those flag-bytes.
343 * We don't want that, as the QIC-80 standard provides a means
344 * of marking uncompressed blocks by simply setting bit 15 of
345 * the compressed block's length. Thus a compressed block can
346 * have at most a length of 2^15-1 bytes. The QIC-80 standard
347 * restricts the block-length even further, allowing only 29k -
350 * Currently, the maximum blocksize used by zftape is 28k.
352 * In short: don't exceed the length of the input-package, set
353 * bit 15 of the compressed size to 1 if you have copied data
354 * instead of compressing it.
/* Compress `in_sz' bytes from `in_buffer' into `out_buffer' using
 * lzrw3 and update the write-path statistics counters.  Returns the
 * (signed) compressed size; per lzrw3's contract a negative value
 * indicates the data was stored uncompressed (callers negate it and
 * set the 0x8000 flag).
 * FIX(review): corrected the misspelled trace string
 * "after compresison" -> "after compression"; no other code change.
 * NOTE(review): the declaration of `compressed_sz' is on a line
 * missing from this copy of the file. */
356 static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
359 TRACE_FUN(ft_t_flow);
362 lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
363 in_buffer, in_sz, out_buffer, &compressed_sz);
364 if (TRACE_LEVEL >= ft_t_info) {
365 /* the compiler will optimize this away when
366 * compiled with NO_TRACE_AT_ALL option
368 TRACE(ft_t_data_flow, "\n"
369 KERN_INFO "before compression: %d bytes\n"
370 KERN_INFO "after compression : %d bytes",
372 (int)(compressed_sz < 0
373 ? -compressed_sz : compressed_sz));
374 /* for statistical purposes
376 zftc_wr_compressed += (compressed_sz < 0
377 ? -compressed_sz : compressed_sz);
378 zftc_wr_uncompressed += in_sz;
380 TRACE_EXIT (int)compressed_sz;
383 /* called by zft_compress_read() to decompress the data. Must
384 * return the size of the decompressed data for sanity checks
385 * (compared with zft_blk_sz)
387 * NOTE: Read the note for zft_compress() above! If bit 15 of the
388 * parameter in_sz is set, then the data in in_buffer isn't
389 * compressed, which must be handled by the un-compression
390 * algorithm. (I changed lzrw3 to handle this.)
392 * The parameter max_out_sz is needed to prevent buffer overruns when
393 * uncompressing corrupt data.
/* Decompress `in_sz' bytes from `in_buffer' into `out_buffer' (at most
 * `max_out_sz' bytes) via lzrw3 and update the read-path statistics.
 * A negative `in_sz' marks data stored uncompressed (see the note
 * above); returns the number of bytes produced.
 * NOTE(review): some parameter lines of the signature and the closing
 * brace are missing from this copy of the file. */
395 static unsigned int zft_uncompress(__u8 *in_buffer,
398 unsigned int max_out_sz)
400 TRACE_FUN(ft_t_flow);
402 lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
403 in_buffer, (__s32)in_sz,
404 out_buffer, (__u32 *)&max_out_sz);
406 if (TRACE_LEVEL >= ft_t_info) {
407 TRACE(ft_t_data_flow, "\n"
408 KERN_INFO "before decompression: %d bytes\n"
409 KERN_INFO "after decompression : %d bytes",
410 in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
411 /* for statistical purposes
413 zftc_rd_compressed += in_sz < 0 ? -in_sz : in_sz;
414 zftc_rd_uncompressed += max_out_sz;
416 TRACE_EXIT (unsigned int)max_out_sz;
419 /* print some statistics about the efficiency of the compression to
/* Print compression-ratio statistics (compressed/uncompressed in %)
 * for both the write and the read path, then reset the counters so the
 * figures are only reported once.  Does nothing below trace level
 * ft_t_info.  The >>10 variants avoid 32-bit overflow in the *100
 * multiplication once more than 16 KiB has been counted. */
422 static void zftc_stats(void)
424 TRACE_FUN(ft_t_flow);
426 if (TRACE_LEVEL < ft_t_info) {
429 if (zftc_wr_uncompressed != 0) {
430 if (zftc_wr_compressed > (1<<14)) {
/* large counters: scale to KiB first so `*100' cannot overflow */
431 TRACE(ft_t_info, "compression statistics (writing):\n"
432 KERN_INFO " compr./uncmpr. : %3d %%",
433 (((zftc_wr_compressed>>10) * 100)
434 / (zftc_wr_uncompressed>>10)));
436 TRACE(ft_t_info, "compression statistics (writing):\n"
437 KERN_INFO " compr./uncmpr. : %3d %%",
438 ((zftc_wr_compressed * 100)
439 / zftc_wr_uncompressed));
442 if (zftc_rd_uncompressed != 0) {
443 if (zftc_rd_compressed > (1<<14)) {
/* same overflow precaution for the read-path counters */
444 TRACE(ft_t_info, "compression statistics (reading):\n"
445 KERN_INFO " compr./uncmpr. : %3d %%",
446 (((zftc_rd_compressed>>10) * 100)
447 / (zftc_rd_uncompressed>>10)));
449 TRACE(ft_t_info, "compression statistics (reading):\n"
450 KERN_INFO " compr./uncmpr. : %3d %%",
451 ((zftc_rd_compressed * 100)
452 / zftc_rd_uncompressed));
455 /* only print it once: */
456 zftc_wr_uncompressed =
458 zftc_rd_uncompressed =
459 zftc_rd_compressed = 0;
463 /* start new compressed block
/* Lay out the start of a new segment in `dst_buf': first flush any
 * remainder of the previous cluster (cluster->cmpr_sz bytes taken from
 * src_buf at cluster->cmpr_pos), write the 2-byte "offset of first
 * block" header, and -- when a new cluster fits (more than the magic
 * 18 bytes left) -- the QIC-113 (8-byte) or QIC-80MC (4-byte) file set
 * byte offset.  Returns (through a line missing from this copy) the
 * buffer position where cluster data may continue.
 * NOTE(review): the parameter list and several brace lines are
 * incomplete in this copy of the file. */
465 static int start_new_cseg(cmpr_info *cluster,
467 const zft_position *pos,
468 const unsigned int blk_sz,
470 const int this_segs_sz,
476 TRACE_FUN(ft_t_flow);
478 size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
479 TRACE(ft_t_data_flow,"\n"
480 KERN_INFO "segment size : %d\n"
481 KERN_INFO "compressed_sz: %d\n"
482 KERN_INFO "size_left : %d",
483 this_segs_sz, cluster->cmpr_sz, size_left);
484 if (size_left > 18) { /* start a new cluster */
485 cp_cnt = cluster->cmpr_sz;
486 cluster->cmpr_sz = 0;
487 buf_pos = cp_cnt + sizeof(__u16);
488 PUT2(dst_buf, 0, buf_pos);
/* write the file set byte offset header for the new cluster */
491 __s64 foffs = pos->volume_pos;
492 if (cp_cnt) foffs += (__s64)blk_sz;
494 TRACE(ft_t_data_flow, "new style QIC-113 header");
495 PUT8(dst_buf, buf_pos, foffs);
496 buf_pos += sizeof(__s64);
498 __u32 foffs = (__u32)pos->volume_pos;
499 if (cp_cnt) foffs += (__u32)blk_sz;
501 TRACE(ft_t_data_flow, "old style QIC-80MC header");
502 PUT4(dst_buf, buf_pos, foffs);
503 buf_pos += sizeof(__u32);
505 } else if (size_left >= 0) {
/* old cluster fits, but no room for a new one: pad the rest */
506 cp_cnt = cluster->cmpr_sz;
507 cluster->cmpr_sz = 0;
508 buf_pos = cp_cnt + sizeof(__u16);
509 PUT2(dst_buf, 0, buf_pos);
510 /* zero unused part of segment. */
511 memset(dst_buf + buf_pos, '\0', size_left);
512 buf_pos = this_segs_sz;
513 } else { /* need entire segment and more space */
515 cp_cnt = this_segs_sz - sizeof(__u16);
516 cluster->cmpr_sz -= cp_cnt;
517 buf_pos = this_segs_sz;
/* copy the carried-over cluster remainder right after the header */
519 memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
520 cluster->cmpr_pos += cp_cnt;
524 /* return-value: the number of bytes removed from the user-buffer
525 * `src_buf' or error code
527 * int *write_cnt : how much actually has been moved to the
528 * dst_buf. Need not be initialized when
529 * function returns with an error code
530 * (negative return value)
531 * __u8 *dst_buf : kernel space buffer where the data has to be
532 * copied to. The contents of this buffers
533 * goes to a specific segment.
534 * const int seg_sz : the size of the segment dst_buf will be
536 * const zft_position *pos : struct containing the coordinates in
537 * the current volume (byte position,
538 * segment id of current segment etc)
539 * const zft_volinfo *volume: information about the current volume,
541 * const __u8 *src_buf : user space buffer that contains the
542 * data the user wants to be written to
544 * const int req_len : the amount of data the user wants to be
/* Compressor write entry point: take up to `req_len' bytes of user
 * data from `src_buf', compress them block by block into `dst_buf'
 * (one tape segment of `seg_sz' bytes) and report via *write_cnt how
 * much of the segment was filled.  Returns the number of user bytes
 * consumed, or a negative error code.
 * NOTE(review): this copy of the file is missing many lines (local
 * declarations, brace/return lines, the -ENOSPC and -EFAULT paths);
 * comments below describe only the visible code. */
547 static int zftc_write(int *write_cnt,
548 __u8 *dst_buf, const int seg_sz,
549 const __u8 *src_buf, const int req_len,
550 const zft_position *pos, const zft_volinfo *volume)
552 int req_len_left = req_len;
555 int buf_pos_write = pos->seg_byte_pos;
556 TRACE_FUN(ft_t_flow);
558 keep_module_locked = 1;
559 MOD_INC_USE_COUNT; /* sets MOD_VISITED and MOD_USED_ONCE,
560 * locking is done with can_unload()
562 /* Note: we do not unlock the module because
563 * there are some values cached in that `cseg' variable. We
564 * don't don't want to use this information when being
565 * unloaded by kerneld even when the tape is full or when we
566 * cannot allocate enough memory.
/* refuse to start a write that cannot fit before end of volume */
568 if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
571 if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
572 /* should we unlock the module? But it shouldn't
573 * be locked anyway ...
577 if (buf_pos_write == 0) { /* fill a new segment */
578 *write_cnt = buf_pos_write = start_new_cseg(&cseg,
585 if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
/* previous cluster was flushed completely: account one block */
586 req_len_left -= result = volume->blk_sz;
592 *write_cnt = result = 0;
595 len_left = seg_sz - buf_pos_write;
596 while ((req_len_left > 0) && (len_left > 18)) {
597 /* now we have some size left for a new compressed
598 * block. We know, that the compression buffer is
599 * empty (else there wouldn't be any space left).
601 if (copy_from_user(zftc_scratch_buf, src_buf + result,
602 volume->blk_sz) != 0) {
605 req_len_left -= volume->blk_sz;
606 cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz,
/* negative size from zft_compress() means "stored uncompressed" */
608 if (cseg.cmpr_sz < 0) {
609 cseg.uncmpr = 0x8000;
610 cseg.cmpr_sz = -cseg.cmpr_sz;
614 /* increment "result" iff we copied the entire
615 * compressed block to the zft_deblock_buf
617 len_left -= sizeof(__u16);
618 if (len_left >= cseg.cmpr_sz) {
/* whole cluster fits into the current segment */
619 len_left -= cseg.count = cseg.cmpr_sz;
620 cseg.cmpr_pos = cseg.cmpr_sz = 0;
621 result += volume->blk_sz;
/* cluster spans: only cseg.count bytes fit here */
625 cseg.count = len_left;
628 PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
629 buf_pos_write += sizeof(__u16);
630 memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
631 buf_pos_write += cseg.count;
632 *write_cnt += cseg.count + sizeof(__u16);
633 FT_SIGNAL_EXIT(_DONT_BLOCK);
635 /* erase the remainder of the segment if less than 18 bytes
636 * left (18 bytes is due to the QIC-80 standard)
638 if (len_left <= 18) {
639 memset(dst_buf + buf_pos_write, '\0', len_left);
640 (*write_cnt) += len_left;
642 TRACE(ft_t_data_flow, "returning %d", result);
648 * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
650 * int *to_do : the remaining size of the read-request.
654 * char *buff : buff is the address of the upper part of the user
655 * buffer, that hasn't been filled with data yet.
657 * int buf_pos_read : copy of from _ftape_read()
658 * int buf_len_read : copy of buf_len_rd from _ftape_read()
659 * char *zft_deblock_buf: zft_deblock_buf
660 * unsigned short blk_sz: the block size valid for this volume, may differ
662 * int finish: if != 0 means that this is the last segment belonging
664 * returns the amount of data actually copied to the user-buffer
666 * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
/* Compressor read entry point: decompress clusters from the segment in
 * `src_buf' (seg_sz bytes) into the user buffer `dst_buf' until either
 * `to_do' bytes are produced or the segment is exhausted.  *read_cnt
 * reports how many bytes were consumed from the segment buffer.
 * Returns the number of bytes copied to the user, or a negative error
 * code.
 * NOTE(review): local declarations (result, uncompressed_sz) and
 * several brace/return/-EFAULT lines are missing from this copy;
 * comments describe only the visible code. */
669 static int zftc_read (int *read_cnt,
670 __u8 *dst_buf, const int to_do,
671 const __u8 *src_buf, const int seg_sz,
672 const zft_position *pos, const zft_volinfo *volume)
676 int remaining = to_do;
677 TRACE_FUN(ft_t_flow);
679 keep_module_locked = 1;
680 MOD_INC_USE_COUNT; /* sets MOD_VISITED and MOD_USED_ONCE,
681 * locking is done with can_unload()
683 TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
684 if (pos->seg_byte_pos == 0) {
685 /* new segment just read
687 TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
/* stash the carried-over cluster remainder into zftc_buf */
689 memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16),
691 cseg.cmpr_pos += cseg.count;
692 *read_cnt = cseg.offset;
693 DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
697 /* loop and uncompress until user buffer full or
698 * deblock-buffer empty
700 TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
701 cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
702 while ((cseg.spans == 0) && (remaining > 0)) {
703 if (cseg.cmpr_pos != 0) { /* cmpr buf is not empty */
/* negative size tells zft_uncompress() the cluster is stored raw */
705 zft_uncompress(zftc_buf,
706 cseg.uncmpr == 0x8000 ?
707 -cseg.cmpr_pos : cseg.cmpr_pos,
710 if (uncompressed_sz != volume->blk_sz) {
712 TRACE_ABORT(-EIO, ft_t_warn,
713 "Uncompressed blk (%d) != blk size (%d)",
714 uncompressed_sz, volume->blk_sz);
716 if (copy_to_user(dst_buf + result,
718 uncompressed_sz) != 0 ) {
721 remaining -= uncompressed_sz;
722 result += uncompressed_sz;
/* fetch the next cluster header from the segment */
726 get_next_cluster(&cseg, src_buf, seg_sz,
727 volume->end_seg == pos->seg_pos);
728 if (cseg.count != 0) {
729 memcpy(zftc_buf, src_buf + cseg.offset,
731 cseg.cmpr_pos = cseg.count;
732 cseg.offset += cseg.count;
733 *read_cnt += cseg.count + sizeof(__u16);
738 TRACE(ft_t_data_flow, "\n"
739 KERN_INFO "compressed_sz: %d\n"
740 KERN_INFO "compos : %d\n"
741 KERN_INFO "*read_cnt : %d",
742 cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
/* fewer than 18 bytes can never hold another cluster (QIC-80) */
744 if (seg_sz - cseg.offset <= 18) {
745 *read_cnt += seg_sz - cseg.offset;
746 TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
748 TRACE(ft_t_data_flow, "\n"
749 KERN_INFO "segment size : %d\n"
750 KERN_INFO "read count : %d\n"
751 KERN_INFO "buf_pos_read : %d\n"
752 KERN_INFO "remaining : %d",
753 seg_sz, *read_cnt, pos->seg_byte_pos,
754 seg_sz - *read_cnt - pos->seg_byte_pos);
755 TRACE(ft_t_data_flow, "returning: %d", result);
759 /* seeks to the new data-position. Reads sometimes a segment.
761 * start_seg and end_seg give the boundaries of the current volume
762 * blk_sz is the blk_sz of the current volume as stored in the
765 * We don't allow blocksizes less than 1024 bytes, therefore we don't need
766 * a 64 bit argument for new_block_pos.
/* Forward declarations of the seek helper routines used by zftc_seek()
 * below, plus its tuning constants.
 * NOTE(review): the slow_seek_forward() prototype appears truncated in
 * this copy -- its final `__u8 *buf' parameter line is missing. */
769 static int seek_in_segment(const unsigned int to_do, cmpr_info *c_info,
770 const char *src_buf, const int seg_sz,
771 const int seg_pos, const zft_volinfo *volume);
772 static int slow_seek_forward_until_error(const unsigned int distance,
773 cmpr_info *c_info, zft_position *pos,
774 const zft_volinfo *volume, __u8 *buf);
775 static int search_valid_segment(unsigned int segment,
776 const unsigned int end_seg,
777 const unsigned int max_foffs,
778 zft_position *pos, cmpr_info *c_info,
779 const zft_volinfo *volume, __u8 *buf);
780 static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
781 zft_position *pos, const zft_volinfo *volume,
783 static int compute_seg_pos(unsigned int dest, zft_position *pos,
784 const zft_volinfo *volume);
/* below this distance (in segments) a slow sequential seek is used */
786 #define ZFT_SLOW_SEEK_THRESHOLD 10 /* segments */
/* give up fast seeking after this many attempts */
787 #define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
/* back up this many segments when a fast-seek target is unreadable */
788 #define ZFT_FAST_SEEK_BACKUP 10 /* segments */
/* Compressor seek entry point: position the tape at logical block
 * `new_block_pos' of the current volume.  Tries "fast" seeks (jump to
 * an estimated segment via compute_seg_pos(), then find a readable
 * segment there) and falls back to a slow sequential seek when near
 * the target, when fast seeking failed too often, or when it loops
 * between the same two segments.
 * NOTE(review): many lines are missing from this copy (local variable
 * declarations such as dest/distance/seg_dist/new_seg/old_seg/limit,
 * brace lines, TRACE_EXITs); comments describe only visible code. */
790 static int zftc_seek(unsigned int new_block_pos,
791 zft_position *pos, const zft_volinfo *volume, __u8 *buf)
800 int fast_seek_trials = 0;
801 TRACE_FUN(ft_t_flow);
803 keep_module_locked = 1;
804 MOD_INC_USE_COUNT; /* sets MOD_VISITED and MOD_USED_ONCE,
805 * locking is done with can_unload()
/* block 0 is trivially the start of the volume */
807 if (new_block_pos == 0) {
808 pos->seg_pos = volume->start_seg;
809 pos->seg_byte_pos = 0;
/* work in KiB units: blocksizes are >= 1 KiB by design (see above) */
814 dest = new_block_pos * (volume->blk_sz >> 10);
815 distance = dest - (pos->volume_pos >> 10);
816 while (distance != 0) {
817 seg_dist = compute_seg_pos(dest, pos, volume);
818 TRACE(ft_t_noise, "\n"
819 KERN_INFO "seg_dist: %d\n"
820 KERN_INFO "distance: %d\n"
821 KERN_INFO "dest : %d\n"
822 KERN_INFO "vpos : %d\n"
823 KERN_INFO "seg_pos : %d\n"
824 KERN_INFO "trials : %d",
825 seg_dist, distance, dest,
826 (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
830 TRACE(ft_t_bug, "BUG: distance %d > 0, "
831 "segment difference %d < 0",
836 new_seg = pos->seg_pos + seg_dist;
837 if (new_seg > volume->end_seg) {
838 new_seg = volume->end_seg;
840 if (old_seg == new_seg || /* loop */
841 seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
842 fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
843 TRACE(ft_t_noise, "starting slow seek:\n"
844 KERN_INFO "fast seek failed too often: %s\n"
845 KERN_INFO "near target position : %s\n"
846 KERN_INFO "looping between two segs : %s",
848 ZFT_FAST_SEEK_MAX_TRIALS)
850 (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD)
854 result = slow_seek_forward(dest, &cseg,
859 limit = volume->end_seg;
862 result = search_valid_segment(new_seg, limit,
866 if (result == 0 || result == -EINTR) {
869 if (new_seg == volume->start_seg) {
870 result = -EIO; /* set error
876 new_seg -= ZFT_FAST_SEEK_BACKUP;
877 if (new_seg < volume->start_seg) {
878 new_seg = volume->start_seg;
883 "Couldn't find a readable segment");
886 } else /* if (distance < 0) */ {
888 TRACE(ft_t_bug, "BUG: distance %d < 0, "
889 "segment difference %d >0",
894 new_seg = pos->seg_pos + seg_dist;
895 if (fast_seek_trials > 0 && seg_dist == 0) {
896 /* this avoids sticking to the same
897 * segment all the time. On the other hand:
898 * if we got here for the first time, and the
899 * deblock_buffer still contains a valid
900 * segment, then there is no need to skip to
901 * the previous segment if the desired position
902 * is inside this segment.
906 if (new_seg < volume->start_seg) {
907 new_seg = volume->start_seg;
909 limit = pos->seg_pos;
912 result = search_valid_segment(new_seg, limit,
916 if (result == 0 || result == -EINTR) {
919 if (new_seg == volume->start_seg) {
920 result = -EIO; /* set error
926 new_seg -= ZFT_FAST_SEEK_BACKUP;
927 if (new_seg < volume->start_seg) {
928 new_seg = volume->start_seg;
933 "Couldn't find a readable segment");
/* re-evaluate how far we still are from the target */
937 distance = dest - (pos->volume_pos >> 10);
943 /* advance inside the given segment at most to_do bytes.
/* Walk the clusters of one segment without decompressing, consuming at
 * most `to_do' KiB-blocks worth of data, and update `c_info'
 * accordingly.  Used by the slow sequential seek path.
 * NOTE(review): brace lines, the `remaining' accounting inside the
 * loop and the TRACE_EXIT are missing from this copy. */
947 static int seek_in_segment(const unsigned int to_do,
952 const zft_volinfo *volume)
955 int blk_sz = volume->blk_sz >> 10;
956 int remaining = to_do;
957 TRACE_FUN(ft_t_flow);
959 if (c_info->offset == 0) {
960 /* new segment just read
962 TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
963 c_info->cmpr_pos += c_info->count;
964 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
966 /* loop and uncompress until user buffer full or
967 * deblock-buffer empty
969 TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
970 c_info->cmpr_sz, c_info->cmpr_pos);
971 while (c_info->spans == 0 && remaining > 0) {
972 if (c_info->cmpr_pos != 0) { /* cmpr buf is not empty */
/* seeking only: discard the buffered cluster data */
975 c_info->cmpr_pos = 0;
978 get_next_cluster(c_info, src_buf, seg_sz,
979 volume->end_seg == seg_pos);
980 if (c_info->count != 0) {
981 c_info->cmpr_pos = c_info->count;
982 c_info->offset += c_info->count;
987 /* Allow escape from this loop on signal!
989 FT_SIGNAL_EXIT(_DONT_BLOCK);
990 DUMP_CMPR_INFO(ft_t_noise, "", c_info);
991 TRACE(ft_t_noise, "to_do: %d", remaining);
/* tail shorter than 18 bytes cannot hold a cluster: skip it */
993 if (seg_sz - c_info->offset <= 18) {
994 c_info->offset = seg_sz;
996 TRACE(ft_t_noise, "\n"
997 KERN_INFO "segment size : %d\n"
998 KERN_INFO "buf_pos_read : %d\n"
999 KERN_INFO "remaining : %d",
1000 seg_sz, c_info->offset,
1001 seg_sz - c_info->offset);
/* Sequentially fetch segments and walk their clusters (via
 * seek_in_segment()) until `distance' KiB have been skipped, the
 * volume's last segment is passed, or an error occurs.  Updates `pos'
 * as it advances.
 * NOTE(review): parameter lines, brace lines and the seg_pos increment
 * are missing from this copy of the file. */
1005 static int slow_seek_forward_until_error(const unsigned int distance,
1008 const zft_volinfo *volume,
1011 unsigned int remaining = distance;
1015 TRACE_FUN(ft_t_flow);
1017 seg_pos = pos->seg_pos;
1019 TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf,
1021 /* now we have the contents of the actual segment in
1022 * the deblock buffer
1024 TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
1025 seg_sz, seg_pos,volume),);
1026 remaining -= result;
1027 pos->volume_pos += result<<10;
1028 pos->seg_pos = seg_pos;
1029 pos->seg_byte_pos = c_info->offset;
/* segment fully consumed: continue with the next one */
1031 if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
1033 pos->seg_byte_pos = 0;
1036 /* Allow escape from this loop on signal!
1038 FT_SIGNAL_EXIT(_DONT_BLOCK);
1039 TRACE(ft_t_noise, "\n"
1040 KERN_INFO "remaining: %d\n"
1041 KERN_INFO "seg_pos: %d\n"
1042 KERN_INFO "end_seg: %d\n"
1043 KERN_INFO "result: %d",
1044 remaining, seg_pos, volume->end_seg, result);
1045 } while (remaining > 0 && seg_pos <= volume->end_seg);
1049 /* return segment id of next segment containing valid data, -EIO otherwise
/* Scan forward from `segment' to `end_seg' looking for a segment whose
 * compression header parses cleanly and whose file offset does not
 * exceed `max_foffs'; on success update `pos' (and, via lines missing
 * from this copy, `c_info') to point at it.
 * NOTE(review): parameter lines, local declarations (seg_sz, tmp_info
 * copy into *c_info), brace lines and returns are incomplete here. */
1051 static int search_valid_segment(unsigned int segment,
1052 const unsigned int end_seg,
1053 const unsigned int max_foffs,
1056 const zft_volinfo *volume,
1061 TRACE_FUN(ft_t_flow);
1063 memset(&tmp_info, 0, sizeof(cmpr_info));
1064 while (segment <= end_seg) {
1065 FT_SIGNAL_EXIT(_DONT_BLOCK);
1067 "Searching readable segment between %d and %d",
1069 seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
/* a foffs of 0 is only valid in the very first segment */
1071 (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
1072 (tmp_info.foffs != 0 || segment == volume->start_seg)) {
1073 if ((tmp_info.foffs>>10) > max_foffs) {
1074 TRACE_ABORT(-EIO, ft_t_noise, "\n"
1075 KERN_INFO "cseg.foff: %d\n"
1076 KERN_INFO "dest : %d",
1077 (int)(tmp_info.foffs >> 10),
1080 DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
1082 pos->seg_pos = segment;
1083 pos->volume_pos = c_info->foffs;
1084 pos->seg_byte_pos = c_info->offset;
1085 TRACE(ft_t_noise, "found segment at %d", segment);
/* Slow sequential seek: repeatedly call slow_seek_forward_until_error()
 * towards `dest' (in KiB); on a read error, skip the failing segment
 * and resume from the next readable one found by
 * search_valid_segment().
 * NOTE(review): parameter lines, braces and the TRACE_EXIT are missing
 * from this copy of the file. */
1093 static int slow_seek_forward(unsigned int dest,
1096 const zft_volinfo *volume,
1099 unsigned int distance;
1101 TRACE_FUN(ft_t_flow);
1103 distance = dest - (pos->volume_pos >> 10);
1104 while ((distance > 0) &&
1105 (result = slow_seek_forward_until_error(distance,
/* a signal aborts the seek immediately */
1110 if (result == -EINTR) {
1113 TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
1114 /* the failing segment is either pos->seg_pos or
1115 * pos->seg_pos + 1. There is no need to further try
1116 * that segment, because ftape_read_segment() already
1117 * has tried very much to read it. So we start with
1118 * following segment, which is pos->seg_pos + 1
1120 if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
1123 TRACE(ft_t_noise, "search_valid_segment() failed");
1127 distance = dest - (pos->volume_pos >> 10);
1129 TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
1130 /* found valid segment, retry the seek */
/* Estimate how many segments separate the current position from the
 * target `dest' (in KiB).  Scales the raw segment distance by the
 * observed ratio of raw tape capacity to logical volume size
 * (`factor', a fixed-point value with 7 fractional bits); the two
 * branches avoid 32-bit overflow for large raw sizes.
 * NOTE(review): brace/return lines and the `segment' negation for the
 * backward case are missing from this copy of the file. */
1135 static int compute_seg_pos(const unsigned int dest,
1137 const zft_volinfo *volume)
1140 int distance = dest - (pos->volume_pos >> 10);
1141 unsigned int raw_size;
1142 unsigned int virt_size;
1143 unsigned int factor;
1144 TRACE_FUN(ft_t_flow);
1146 if (distance >= 0) {
/* forward: compare remaining raw segments with remaining volume KiB */
1147 raw_size = volume->end_seg - pos->seg_pos + 1;
1148 virt_size = ((unsigned int)(volume->size>>10)
1149 - (unsigned int)(pos->volume_pos>>10)
1150 + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1151 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1152 if (virt_size == 0 || raw_size == 0) {
/* choose the scaling order that cannot overflow 32 bits */
1155 if (raw_size >= (1<<25)) {
1156 factor = raw_size/(virt_size>>7);
1158 factor = (raw_size<<7)/virt_size;
1160 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1161 segment = (segment * factor)>>7;
/* backward: same computation relative to the volume start */
1163 raw_size = pos->seg_pos - volume->start_seg + 1;
1164 virt_size = ((unsigned int)(pos->volume_pos>>10)
1165 + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
1166 virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
1167 if (virt_size == 0 || raw_size == 0) {
1170 if (raw_size >= (1<<25)) {
1171 factor = raw_size/(virt_size>>7);
1173 factor = (raw_size<<7)/virt_size;
1175 segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
1177 TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
/* Dispatch table handed to zftape via zft_cmpr_register() below.
 * NOTE(review): the member initializers (presumably the zftc_* entry
 * points declared above) are missing from this copy of the file. */
1181 static struct zft_cmpr_ops cmpr_ops = {
/* Module init work: print a banner and register the cmpr_ops dispatch
 * table with zftape.  TRACE_CATCH propagates registration failure.
 * NOTE(review): the #ifdef lines selecting between the verbose and the
 * short banner are missing from this copy of the file. */
1190 int zft_compressor_init(void)
1192 TRACE_FUN(ft_t_flow);
1195 printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
1196 if (TRACE_LEVEL >= ft_t_info) {
1198 KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
1199 KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n"
1200 KERN_INFO "Compiled for kernel version %s\n", UTS_RELEASE);
1203 /* print a short no-nonsense boot message */
1204 printk("zftape compressor v1.00a 970514 for Linux " UTS_RELEASE "\n");
1205 printk("For use with " FTAPE_VERSION "\n");
1207 TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
1208 TRACE(ft_t_info, "installing compressor for zftape ...");
1209 TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
/* Module metadata.  NOTE(review): the MODULE_AUTHOR( and
 * MODULE_DESCRIPTION( opening lines belonging to the two string
 * arguments below are missing from this copy of the file. */
1216 "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de");
1218 "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
1219 MODULE_LICENSE("GPL");
1221 /* Called by modules package when installing the driver
/* Module entry point (pre-2.6 style): optionally install the
 * can_unload() hook, run zft_compressor_init(), then allow unloading
 * until the compressor is actually used. */
1223 int init_module(void)
1227 #if 0 /* FIXME --RR */
1228 if (!mod_member_present(&__this_module, can_unload))
1230 __this_module.can_unload = can_unload;
1232 result = zft_compressor_init();
1233 keep_module_locked = 0;
1239 /* Called by modules package when removing the driver
/* Module exit point (pre-2.6 style): unregister from zftape and report
 * the outcome.  The buffer cleanup itself happens in zftc_cleanup(),
 * invoked through the cmpr_ops table. */
1241 void cleanup_module(void)
1243 if (zft_cmpr_unregister() != &cmpr_ops) {
1244 TRACE(ft_t_info, "failed");
1246 TRACE(ft_t_info, "successful");
1249 printk(KERN_INFO "zft-compressor successfully unloaded.\n");