d27eb620d1affabeb2b81daa9a01c1bdaf7a069e
[linux-flexiantxendom0-3.2.10.git] / drivers / char / ftape / compressor / zftape-compress.c
1 /*
2  *      Copyright (C) 1994-1997 Claus-Justus Heine
3
4  This program is free software; you can redistribute it and/or
5  modify it under the terms of the GNU General Public License as
6  published by the Free Software Foundation; either version 2, or (at
7  your option) any later version.
8  
9  This program is distributed in the hope that it will be useful, but
10  WITHOUT ANY WARRANTY; without even the implied warranty of
11  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
12  General Public License for more details.
13  
14  You should have received a copy of the GNU General Public License
15  along with this program; see the file COPYING.  If not, write to
16  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
17  USA.
18  
19  *
20  *     This file implements a "generic" interface between the *
21  *     zftape-driver and a compression-algorithm. The *
22  *     compression-algorithm currently used is a LZ77. I use the *
23  *     implementation lzrw3 by Ross N. Williams (Renaissance *
24  *     Software). The compression program itself is in the file
25  *     lzrw3.c * and lzrw3.h.  To adopt another compression algorithm
26  *     the functions * zft_compress() and zft_uncompress() must be
27  *     changed * appropriately. See below.
28  */
29
/* CVS identification strings; kept verbatim for historical reference. */
char zftc_src[] ="$Source: /homes/cvs/ftape-stacked/ftape/compressor/zftape-compress.c,v $";
char zftc_rev[] = "$Revision: 1.1.6.1 $";
char zftc_dat[] = "$Date: 1997/11/16 15:15:56 $";
33
34 #include <linux/version.h>
35 #include <linux/errno.h>
36 #include <linux/mm.h>
37 #include <linux/module.h>
38
39 #include <linux/zftape.h>
40
41 #include <asm/uaccess.h>
42
43 #include "../zftape/zftape-init.h"
44 #include "../zftape/zftape-eof.h"
45 #include "../zftape/zftape-ctl.h"
46 #include "../zftape/zftape-write.h"
47 #include "../zftape/zftape-read.h"
48 #include "../zftape/zftape-rw.h"
49 #include "../compressor/zftape-compress.h"
50 #include "../zftape/zftape-vtbl.h"
51 #include "../compressor/lzrw3.h"
52
53 /*
54  *   global variables
55  */
56
/* I handle the allocation of this buffer as a special case, because
 * its size varies depending on the length of the tape inserted.
 */
60
/* local variables 
 */

/* While != 0 the module must stay loaded because the static `cseg'
 * state below is still valid; cleared by zftc_reset().  Actual unload
 * prevention is done in can_unload() (see comments at zftc_lock()).
 */
static int keep_module_locked = 1;

static void *zftc_wrk_mem = NULL;      /* lzrw3 working memory (CMPR_WRK_MEM_SIZE) */
static __u8 *zftc_buf     = NULL;      /* holds one (un)compressed block */
static void *zftc_scratch_buf  = NULL; /* staging buffer for user-space data */

/* compression statistics 
 */
static unsigned int zftc_wr_uncompressed = 0; /* bytes written before compression */
static unsigned int zftc_wr_compressed   = 0; /* bytes written after compression  */
static unsigned int zftc_rd_uncompressed = 0; /* bytes read after decompression   */
static unsigned int zftc_rd_compressed   = 0; /* bytes read before decompression  */
75
/* forward declarations of the compression hooks and local helpers */
static int  zftc_write(int *write_cnt,
                       __u8 *dst_buf, const int seg_sz,
                       const __u8 *src_buf, const int req_len,
                       const zft_position *pos, const zft_volinfo *volume);
static int  zftc_read(int *read_cnt,
                      __u8  *dst_buf, const int to_do,
                      const __u8 *src_buf, const int seg_sz,
                      const zft_position *pos, const zft_volinfo *volume);
static int  zftc_seek(unsigned int new_block_pos, 
                      zft_position *pos, const zft_volinfo *volume,
                      __u8 *buffer);
static void zftc_lock   (void);
static void zftc_reset  (void);
static void zftc_cleanup(void);
static void zftc_stats      (void);
92
93 /* compressed segment. This conforms to QIC-80-MC, Revision K.
94  * 
95  * Rev. K applies to tapes with `fixed length format' which is
96  * indicated by format code 2,3 and 5. See below for format code 4 and 6
97  *
98  * 2 bytes: offset of compression segment structure
 *          29k > offset >= 29k-18: data from previous segment ends in this
100  *                                  segment and no compressed block starts
101  *                                  in this segment
102  *                     offset == 0: data from previous segment occupies entire
103  *                                  segment and continues in next segment
104  * n bytes: remainder from previous segment
105  * 
106  * Rev. K:  
107  * 4 bytes: 4 bytes: files set byte offset
108  * Post Rev. K and QIC-3020/3020:
109  * 8 bytes: 8 bytes: files set byte offset
110  * 2 bytes: byte count N (amount of data following)
111  *          bit 15 is set if data is compressed, bit 15 is not
112  *          set if data is uncompressed
113  * N bytes: data (as much as specified in the byte count)
114  * 2 bytes: byte count N_1 of next cluster
 * N_1 bytes: data of next cluster
116  * 2 bytes: byte count N_2 of next cluster
117  * N_2 bytes: ...  
118  *
119  * Note that the `N' byte count accounts only for the bytes that in the
120  * current segment if the cluster spans to the next segment.
121  */
122
/* Parser/writer state for one compression cluster.  Tracks both where
 * we are in the private compression buffer (zftc_buf) and where the
 * current cluster sits inside the on-tape segment.
 */
typedef struct
{
        int cmpr_pos;             /* actual position in compression buffer */
        int cmpr_sz;              /* what is left in the compression buffer
                                   * when copying the compressed data to the
                                   * deblock buffer
                                   */
        unsigned int first_block; /* location of header information in
                                   * this segment
                                   */
        unsigned int count;       /* amount of data of current block
                                   * contained in current segment 
                                   */
        unsigned int offset;      /* offset in current segment */
        unsigned int spans:1;     /* might continue in next segment */
        unsigned int uncmpr;      /* 0x8000 if this block contains
                                   * uncompressed data 
                                   */
        __s64 foffs;              /* file set byte offset, same as in 
                                   * compression map segment
                                   */
} cmpr_info;

static cmpr_info cseg; /* static data. Must be kept uptodate and shared by 
                        * read, write and seek functions
                        */
149
/* Dump all fields of a cmpr_info struct to the kernel log at trace
 * `level'; compiles away entirely with NO_TRACE_AT_ALL.
 */
#define DUMP_CMPR_INFO(level, msg, info)                                \
        TRACE(level, msg "\n"                                           \
              KERN_INFO "cmpr_pos   : %d\n"                             \
              KERN_INFO "cmpr_sz    : %d\n"                             \
              KERN_INFO "first_block: %d\n"                             \
              KERN_INFO "count      : %d\n"                             \
              KERN_INFO "offset     : %d\n"                             \
              KERN_INFO "spans      : %d\n"                             \
              KERN_INFO "uncmpr     : 0x%04x\n"                         \
              KERN_INFO "foffs      : " LL_X,                           \
              (info)->cmpr_pos, (info)->cmpr_sz, (info)->first_block,   \
              (info)->count, (info)->offset, (info)->spans == 1,        \
              (info)->uncmpr, LL((info)->foffs))
163
164 /*   dispatch compression segment info, return error code
165  *  
166  *   afterwards, cseg->offset points to start of data of the NEXT
167  *   compressed block, and cseg->count contains the amount of data
168  *   left in the actual compressed block. cseg->spans is set to 1 if
169  *   the block is continued in the following segment. Otherwise it is
170  *   set to 0. 
171  */
static int get_cseg (cmpr_info *cinfo, const __u8 *buff, 
                     const unsigned int seg_sz,
                     const zft_volinfo *volume)
{
	TRACE_FUN(ft_t_flow);

	/* first two bytes of the segment: offset of the compression
	 * segment structure (see layout comment above)
	 */
	cinfo->first_block = GET2(buff, 0);
	if (cinfo->first_block == 0) { /* data spans to next segment */
		cinfo->count  = seg_sz - sizeof(__u16);
		cinfo->offset = seg_sz;
		cinfo->spans = 1;
	} else { /* cluster definitely ends in this segment */
		if (cinfo->first_block > seg_sz) {
			/* data corrupted */
			TRACE_ABORT(-EIO, ft_t_err, "corrupted data:\n"
				    KERN_INFO "segment size: %d\n"
				    KERN_INFO "first block : %d",
				    seg_sz, cinfo->first_block);
		}
		cinfo->count  = cinfo->first_block - sizeof(__u16);
		cinfo->offset = cinfo->first_block;
		cinfo->spans = 0;
	}
	/* now get the offset the first block should have in the
	 * uncompressed data stream.
	 *
	 * For this magic `18' refer to CRF-3 standard or QIC-80MC,
	 * Rev. K.  
	 */
	if ((seg_sz - cinfo->offset) > 18) {
		if (volume->qic113) { /* > revision K: 8 byte offset */
			TRACE(ft_t_data_flow, "New QIC-113 compliance");
			cinfo->foffs = GET8(buff, cinfo->offset);
			cinfo->offset += sizeof(__s64); 
		} else { /* pre rev. K: 4 byte offset */
			TRACE(/* ft_t_data_flow */ ft_t_noise, "pre QIC-113 version");
			cinfo->foffs   = (__s64)GET4(buff, cinfo->offset);
			cinfo->offset += sizeof(__u32); 
		}
	}
	/* sanity checks: the file offset must lie inside the current
	 * volume and the cluster must not exceed one uncompressed block
	 */
	if (cinfo->foffs > volume->size) {
		TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
			    KERN_INFO "offset in current volume: %d\n"
			    KERN_INFO "size of current volume  : %d",
			    (int)(cinfo->foffs>>10), (int)(volume->size>>10));
	}
	if (cinfo->cmpr_pos + cinfo->count > volume->blk_sz) {
		TRACE_ABORT(-EIO, ft_t_err, "Inconsistency:\n"
			    KERN_INFO "block size : %d\n"
			    KERN_INFO "data record: %d",
			    volume->blk_sz, cinfo->cmpr_pos + cinfo->count);
	}
	DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", cinfo);
	TRACE_EXIT 0;
}
227
228 /*  This one is called, when a new cluster starts in same segment.
229  *  
230  *  Note: if this is the first cluster in the current segment, we must
231  *  not check whether there are more than 18 bytes available because
 *  this has already been done in get_cseg() and there may be less
233  *  than 18 bytes available due to header information.
234  * 
235  */
static void get_next_cluster(cmpr_info *cluster, const __u8 *buff, 
                             const int seg_sz, const int finish)
{
	TRACE_FUN(ft_t_flow);

	/* the foffs != 0 case covers the first cluster of a segment,
	 * where fewer than 18 bytes may legitimately remain (see the
	 * comment above)
	 */
	if (seg_sz - cluster->offset > 18 || cluster->foffs != 0) {
		/* 2-byte count word; bit 15 flags uncompressed data */
		cluster->count   = GET2(buff, cluster->offset);
		cluster->uncmpr  = cluster->count & 0x8000;
		cluster->count  -= cluster->uncmpr;
		cluster->offset += sizeof(__u16);
		cluster->foffs   = 0;
		if ((cluster->offset + cluster->count) < seg_sz) {
			cluster->spans = 0;
		} else if (cluster->offset + cluster->count == seg_sz) {
			/* exactly fills the segment: spans unless this
			 * is the last segment of the volume
			 */
			cluster->spans = !finish;
		} else {
			/* either an error or a volume written by an 
			 * old version. If this is a data error, then we'll
			 * catch it later.
			 */
			TRACE(ft_t_data_flow, "Either error or old volume");
			cluster->spans = 1;
			cluster->count = seg_sz - cluster->offset;
		}
	} else {
		/* no further cluster in this segment */
		cluster->count = 0;
		cluster->spans = 0;
		cluster->foffs = 0;
	}
	DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */ , "", cluster);
	TRACE_EXIT;
}
268
269 static void zftc_lock(void)
270 {
271         MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
272                             *  locking is done with can_unload()
273                             */
274         keep_module_locked = 1;
275 }
276
277 /*  this function is needed for zftape_reset_position in zftape-io.c 
278  */
279 static void zftc_reset(void)
280 {
281         TRACE_FUN(ft_t_flow);
282
283         memset((void *)&cseg, '\0', sizeof(cseg));
284         zftc_stats();
285         keep_module_locked = 0;
286         TRACE_EXIT;
287 }
288
/* != 0 once all three buffers above have been allocated */
static int cmpr_mem_initialized = 0;
/* block size the buffers were sized for; 0 when not allocated */
static unsigned int alloc_blksz = 0;
291
/* Make sure the compression work area and the two block buffers are
 * allocated and sized for block size `blksz'; a no-op when already
 * set up for that size.  Returns 0 on success, a negative error code
 * otherwise (partially allocated buffers are released via
 * zftc_cleanup() by the TRACE_CATCH error paths).
 */
static int zft_allocate_cmpr_mem(unsigned int blksz)
{
	TRACE_FUN(ft_t_flow);

	if (cmpr_mem_initialized && blksz == alloc_blksz) {
		TRACE_EXIT 0;
	}
	/* the lzrw3 work area size does not depend on the block size */
	TRACE_CATCH(zft_vmalloc_once(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE),
		    zftc_cleanup());
	TRACE_CATCH(zft_vmalloc_always(&zftc_buf, blksz + CMPR_OVERRUN),
		    zftc_cleanup());
	alloc_blksz = blksz;
	TRACE_CATCH(zft_vmalloc_always(&zftc_scratch_buf, blksz+CMPR_OVERRUN),
		    zftc_cleanup());
	cmpr_mem_initialized = 1;
	TRACE_EXIT 0;
}
309
310 static void zftc_cleanup(void)
311 {
312         TRACE_FUN(ft_t_flow);
313
314         zft_vfree(&zftc_wrk_mem, CMPR_WRK_MEM_SIZE);
315         zft_vfree(&zftc_buf, alloc_blksz + CMPR_OVERRUN);
316         zft_vfree(&zftc_scratch_buf, alloc_blksz + CMPR_OVERRUN);
317         cmpr_mem_initialized = alloc_blksz = 0;
318         TRACE_EXIT;
319 }
320
321 /*****************************************************************************
322  *                                                                           *
323  *  The following two functions "ftape_compress()" and                       *
324  *  "ftape_uncompress()" are the interface to the actual compression         *
325  *  algorithm (i.e. they are calling the "compress()" function from          *
326  *  the lzrw3 package for now). These routines could quite easily be         *
327  *  changed to adopt another compression algorithm instead of lzrw3,         *
328  *  which currently is used.                                                 *
329  *                                                                           *
330  *****************************************************************************/
331
332 /* called by zft_compress_write() to perform the compression. Must
333  * return the size of the compressed data.
334  *
335  * NOTE: The size of the compressed data should not exceed the size of
336  *       the uncompressed data. Most compression algorithms have means
337  *       to store data unchanged if the "compressed" data amount would
338  *       exceed the original one. Mostly this is done by storing some
339  *       flag-bytes in front of the compressed data to indicate if it
340  *       is compressed or not. Thus the worst compression result
341  *       length is the original length plus those flag-bytes.
342  *
343  *       We don't want that, as the QIC-80 standard provides a means
344  *       of marking uncompressed blocks by simply setting bit 15 of
 *       the compressed block's length. Thus a compressed block can
346  *       have at most a length of 2^15-1 bytes. The QIC-80 standard
347  *       restricts the block-length even further, allowing only 29k -
348  *       6 bytes.
349  *
350  *       Currently, the maximum blocksize used by zftape is 28k.
351  *
352  *       In short: don't exceed the length of the input-package, set
353  *       bit 15 of the compressed size to 1 if you have copied data
354  *       instead of compressing it.
355  */
356 static int zft_compress(__u8 *in_buffer, unsigned int in_sz, __u8 *out_buffer)
357
358         __s32 compressed_sz;
359         TRACE_FUN(ft_t_flow);
360         
361
362         lzrw3_compress(COMPRESS_ACTION_COMPRESS, zftc_wrk_mem,
363                        in_buffer, in_sz, out_buffer, &compressed_sz);
364         if (TRACE_LEVEL >= ft_t_info) {
365                 /*  the compiler will optimize this away when
366                  *  compiled with NO_TRACE_AT_ALL option
367                  */
368                 TRACE(ft_t_data_flow, "\n"
369                       KERN_INFO "before compression: %d bytes\n"
370                       KERN_INFO "after compresison : %d bytes", 
371                       in_sz, 
372                       (int)(compressed_sz < 0 
373                       ? -compressed_sz : compressed_sz));
374                 /*  for statistical purposes
375                  */
376                 zftc_wr_compressed   += (compressed_sz < 0 
377                                            ? -compressed_sz : compressed_sz);
378                 zftc_wr_uncompressed += in_sz;
379         }
380         TRACE_EXIT (int)compressed_sz;
381 }
382
383 /* called by zft_compress_read() to decompress the data. Must
384  * return the size of the decompressed data for sanity checks
385  * (compared with zft_blk_sz)
386  *
387  * NOTE: Read the note for zft_compress() above!  If bit 15 of the
388  *       parameter in_sz is set, then the data in in_buffer isn't
389  *       compressed, which must be handled by the un-compression
390  *       algorithm. (I changed lzrw3 to handle this.)
391  *
392  *  The parameter max_out_sz is needed to prevent buffer overruns when 
393  *  uncompressing corrupt data.
394  */
395 static unsigned int zft_uncompress(__u8 *in_buffer, 
396                                    int in_sz, 
397                                    __u8 *out_buffer,
398                                    unsigned int max_out_sz)
399
400         TRACE_FUN(ft_t_flow);
401         
402         lzrw3_compress(COMPRESS_ACTION_DECOMPRESS, zftc_wrk_mem,
403                        in_buffer, (__s32)in_sz,
404                        out_buffer, (__u32 *)&max_out_sz);
405         
406         if (TRACE_LEVEL >= ft_t_info) {
407                 TRACE(ft_t_data_flow, "\n"
408                       KERN_INFO "before decompression: %d bytes\n"
409                       KERN_INFO "after decompression : %d bytes", 
410                       in_sz < 0 ? -in_sz : in_sz,(int)max_out_sz);
411                 /*  for statistical purposes
412                  */
413                 zftc_rd_compressed   += in_sz < 0 ? -in_sz : in_sz;
414                 zftc_rd_uncompressed += max_out_sz;
415         }
416         TRACE_EXIT (unsigned int)max_out_sz;
417 }
418
419 /* print some statistics about the efficiency of the compression to
420  * the kernel log 
421  */
static void zftc_stats(void)
{
	TRACE_FUN(ft_t_flow);

	if (TRACE_LEVEL < ft_t_info) {
		TRACE_EXIT;
	}
	if (zftc_wr_uncompressed != 0) {
		if (zftc_wr_compressed > (1<<14)) {
			/* scale both counters down by 1k first so that
			 * the multiplication by 100 cannot overflow
			 */
			TRACE(ft_t_info, "compression statistics (writing):\n"
			      KERN_INFO " compr./uncmpr.   : %3d %%",
			      (((zftc_wr_compressed>>10) * 100)
			       / (zftc_wr_uncompressed>>10)));
		} else {
			TRACE(ft_t_info, "compression statistics (writing):\n"
			      KERN_INFO " compr./uncmpr.   : %3d %%",
			      ((zftc_wr_compressed * 100)
			       / zftc_wr_uncompressed));
		}
	}
	if (zftc_rd_uncompressed != 0) {
		if (zftc_rd_compressed > (1<<14)) {
			/* same overflow-avoiding scaling as above */
			TRACE(ft_t_info, "compression statistics (reading):\n"
			      KERN_INFO " compr./uncmpr.   : %3d %%",
			      (((zftc_rd_compressed>>10) * 100)
			       / (zftc_rd_uncompressed>>10)));
		} else {
			TRACE(ft_t_info, "compression statistics (reading):\n"
			      KERN_INFO " compr./uncmpr.   : %3d %%",
			      ((zftc_rd_compressed * 100)
			       / zftc_rd_uncompressed));
		}
	}
	/* only print it once: */
	zftc_wr_uncompressed = 
		zftc_wr_compressed  =
		zftc_rd_uncompressed =
		zftc_rd_compressed   = 0;
	TRACE_EXIT;
}
462
/* Start a new compressed segment: first flush whatever remainder of
 * the previous compressed block is still pending in `src_buf'
 * (cluster->cmpr_sz bytes starting at cluster->cmpr_pos), then, when
 * room permits, write the file-set byte-offset header for a new
 * cluster.  Returns the write position in `dst_buf' at which
 * zftc_write() continues filling the segment.
 */
static int start_new_cseg(cmpr_info *cluster, 
                          char *dst_buf, 
                          const zft_position *pos,
                          const unsigned int blk_sz,
                          const char *src_buf,
                          const int this_segs_sz,
                          const int qic113)
{
	int size_left;  /* payload room left after flushing the remainder */
	int cp_cnt;     /* bytes of old remainder copied into this segment */
	int buf_pos;    /* current write position inside dst_buf */
	TRACE_FUN(ft_t_flow);

	size_left = this_segs_sz - sizeof(__u16) - cluster->cmpr_sz;
	TRACE(ft_t_data_flow,"\n" 
	      KERN_INFO "segment size   : %d\n"
	      KERN_INFO "compressed_sz: %d\n"
	      KERN_INFO "size_left      : %d",
	      this_segs_sz, cluster->cmpr_sz, size_left);
	if (size_left > 18) { /* start a new cluster */
		cp_cnt = cluster->cmpr_sz;
		cluster->cmpr_sz = 0;
		buf_pos = cp_cnt + sizeof(__u16);
		PUT2(dst_buf, 0, buf_pos);

		/* write the file-set byte offset header; its width
		 * depends on the format revision (see the layout
		 * comment near the top of this file)
		 */
		if (qic113) {
			__s64 foffs = pos->volume_pos;
			if (cp_cnt) foffs += (__s64)blk_sz;

			TRACE(ft_t_data_flow, "new style QIC-113 header");
			PUT8(dst_buf, buf_pos, foffs);
			buf_pos += sizeof(__s64);
		} else {
			__u32 foffs = (__u32)pos->volume_pos;
			if (cp_cnt) foffs += (__u32)blk_sz;
			
			TRACE(ft_t_data_flow, "old style QIC-80MC header");
			PUT4(dst_buf, buf_pos, foffs);
			buf_pos += sizeof(__u32);
		}
	} else if (size_left >= 0) {
		/* remainder fits, but no room for a new cluster */
		cp_cnt = cluster->cmpr_sz;
		cluster->cmpr_sz = 0;
		buf_pos = cp_cnt + sizeof(__u16);
		PUT2(dst_buf, 0, buf_pos);  
		/* zero unused part of segment. */
		memset(dst_buf + buf_pos, '\0', size_left);
		buf_pos = this_segs_sz;
	} else { /* need entire segment and more space */
		PUT2(dst_buf, 0, 0); 
		cp_cnt = this_segs_sz - sizeof(__u16);
		cluster->cmpr_sz  -= cp_cnt;
		buf_pos = this_segs_sz;
	}
	/* flush the remainder of the previous compressed block */
	memcpy(dst_buf + sizeof(__u16), src_buf + cluster->cmpr_pos, cp_cnt);
	cluster->cmpr_pos += cp_cnt;
	TRACE_EXIT buf_pos;
}
523
524 /* return-value: the number of bytes removed from the user-buffer
525  *               `src_buf' or error code
526  *
527  *  int *write_cnt           : how much actually has been moved to the
528  *                             dst_buf. Need not be initialized when
529  *                             function returns with an error code
 *                             (negative return value) 
 *  __u8 *dst_buf            : kernel space buffer where the data has to
 *                             be copied to. The contents of this buffer
 *                             goes to a specific segment.
534  *  const int seg_sz         : the size of the segment dst_buf will be
535  *                             copied to.
536  *  const zft_position *pos  : struct containing the coordinates in
537  *                             the current volume (byte position,
538  *                             segment id of current segment etc)
539  *  const zft_volinfo *volume: information about the current volume,
540  *                             size etc.
541  *  const __u8 *src_buf      : user space buffer that contains the
542  *                             data the user wants to be written to
543  *                             tape.
544  *  const int req_len        : the amount of data the user wants to be
545  *                             written to tape.
546  */
static int zftc_write(int *write_cnt,
                      __u8 *dst_buf, const int seg_sz,
                      const __u8 *src_buf, const int req_len,
                      const zft_position *pos, const zft_volinfo *volume)
{
	int req_len_left = req_len;            /* user bytes still to consume */
	int result;                            /* user bytes consumed (return) */
	int len_left;                          /* room remaining in dst_buf */
	int buf_pos_write = pos->seg_byte_pos; /* write position in segment */
	TRACE_FUN(ft_t_flow);
	
	keep_module_locked = 1;
	MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
			    *  locking is done with can_unload()
			    */
	/* Note: we do not unlock the module because
	 * there are some values cached in that `cseg' variable.  We
	 * don't want to use this information when being
	 * unloaded by kerneld even when the tape is full or when we
	 * cannot allocate enough memory.
	 */
	if (pos->tape_pos > (volume->size-volume->blk_sz-ZFT_CMPR_OVERHEAD)) {
		TRACE_EXIT -ENOSPC;
	}    
	if (zft_allocate_cmpr_mem(volume->blk_sz) < 0) {
		/* should we unlock the module? But it shouldn't 
		 * be locked anyway ...
		 */
		TRACE_EXIT -ENOMEM;
	}
	if (buf_pos_write == 0) { /* fill a new segment */
		*write_cnt = buf_pos_write = start_new_cseg(&cseg,
							    dst_buf,
							    pos,
							    volume->blk_sz,
							    zftc_buf, 
							    seg_sz,
							    volume->qic113);
		if (cseg.cmpr_sz == 0 && cseg.cmpr_pos != 0) {
			/* the leftover of the previous compressed block
			 * was flushed completely into this segment, so
			 * account one full user block as consumed now
			 */
			req_len_left -= result = volume->blk_sz;
			cseg.cmpr_pos  = 0;
		} else {
			result = 0;
		}
	} else {
		*write_cnt = result = 0;
	}
	
	len_left = seg_sz - buf_pos_write;
	while ((req_len_left > 0) && (len_left > 18)) {
		/* now we have some size left for a new compressed
		 * block.  We know, that the compression buffer is
		 * empty (else there wouldn't be any space left).  
		 */
		if (copy_from_user(zftc_scratch_buf, src_buf + result, 
				   volume->blk_sz) != 0) {
			TRACE_EXIT -EFAULT;
		}
		req_len_left -= volume->blk_sz;
		cseg.cmpr_sz = zft_compress(zftc_scratch_buf, volume->blk_sz, 
					    zftc_buf);
		if (cseg.cmpr_sz < 0) {
			/* negative size: block was stored uncompressed,
			 * flag it with bit 15 of the count word
			 */
			cseg.uncmpr = 0x8000;
			cseg.cmpr_sz = -cseg.cmpr_sz;
		} else {
			cseg.uncmpr = 0;
		}
		/* increment "result" iff we copied the entire
		 * compressed block to the zft_deblock_buf 
		 */
		len_left -= sizeof(__u16);
		if (len_left >= cseg.cmpr_sz) {
			len_left -= cseg.count = cseg.cmpr_sz;
			cseg.cmpr_pos = cseg.cmpr_sz = 0;
			result += volume->blk_sz;
		} else {
			/* block only partially fits; the rest stays in
			 * zftc_buf for the next segment
			 */
			cseg.cmpr_sz       -= 
				cseg.cmpr_pos =
				cseg.count    = len_left;
			len_left = 0;
		}
		PUT2(dst_buf, buf_pos_write, cseg.uncmpr | cseg.count);
		buf_pos_write += sizeof(__u16);
		memcpy(dst_buf + buf_pos_write, zftc_buf, cseg.count);
		buf_pos_write += cseg.count;
		*write_cnt    += cseg.count + sizeof(__u16);
		FT_SIGNAL_EXIT(_DONT_BLOCK);
	}
	/* erase the remainder of the segment if less than 18 bytes
	 * left (18 bytes is due to the QIC-80 standard) 
	 */
	if (len_left <= 18) {
		memset(dst_buf + buf_pos_write, '\0', len_left);
		(*write_cnt) += len_left;
	}
	TRACE(ft_t_data_flow, "returning %d", result);
	TRACE_EXIT result;
}   
645
646 /* out:
647  *
648  * int *read_cnt: the number of bytes we removed from the zft_deblock_buf
649  *                (result)
650  * int *to_do   : the remaining size of the read-request.
651  *
652  * in:
653  *
654  * char *buff          : buff is the address of the upper part of the user
655  *                       buffer, that hasn't been filled with data yet.
656
657  * int buf_pos_read    : copy of from _ftape_read()
658  * int buf_len_read    : copy of buf_len_rd from _ftape_read()
659  * char *zft_deblock_buf: zft_deblock_buf
660  * unsigned short blk_sz: the block size valid for this volume, may differ
661  *                            from zft_blk_sz.
662  * int finish: if != 0 means that this is the last segment belonging
663  *  to this volume
664  * returns the amount of data actually copied to the user-buffer
665  *
666  * to_do MUST NOT SHRINK except to indicate an EOF. In this case *to_do has to
667  * be set to 0 
668  */
/*  Called by zftape to uncompress data from the deblock buffer into the
 *  user buffer.  See the parameter description in the comment block
 *  above; *read_cnt returns how many bytes were consumed from the
 *  deblock buffer, and the return value is the number of bytes copied
 *  to the user buffer or a negative error code.
 */
static int zftc_read (int *read_cnt, 
                      __u8  *dst_buf, const int to_do, 
                      const __u8 *src_buf, const int seg_sz, 
                      const zft_position *pos, const zft_volinfo *volume)
{          
        int uncompressed_sz;         
        int result = 0;        /* bytes copied to the user buffer so far */
        int remaining = to_do; /* bytes the caller still wants */
        TRACE_FUN(ft_t_flow);

        keep_module_locked = 1;
        MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
                            *  locking is done with can_unload()
                            */
        /* make sure the intermediate buffers for this block size exist */
        TRACE_CATCH(zft_allocate_cmpr_mem(volume->blk_sz),);
        if (pos->seg_byte_pos == 0) {
                /* new segment just read
                 */
                TRACE_CATCH(get_cseg(&cseg, src_buf, seg_sz, volume),
                            *read_cnt = 0);
                /* stash the first compressed cluster; the leading __u16
                 * holds the size word already parsed by get_cseg()
                 */
                memcpy(zftc_buf + cseg.cmpr_pos, src_buf + sizeof(__u16), 
                       cseg.count);
                cseg.cmpr_pos += cseg.count;
                *read_cnt      = cseg.offset;
                DUMP_CMPR_INFO(ft_t_noise /* ft_t_any */, "", &cseg);
        } else {
                *read_cnt = 0;
        }
        /* loop and uncompress until user buffer full or
         * deblock-buffer empty 
         */
        TRACE(ft_t_data_flow, "compressed_sz: %d, compos : %d, *read_cnt: %d",
              cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
        while ((cseg.spans == 0) && (remaining > 0)) {
                if (cseg.cmpr_pos  != 0) { /* cmpr buf is not empty */
                        /* when the 0x8000 flag is set a negative length
                         * is passed through to zft_uncompress() --
                         * presumably marking an uncompressed cluster;
                         * confirm against zft_uncompress()
                         */
                        uncompressed_sz = 
                                zft_uncompress(zftc_buf,
                                               cseg.uncmpr == 0x8000 ?
                                               -cseg.cmpr_pos : cseg.cmpr_pos,
                                               zftc_scratch_buf,
                                               volume->blk_sz);
                        if (uncompressed_sz != volume->blk_sz) {
                                *read_cnt = 0;
                                TRACE_ABORT(-EIO, ft_t_warn,
                                      "Uncompressed blk (%d) != blk size (%d)",
                                      uncompressed_sz, volume->blk_sz);
                        }       
                        if (copy_to_user(dst_buf + result, 
                                         zftc_scratch_buf, 
                                         uncompressed_sz) != 0 ) {
                                TRACE_EXIT -EFAULT;
                        }
                        remaining      -= uncompressed_sz;
                        result     += uncompressed_sz;
                        cseg.cmpr_pos  = 0;
                }                                              
                if (remaining > 0) {
                        /* fetch the next compressed cluster from the
                         * deblock buffer
                         */
                        get_next_cluster(&cseg, src_buf, seg_sz, 
                                         volume->end_seg == pos->seg_pos);
                        if (cseg.count != 0) {
                                memcpy(zftc_buf, src_buf + cseg.offset,
                                       cseg.count);
                                cseg.cmpr_pos = cseg.count;
                                cseg.offset  += cseg.count;
                                *read_cnt += cseg.count + sizeof(__u16);
                        } else {
                                remaining = 0; /* segment exhausted */
                        }
                }
                TRACE(ft_t_data_flow, "\n" 
                      KERN_INFO "compressed_sz: %d\n"
                      KERN_INFO "compos       : %d\n"
                      KERN_INFO "*read_cnt    : %d",
                      cseg.cmpr_sz, cseg.cmpr_pos, *read_cnt);
        }
        /* NOTE(review): a tail of at most 18 bytes apparently cannot
         * hold another cluster, so account the remainder of the segment
         * as consumed; confirm against the write side.
         */
        if (seg_sz - cseg.offset <= 18) {
                *read_cnt += seg_sz - cseg.offset;
                TRACE(ft_t_data_flow, "expanding read cnt to: %d", *read_cnt);
        }
        TRACE(ft_t_data_flow, "\n"
              KERN_INFO "segment size   : %d\n"
              KERN_INFO "read count     : %d\n"
              KERN_INFO "buf_pos_read   : %d\n"
              KERN_INFO "remaining      : %d",
                seg_sz, *read_cnt, pos->seg_byte_pos, 
                seg_sz - *read_cnt - pos->seg_byte_pos);
        TRACE(ft_t_data_flow, "returning: %d", result);
        TRACE_EXIT result;
}                
758
759 /* seeks to the new data-position. Reads sometimes a segment.
760  *  
761  * start_seg and end_seg give the boundaries of the current volume
762  * blk_sz is the blk_sz of the current volume as stored in the
763  * volume label
764  *
765  * We don't allow blocksizes less than 1024 bytes, therefore we don't need
766  * a 64 bit argument for new_block_pos.
767  */
768
/* Forward declarations of the helpers used by zftc_seek() below. */
static int seek_in_segment(const unsigned int to_do, cmpr_info  *c_info,
                           const char *src_buf, const int seg_sz, 
                           const int seg_pos, const zft_volinfo *volume);
static int slow_seek_forward_until_error(const unsigned int distance,
                                         cmpr_info *c_info, zft_position *pos, 
                                         const zft_volinfo *volume, __u8 *buf);
static int search_valid_segment(unsigned int segment,
                                const unsigned int end_seg,
                                const unsigned int max_foffs,
                                zft_position *pos, cmpr_info *c_info,
                                const zft_volinfo *volume, __u8 *buf);
static int slow_seek_forward(unsigned int dest, cmpr_info *c_info,
                             zft_position *pos, const zft_volinfo *volume,
                             __u8 *buf);
static int compute_seg_pos(unsigned int dest, zft_position *pos,
                           const zft_volinfo *volume);

/* Tuning knobs for the fast/slow seek strategy in zftc_seek(): */
#define ZFT_SLOW_SEEK_THRESHOLD  10 /* segments */
#define ZFT_FAST_SEEK_MAX_TRIALS 10 /* times */
#define ZFT_FAST_SEEK_BACKUP     10 /* segments */
789
790 static int zftc_seek(unsigned int new_block_pos,
791                      zft_position *pos, const zft_volinfo *volume, __u8 *buf)
792 {
793         unsigned int dest;
794         int limit;
795         int distance;
796         int result = 0;
797         int seg_dist;
798         int new_seg;
799         int old_seg = 0;
800         int fast_seek_trials = 0;
801         TRACE_FUN(ft_t_flow);
802
803         keep_module_locked = 1;
804         MOD_INC_USE_COUNT; /*  sets MOD_VISITED and MOD_USED_ONCE,
805                             *  locking is done with can_unload()
806                             */
807         if (new_block_pos == 0) {
808                 pos->seg_pos      = volume->start_seg;
809                 pos->seg_byte_pos = 0;
810                 pos->volume_pos   = 0;
811                 zftc_reset();
812                 TRACE_EXIT 0;
813         }
814         dest = new_block_pos * (volume->blk_sz >> 10);
815         distance = dest - (pos->volume_pos >> 10);
816         while (distance != 0) {
817                 seg_dist = compute_seg_pos(dest, pos, volume);
818                 TRACE(ft_t_noise, "\n"
819                       KERN_INFO "seg_dist: %d\n"
820                       KERN_INFO "distance: %d\n"
821                       KERN_INFO "dest    : %d\n"
822                       KERN_INFO "vpos    : %d\n"
823                       KERN_INFO "seg_pos : %d\n"
824                       KERN_INFO "trials  : %d",
825                       seg_dist, distance, dest,
826                       (unsigned int)(pos->volume_pos>>10), pos->seg_pos,
827                       fast_seek_trials);
828                 if (distance > 0) {
829                         if (seg_dist < 0) {
830                                 TRACE(ft_t_bug, "BUG: distance %d > 0, "
831                                       "segment difference %d < 0",
832                                       distance, seg_dist);
833                                 result = -EIO;
834                                 break;
835                         }
836                         new_seg = pos->seg_pos + seg_dist;
837                         if (new_seg > volume->end_seg) {
838                                 new_seg = volume->end_seg;
839                         }
840                         if (old_seg == new_seg || /* loop */
841                             seg_dist <= ZFT_SLOW_SEEK_THRESHOLD ||
842                             fast_seek_trials >= ZFT_FAST_SEEK_MAX_TRIALS) {
843                                 TRACE(ft_t_noise, "starting slow seek:\n"
844                                    KERN_INFO "fast seek failed too often: %s\n"
845                                    KERN_INFO "near target position      : %s\n"
846                                    KERN_INFO "looping between two segs  : %s",
847                                       (fast_seek_trials >= 
848                                        ZFT_FAST_SEEK_MAX_TRIALS)
849                                       ? "yes" : "no",
850                                       (seg_dist <= ZFT_SLOW_SEEK_THRESHOLD) 
851                                       ? "yes" : "no",
852                                       (old_seg == new_seg)
853                                       ? "yes" : "no");
854                                 result = slow_seek_forward(dest, &cseg, 
855                                                            pos, volume, buf);
856                                 break;
857                         }
858                         old_seg = new_seg;
859                         limit = volume->end_seg;
860                         fast_seek_trials ++;
861                         for (;;) {
862                                 result = search_valid_segment(new_seg, limit,
863                                                               volume->size,
864                                                               pos, &cseg,
865                                                               volume, buf);
866                                 if (result == 0 || result == -EINTR) {
867                                         break;
868                                 }
869                                 if (new_seg == volume->start_seg) {
870                                         result = -EIO; /* set errror 
871                                                         * condition
872                                                         */
873                                         break;
874                                 }
875                                 limit    = new_seg;
876                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
877                                 if (new_seg < volume->start_seg) {
878                                         new_seg = volume->start_seg;
879                                 }
880                         }
881                         if (result < 0) {
882                                 TRACE(ft_t_warn,
883                                       "Couldn't find a readable segment");
884                                 break;
885                         }
886                 } else /* if (distance < 0) */ {
887                         if (seg_dist > 0) {
888                                 TRACE(ft_t_bug, "BUG: distance %d < 0, "
889                                       "segment difference %d >0",
890                                       distance, seg_dist);
891                                 result = -EIO;
892                                 break;
893                         }
894                         new_seg = pos->seg_pos + seg_dist;
895                         if (fast_seek_trials > 0 && seg_dist == 0) {
896                                 /* this avoids sticking to the same
897                                  * segment all the time. On the other hand:
898                                  * if we got here for the first time, and the
899                                  * deblock_buffer still contains a valid
900                                  * segment, then there is no need to skip to 
901                                  * the previous segment if the desired position
902                                  * is inside this segment.
903                                  */
904                                 new_seg --;
905                         }
906                         if (new_seg < volume->start_seg) {
907                                 new_seg = volume->start_seg;
908                         }
909                         limit   = pos->seg_pos;
910                         fast_seek_trials ++;
911                         for (;;) {
912                                 result = search_valid_segment(new_seg, limit,
913                                                               pos->volume_pos,
914                                                               pos, &cseg,
915                                                               volume, buf);
916                                 if (result == 0 || result == -EINTR) {
917                                         break;
918                                 }
919                                 if (new_seg == volume->start_seg) {
920                                         result = -EIO; /* set errror 
921                                                         * condition
922                                                         */
923                                         break;
924                                 }
925                                 limit    = new_seg;
926                                 new_seg -= ZFT_FAST_SEEK_BACKUP;
927                                 if (new_seg < volume->start_seg) {
928                                         new_seg = volume->start_seg;
929                                 }
930                         }
931                         if (result < 0) {
932                                 TRACE(ft_t_warn,
933                                       "Couldn't find a readable segment");
934                                 break;
935                         }
936                 }
937                 distance = dest - (pos->volume_pos >> 10);
938         }
939         TRACE_EXIT result;
940 }
941
942
/*  Advance the decompression state inside the segment currently held in
 *  src_buf by at most "to_do" kilobytes, without actually running the
 *  decompressor.  Returns the number of kilobytes skipped, or a
 *  negative error code.
 */

static int seek_in_segment(const unsigned int to_do,
                           cmpr_info  *c_info,
                           const char *src_buf, 
                           const int seg_sz, 
                           const int seg_pos,
                           const zft_volinfo *volume)
{
        int result = 0;
        int blk_sz = volume->blk_sz >> 10; /* block size in kilobytes */
        int remaining = to_do;
        TRACE_FUN(ft_t_flow);

        if (c_info->offset == 0) {
                /* new segment just read
                 */
                TRACE_CATCH(get_cseg(c_info, src_buf, seg_sz, volume),);
                c_info->cmpr_pos += c_info->count;
                DUMP_CMPR_INFO(ft_t_noise, "", c_info);
        }
        /* loop and uncompress until user buffer full or
         * deblock-buffer empty 
         */
        TRACE(ft_t_noise, "compressed_sz: %d, compos : %d",
              c_info->cmpr_sz, c_info->cmpr_pos);
        while (c_info->spans == 0 && remaining > 0) {
                if (c_info->cmpr_pos  != 0) { /* cmpr buf is not empty */
                        /* skip over one block's worth of data */
                        result       += blk_sz;
                        remaining    -= blk_sz;
                        c_info->cmpr_pos = 0;
                }
                if (remaining > 0) {
                        get_next_cluster(c_info, src_buf, seg_sz, 
                                         volume->end_seg == seg_pos);
                        if (c_info->count != 0) {
                                c_info->cmpr_pos = c_info->count;
                                c_info->offset  += c_info->count;
                        } else {
                                break; /* no more clusters in this segment */
                        }
                }
                /*  Allow escape from this loop on signal!
                 */
                FT_SIGNAL_EXIT(_DONT_BLOCK);
                DUMP_CMPR_INFO(ft_t_noise, "", c_info);
                TRACE(ft_t_noise, "to_do: %d", remaining);
        }
        /* NOTE(review): a tail of at most 18 bytes apparently cannot
         * hold another cluster, so mark the segment fully consumed --
         * mirrors the "<= 18" check in zftc_read(); confirm against the
         * write side.
         */
        if (seg_sz - c_info->offset <= 18) {
                c_info->offset = seg_sz;
        }
        TRACE(ft_t_noise, "\n"
              KERN_INFO "segment size   : %d\n"
              KERN_INFO "buf_pos_read   : %d\n"
              KERN_INFO "remaining      : %d",
              seg_sz, c_info->offset,
              seg_sz - c_info->offset);
        TRACE_EXIT result;
}                
1004
/*  Seek forward by "distance" kilobytes, fetching one segment after the
 *  other and skipping inside each with seek_in_segment(), until either
 *  the distance is covered or the last segment of the volume has been
 *  consumed.  A read error aborts via TRACE_CATCH; the caller then
 *  retries from the next readable segment.
 */
static int slow_seek_forward_until_error(const unsigned int distance,
                                         cmpr_info *c_info,
                                         zft_position *pos, 
                                         const zft_volinfo *volume,
                                         __u8 *buf)
{
        unsigned int remaining = distance;
        int seg_sz;
        int seg_pos;
        int result;
        TRACE_FUN(ft_t_flow);
        
        seg_pos = pos->seg_pos;
        do {
                TRACE_CATCH(seg_sz = zft_fetch_segment(seg_pos, buf, 
                                                       FT_RD_AHEAD),);
                /* now we have the contents of the actual segment in
                 * the deblock buffer
                 */
                TRACE_CATCH(result = seek_in_segment(remaining, c_info, buf,
                                                     seg_sz, seg_pos,volume),);
                remaining        -= result;
                pos->volume_pos  += result<<10; /* result is in kilobytes */
                pos->seg_pos      = seg_pos;
                pos->seg_byte_pos = c_info->offset;
                seg_pos ++;
                /* segment fully consumed: advance the logical position
                 * to the start of the next segment
                 */
                if (seg_pos <= volume->end_seg && c_info->offset == seg_sz) {
                        pos->seg_pos ++;
                        pos->seg_byte_pos = 0;
                        c_info->offset = 0;
                }
                /*  Allow escape from this loop on signal!
                 */
                FT_SIGNAL_EXIT(_DONT_BLOCK);
                TRACE(ft_t_noise, "\n"
                      KERN_INFO "remaining:  %d\n"
                      KERN_INFO "seg_pos:    %d\n"
                      KERN_INFO "end_seg:    %d\n"
                      KERN_INFO "result:     %d",
                      remaining, seg_pos, volume->end_seg, result);  
        } while (remaining > 0 && seg_pos <= volume->end_seg);
        TRACE_EXIT 0;
}
1048
1049 /* return segment id of next segment containing valid data, -EIO otherwise
1050  */
1051 static int search_valid_segment(unsigned int segment,
1052                                 const unsigned int end_seg,
1053                                 const unsigned int max_foffs,
1054                                 zft_position *pos,
1055                                 cmpr_info *c_info,
1056                                 const zft_volinfo *volume,
1057                                 __u8 *buf)
1058 {
1059         cmpr_info tmp_info;
1060         int seg_sz;
1061         TRACE_FUN(ft_t_flow);
1062         
1063         memset(&tmp_info, 0, sizeof(cmpr_info));
1064         while (segment <= end_seg) {
1065                 FT_SIGNAL_EXIT(_DONT_BLOCK);
1066                 TRACE(ft_t_noise,
1067                       "Searching readable segment between %d and %d",
1068                       segment, end_seg);
1069                 seg_sz = zft_fetch_segment(segment, buf, FT_RD_AHEAD);
1070                 if ((seg_sz > 0) &&
1071                     (get_cseg (&tmp_info, buf, seg_sz, volume) >= 0) &&
1072                     (tmp_info.foffs != 0 || segment == volume->start_seg)) {
1073                         if ((tmp_info.foffs>>10) > max_foffs) {
1074                                 TRACE_ABORT(-EIO, ft_t_noise, "\n"
1075                                             KERN_INFO "cseg.foff: %d\n"
1076                                             KERN_INFO "dest     : %d",
1077                                             (int)(tmp_info.foffs >> 10),
1078                                             max_foffs);
1079                         }
1080                         DUMP_CMPR_INFO(ft_t_noise, "", &tmp_info);
1081                         *c_info           = tmp_info;
1082                         pos->seg_pos      = segment;
1083                         pos->volume_pos   = c_info->foffs;
1084                         pos->seg_byte_pos = c_info->offset;
1085                         TRACE(ft_t_noise, "found segment at %d", segment);
1086                         TRACE_EXIT 0;
1087                 }
1088                 segment++;
1089         }
1090         TRACE_EXIT -EIO;
1091 }
1092
1093 static int slow_seek_forward(unsigned int dest,
1094                              cmpr_info *c_info,
1095                              zft_position *pos,
1096                              const zft_volinfo *volume,
1097                              __u8 *buf)
1098 {
1099         unsigned int distance;
1100         int result = 0;
1101         TRACE_FUN(ft_t_flow);
1102                 
1103         distance = dest - (pos->volume_pos >> 10);
1104         while ((distance > 0) &&
1105                (result = slow_seek_forward_until_error(distance,
1106                                                        c_info,
1107                                                        pos,
1108                                                        volume,
1109                                                        buf)) < 0) {
1110                 if (result == -EINTR) {
1111                         break;
1112                 }
1113                 TRACE(ft_t_noise, "seg_pos: %d", pos->seg_pos);
1114                 /* the failing segment is either pos->seg_pos or
1115                  * pos->seg_pos + 1. There is no need to further try
1116                  * that segment, because ftape_read_segment() already
1117                  * has tried very much to read it. So we start with
1118                  * following segment, which is pos->seg_pos + 1
1119                  */
1120                 if(search_valid_segment(pos->seg_pos+1, volume->end_seg, dest,
1121                                         pos, c_info,
1122                                         volume, buf) < 0) {
1123                         TRACE(ft_t_noise, "search_valid_segment() failed");
1124                         result = -EIO;
1125                         break;
1126                 }
1127                 distance = dest - (pos->volume_pos >> 10);
1128                 result = 0;
1129                 TRACE(ft_t_noise, "segment: %d", pos->seg_pos);
1130                 /* found valid segment, retry the seek */
1131         }
1132         TRACE_EXIT result;
1133 }
1134
/*  Estimate how many segments to skip (forward or backward) to reach
 *  the destination "dest", given in kilobytes of uncompressed data.
 *  The raw segment distance is scaled by the observed compression
 *  ratio "factor", kept as a 7-bit fixed-point fraction (units of
 *  1/128).  Returns the signed segment distance estimate.
 */
static int compute_seg_pos(const unsigned int dest,
                           zft_position *pos,
                           const zft_volinfo *volume)
{
        int segment;
        int distance = dest - (pos->volume_pos >> 10); /* in kilobytes */
        unsigned int raw_size;
        unsigned int virt_size;
        unsigned int factor;
        TRACE_FUN(ft_t_flow);

        if (distance >= 0) {
                /* remaining tape segments vs. remaining uncompressed
                 * data, rounded up to whole segments
                 */
                raw_size  = volume->end_seg - pos->seg_pos + 1;
                virt_size = ((unsigned int)(volume->size>>10) 
                             - (unsigned int)(pos->volume_pos>>10)
                             + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
                virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
                if (virt_size == 0 || raw_size == 0) {
                        TRACE_EXIT 0;
                }
                /* avoid overflowing the 7-bit fixed-point shift for
                 * very large raw sizes
                 */
                if (raw_size >= (1<<25)) {
                        factor = raw_size/(virt_size>>7);
                } else {
                        factor = (raw_size<<7)/virt_size;
                }
                segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
                segment = (segment * factor)>>7;
        } else {
                raw_size  = pos->seg_pos - volume->start_seg + 1;
                virt_size = ((unsigned int)(pos->volume_pos>>10)
                             + FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS - 1);
                virt_size /= FT_SECTORS_PER_SEGMENT - FT_ECC_SECTORS;
                if (virt_size == 0 || raw_size == 0) {
                        TRACE_EXIT 0;
                }
                if (raw_size >= (1<<25)) {
                        factor = raw_size/(virt_size>>7);
                } else {
                        factor = (raw_size<<7)/virt_size;
                }
                /* NOTE(review): unlike the forward branch, "factor" is
                 * computed but never applied to "segment" here -- it is
                 * only used by the TRACE below.  Possibly a deliberate,
                 * conservative backward estimate; confirm.
                 */
                segment = distance/(FT_SECTORS_PER_SEGMENT-FT_ECC_SECTORS);
        }
        TRACE(ft_t_noise, "factor: %d/%d", factor, 1<<7);
        TRACE_EXIT segment;
}
1180
/*  Dispatch table handed to the zftape driver via zft_cmpr_register().
 *  Positional initializers: the entry order must match the field order
 *  of struct zft_cmpr_ops declared in ../zftape/zftape-init.h.
 */
static struct zft_cmpr_ops cmpr_ops = {
        zftc_write,
        zftc_read,
        zftc_seek,
        zftc_lock,
        zftc_reset,
        zftc_cleanup
};
1189
/*  Announce the compressor and register it with the zftape driver.
 *  Called from init_module() when built as a module, or at kernel
 *  init time otherwise.  Returns 0 on success or the negative error
 *  propagated from zft_cmpr_register().
 */
int zft_compressor_init(void)
{
        TRACE_FUN(ft_t_flow);
        
#ifdef MODULE
        printk(KERN_INFO "zftape compressor v1.00a 970514 for " FTAPE_VERSION "\n");
        if (TRACE_LEVEL >= ft_t_info) {
                printk(
KERN_INFO "(c) 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de)\n"
KERN_INFO "Compressor for zftape (lzrw3 algorithm)\n"
KERN_INFO "Compiled for kernel version %s\n", UTS_RELEASE);
        }
#else /* !MODULE */
        /* print a short no-nonsense boot message */
        printk("zftape compressor v1.00a 970514 for Linux " UTS_RELEASE "\n");
        printk("For use with " FTAPE_VERSION "\n");
#endif /* MODULE */
        TRACE(ft_t_info, "zft_compressor_init @ 0x%p", zft_compressor_init);
        TRACE(ft_t_info, "installing compressor for zftape ...");
        /* TRACE_CATCH propagates a registration failure to the caller */
        TRACE_CATCH(zft_cmpr_register(&cmpr_ops),);
        TRACE_EXIT 0;
}
1212
1213 #ifdef MODULE
1214
1215 MODULE_AUTHOR(
1216         "(c) 1996, 1997 Claus-Justus Heine (claus@momo.math.rwth-aachen.de");
1217 MODULE_DESCRIPTION(
1218 "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
1219 MODULE_LICENSE("GPL");
1220
1221 /* Called by modules package when installing the driver
1222  */
1223 int init_module(void)
1224 {
1225         int result;
1226
1227 #if 0 /* FIXME --RR */
1228         if (!mod_member_present(&__this_module, can_unload))
1229                 return -EBUSY;
1230         __this_module.can_unload = can_unload;
1231 #endif
1232         result = zft_compressor_init();
1233         keep_module_locked = 0;
1234         return result;
1235 }
1236
1237 /* Called by modules package when removing the driver 
1238  */
1239 void cleanup_module(void)
1240 {
1241         TRACE_FUN(ft_t_flow);
1242
1243         if (zft_cmpr_unregister() != &cmpr_ops) {
1244                 TRACE(ft_t_info, "failed");
1245         } else {
1246                 TRACE(ft_t_info, "successful");
1247         }
1248         zftc_cleanup();
1249         printk(KERN_INFO "zft-compressor successfully unloaded.\n");
1250         TRACE_EXIT;
1251 }
1252 #endif /* MODULE */