1/* deflate.c -- compress data using the deflation algorithm
2 * Copyright (C) 1995-2002 Jean-loup Gailly.
3 * For conditions of distribution and use, see copyright notice in zlib.h
4 */
5
6/*
7 * ALGORITHM
8 *
9 * The "deflation" process depends on being able to identify portions
10 * of the input text which are identical to earlier input (within a
11 * sliding window trailing behind the input currently being processed).
12 *
13 * The most straightforward technique turns out to be the fastest for
14 * most input files: try all possible matches and select the longest.
15 * The key feature of this algorithm is that insertions into the string
16 * dictionary are very simple and thus fast, and deletions are avoided
17 * completely. Insertions are performed at each input character, whereas
18 * string matches are performed only when the previous match ends. So it
19 * is preferable to spend more time in matches to allow very fast string
20 * insertions and avoid deletions. The matching algorithm for small
21 * strings is inspired from that of Rabin & Karp. A brute force approach
22 * is used to find longer strings when a small match has been found.
23 * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
24 * (by Leonid Broukhis).
25 * A previous version of this file used a more sophisticated algorithm
26 * (by Fiala and Greene) which is guaranteed to run in linear amortized
27 * time, but has a larger average cost, uses more memory and is patented.
28 * However the F&G algorithm may be faster for some highly redundant
29 * files if the parameter max_chain_length (described below) is too large.
30 *
31 * ACKNOWLEDGEMENTS
32 *
33 * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
34 * I found it in 'freeze' written by Leonid Broukhis.
35 * Thanks to many people for bug reports and testing.
36 *
37 * REFERENCES
38 *
39 * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
40 * Available in ftp://ds.internic.net/rfc/rfc1951.txt
41 *
42 * A description of the Rabin and Karp algorithm is given in the book
43 * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
44 *
45 * Fiala,E.R., and Greene,D.H.
46 * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
47 *
48 */
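/* ===========================================================================
 * Illustrative sketch, excluded from compilation: roughly how an application
 * drives this code through the public zlib API for a one-shot, in-memory
 * compression.  Based on the documented zlib interface; the function name
 * and buffer parameters below are hypothetical, not part of this file.
 */
#if 0
static int compress_buffer(Bytef *dst, uLongf *dst_len,
                           const Bytef *src, uLong src_len, int level)
{
    z_stream strm;
    int err;

    strm.zalloc = Z_NULL;            /* use the default zcalloc/zcfree */
    strm.zfree  = Z_NULL;
    strm.opaque = (voidpf)0;

    err = deflateInit(&strm, level); /* ends up in deflateInit2_() below */
    if (err != Z_OK) return err;

    strm.next_in   = (Bytef *)src;
    strm.avail_in  = (uInt)src_len;
    strm.next_out  = dst;
    strm.avail_out = (uInt)*dst_len;

    err = deflate(&strm, Z_FINISH);  /* whole input in a single call */
    *dst_len = strm.total_out;
    deflateEnd(&strm);

    return err == Z_STREAM_END ? Z_OK : (err == Z_OK ? Z_BUF_ERROR : err);
}
#endif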
49
50/* @(#) $Id$ */
51
52#include "deflate.h"
53
54#define read_buf dread_buf
55
56const char deflate_copyright[] =
57 " deflate 1.1.4 Copyright 1995-2002 Jean-loup Gailly ";
58/*
59 If you use the zlib library in a product, an acknowledgment is welcome
60 in the documentation of your product. If for some reason you cannot
61 include such an acknowledgment, I would appreciate that you keep this
62 copyright string in the executable of your product.
63 */
64
65/* ===========================================================================
66 * Function prototypes.
67 */
68typedef enum {
69 need_more, /* block not completed, need more input or more output */
70 block_done, /* block flush performed */
71 finish_started, /* finish started, need only more output at next deflate */
72 finish_done /* finish done, accept no more input or output */
73} block_state;
74
75typedef block_state (*compress_func) OF((deflate_state *s, int flush));
76/* Compression function. Returns the block state after the call. */
77
78local void fill_window OF((deflate_state *s));
79local block_state deflate_stored OF((deflate_state *s, int flush));
80local block_state deflate_fast OF((deflate_state *s, int flush));
81local block_state deflate_slow OF((deflate_state *s, int flush));
82local void lm_init OF((deflate_state *s));
83local void putShortMSB OF((deflate_state *s, uInt b));
84local void flush_pending OF((z_streamp strm));
85local int read_buf OF((z_streamp strm, Bytef *buf, unsigned size));
86#ifdef ASMV
87 void match_init OF((void)); /* asm code initialization */
88 uInt longest_match OF((deflate_state *s, IPos cur_match));
89#else
90local uInt longest_match OF((deflate_state *s, IPos cur_match));
91#endif
92
93#ifdef DEBUG
94local void check_match OF((deflate_state *s, IPos start, IPos match,
95 int length));
96#endif
97
98/* ===========================================================================
99 * Local data
100 */
101
102#define NIL 0
103/* Tail of hash chains */
104
105#ifndef TOO_FAR
106# define TOO_FAR 4096
107#endif
108/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
109
110#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
111/* Minimum amount of lookahead, except at the end of the input file.
112 * See deflate.c for comments about the MIN_MATCH+1.
113 */
114
115/* Values for max_lazy_match, good_match and max_chain_length, depending on
116 * the desired pack level (0..9). The values given below have been tuned to
117 * exclude worst case performance for pathological files. Better values may be
118 * found for specific files.
119 */
120typedef struct config_s {
121 ush good_length; /* reduce lazy search above this match length */
122 ush max_lazy; /* do not perform lazy search above this match length */
123 ush nice_length; /* quit search above this match length */
124 ush max_chain;
125 compress_func func;
126} config;
127
128local const config configuration_table[10] = {
129/* good lazy nice chain */
130/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
131/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
132/* 2 */ {4, 5, 16, 8, deflate_fast},
133/* 3 */ {4, 6, 32, 32, deflate_fast},
134
135/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
136/* 5 */ {8, 16, 32, 32, deflate_slow},
137/* 6 */ {8, 16, 128, 128, deflate_slow},
138/* 7 */ {8, 32, 128, 256, deflate_slow},
139/* 8 */ {32, 128, 258, 1024, deflate_slow},
140/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
141
142/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
143 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
144 * meaning.
145 */
146
147#define EQUAL 0
148/* result of memcmp for equal strings */
149
150struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
151
152/* ===========================================================================
153 * Update a hash value with the given input byte
154 * IN assertion: all calls to UPDATE_HASH are made with consecutive
155 * input characters, so that a running hash key can be computed from the
156 * previous key instead of complete recalculation each time.
157 */
158#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
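/* Worked illustration: with the typical memLevel of 8, hash_bits is 15 and
 * hash_shift is (15+MIN_MATCH-1)/MIN_MATCH = 5, so after MIN_MATCH updates
 * the first byte has been shifted left by 15 bits and removed by hash_mask;
 * the running hash therefore depends only on the last MIN_MATCH bytes:
 *
 *     h = 0;
 *     UPDATE_HASH(s, h, s->window[i]);      (seeded in fill_window())
 *     UPDATE_HASH(s, h, s->window[i+1]);
 *     UPDATE_HASH(s, h, s->window[i+2]);    (done inside INSERT_STRING())
 */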
159
160
161/* ===========================================================================
162 * Insert string str in the dictionary and set match_head to the previous head
163 * of the hash chain (the most recent string with same hash key). Return
164 * the previous length of the hash chain.
165 * If this file is compiled with -DFASTEST, the compression level is forced
166 * to 1, and no hash chains are maintained.
167 * IN assertion: all calls to INSERT_STRING are made with consecutive
168 * input characters and the first MIN_MATCH bytes of str are valid
169 * (except for the last MIN_MATCH-1 bytes of the input file).
170 */
171#ifdef FASTEST
172#define INSERT_STRING(s, str, match_head) \
173 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
174 match_head = s->head[s->ins_h], \
175 s->head[s->ins_h] = (Pos)(str))
176#else
177#define INSERT_STRING(s, str, match_head) \
178 (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
179 s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
180 s->head[s->ins_h] = (Pos)(str))
181#endif
182
183/* ===========================================================================
184 * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
185 * prev[] will be initialized on the fly.
186 */
187#define CLEAR_HASH(s) \
188 s->head[s->hash_size-1] = NIL; \
189 zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
190
191/* ========================================================================= */
192int ZEXPORT deflateInit_(strm, level, version, stream_size)
193 z_streamp strm;
194 int level;
195 const char *version;
196 int stream_size;
197{
198 return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
199 Z_DEFAULT_STRATEGY, version, stream_size);
200 /* To do: ignore strm->next_in if we use it as window */
201}
202
203/* ========================================================================= */
204int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
205 version, stream_size)
206 z_streamp strm;
207 int level;
208 int method;
209 int windowBits;
210 int memLevel;
211 int strategy;
212 const char *version;
213 int stream_size;
214{
215 deflate_state *s;
216 int noheader = 0;
217 static const char* my_version = ZLIB_VERSION;
218
219 ushf *overlay;
220 /* We overlay pending_buf and d_buf+l_buf. This works since the average
221 * output size for (length,distance) codes is <= 24 bits.
222 */
223
224 if (version == Z_NULL || version[0] != my_version[0] ||
225 stream_size != sizeof(z_stream)) {
226 return Z_VERSION_ERROR;
227 }
228 if (strm == Z_NULL) return Z_STREAM_ERROR;
229
230 strm->msg = Z_NULL;
231 if (strm->zalloc == Z_NULL) {
232 strm->zalloc = zcalloc;
233 strm->opaque = (voidpf)0;
234 }
235 if (strm->zfree == Z_NULL) strm->zfree = zcfree;
236
237 if (level == Z_DEFAULT_COMPRESSION) level = 6;
238#ifdef FASTEST
239 level = 1;
240#endif
241
242 if (windowBits < 0) { /* undocumented feature: suppress zlib header */
243 noheader = 1;
244 windowBits = -windowBits;
245 }
246 if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
247 windowBits < 9 || windowBits > 15 || level < 0 || level > 9 ||
248 strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
249 return Z_STREAM_ERROR;
250 }
251 s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
252 if (s == Z_NULL) return Z_MEM_ERROR;
253 strm->state = (struct internal_state FAR *)s;
254 s->strm = strm;
255
256 s->noheader = noheader;
257 s->w_bits = windowBits;
258 s->w_size = 1 << s->w_bits;
259 s->w_mask = s->w_size - 1;
260
261 s->hash_bits = memLevel + 7;
262 s->hash_size = 1 << s->hash_bits;
263 s->hash_mask = s->hash_size - 1;
264 s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
265
266 s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
267 s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
268 s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
269
270 s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
271
272 overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
273 s->pending_buf = (uchf *) overlay;
274 s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
275
276 if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
277 s->pending_buf == Z_NULL) {
278 strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
279 deflateEnd (strm);
280 return Z_MEM_ERROR;
281 }
282 s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
283 s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
284
285 s->level = level;
286 s->strategy = strategy;
287 s->method = (Byte)method;
288
289 return deflateReset(strm);
290}
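/* ===========================================================================
 * Illustrative sketch, excluded from compilation: the negative-windowBits
 * convention handled above suppresses the zlib header and adler32 trailer,
 * giving a raw deflate stream (as used, for example, by gzip-style wrappers
 * that write their own header).  The parameter choices are just an example.
 */
#if 0
    z_stream strm;

    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = (voidpf)0;
    /* -MAX_WBITS: 32K window, no zlib header or trailer */
    deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -MAX_WBITS,
                 DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
#endif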
291
292/* ========================================================================= */
293int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength)
294 z_streamp strm;
295 const Bytef *dictionary;
296 uInt dictLength;
297{
298 deflate_state *s;
299 uInt length = dictLength;
300 uInt n;
301 IPos hash_head = 0;
302
303 if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL ||
304 strm->state->status != INIT_STATE) return Z_STREAM_ERROR;
305
306 s = strm->state;
307 strm->adler = adler32(strm->adler, dictionary, dictLength);
308
309 if (length < MIN_MATCH) return Z_OK;
310 if (length > MAX_DIST(s)) {
311 length = MAX_DIST(s);
312#ifndef USE_DICT_HEAD
313 dictionary += dictLength - length; /* use the tail of the dictionary */
314#endif
315 }
316 zmemcpy(s->window, dictionary, length);
317 s->strstart = length;
318 s->block_start = (long)length;
319
320 /* Insert all strings in the hash table (except for the last two bytes).
321 * s->lookahead stays null, so s->ins_h will be recomputed at the next
322 * call of fill_window.
323 */
324 s->ins_h = s->window[0];
325 UPDATE_HASH(s, s->ins_h, s->window[1]);
326 for (n = 0; n <= length - MIN_MATCH; n++) {
327 INSERT_STRING(s, n, hash_head);
328 }
329 if (hash_head) hash_head = 0; /* to make compiler happy */
330 return Z_OK;
331}
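/* ===========================================================================
 * Illustrative sketch, excluded from compilation: a preset dictionary has to
 * be supplied between deflateInit() and the first call of deflate() -- the
 * INIT_STATE check above enforces this.  The dictionary contents here are
 * hypothetical.
 */
#if 0
    z_stream strm;
    static const Bytef dict[] = "a frequently repeated preamble";

    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = (voidpf)0;
    deflateInit(&strm, Z_BEST_COMPRESSION);
    deflateSetDictionary(&strm, dict, (uInt)(sizeof(dict) - 1));
    /* ... the usual deflate() loop follows; the decompressor must supply
     * the same dictionary when inflate() reports Z_NEED_DICT ... */
#endif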
332
333/* ========================================================================= */
334int ZEXPORT deflateReset (strm)
335 z_streamp strm;
336{
337 deflate_state *s;
338
339 if (strm == Z_NULL || strm->state == Z_NULL ||
340 strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
341
342 strm->total_in = strm->total_out = 0;
343 strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
344 strm->data_type = Z_UNKNOWN;
345
346 s = (deflate_state *)strm->state;
347 s->pending = 0;
348 s->pending_out = s->pending_buf;
349
350 if (s->noheader < 0) {
351 s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
352 }
353 s->status = s->noheader ? BUSY_STATE : INIT_STATE;
354 strm->adler = 1;
355 s->last_flush = Z_NO_FLUSH;
356
357 _tr_init(s);
358 lm_init(s);
359
360 return Z_OK;
361}
362
363/* ========================================================================= */
364int ZEXPORT deflateParams(strm, level, strategy)
365 z_streamp strm;
366 int level;
367 int strategy;
368{
369 deflate_state *s;
370 compress_func func;
371 int err = Z_OK;
372
373 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
374 s = strm->state;
375
376 if (level == Z_DEFAULT_COMPRESSION) {
377 level = 6;
378 }
379 if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
380 return Z_STREAM_ERROR;
381 }
382 func = configuration_table[s->level].func;
383
384 if (func != configuration_table[level].func && strm->total_in != 0) {
385 /* Flush the last buffer: */
386 err = deflate(strm, Z_PARTIAL_FLUSH);
387 }
388 if (s->level != level) {
389 s->level = level;
390 s->max_lazy_match = configuration_table[level].max_lazy;
391 s->good_match = configuration_table[level].good_length;
392 s->nice_match = configuration_table[level].nice_length;
393 s->max_chain_length = configuration_table[level].max_chain;
394 }
395 s->strategy = strategy;
396 return err;
397}
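/* ===========================================================================
 * Illustrative sketch, excluded from compilation: deflateParams() may be
 * called between deflate() calls, e.g. to drop to a faster level for data
 * that is not compressing well.  As implemented above, a Z_PARTIAL_FLUSH is
 * issued first when the change would switch compression functions.  The
 * stream is assumed to have been set up elsewhere.
 */
#if 0
    deflate(&strm, Z_NO_FLUSH);                  /* some data at level 6 */
    deflateParams(&strm, 1, Z_DEFAULT_STRATEGY); /* switch to fastest level */
    deflate(&strm, Z_NO_FLUSH);                  /* rest compressed at level 1 */
#endif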
398
399/* =========================================================================
400 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
401 * IN assertion: the stream state is correct and there is enough room in
402 * pending_buf.
403 */
404local void putShortMSB (s, b)
405 deflate_state *s;
406 uInt b;
407{
408 put_byte(s, (Byte)(b >> 8));
409 put_byte(s, (Byte)(b & 0xff));
410}
411
412/* =========================================================================
413 * Flush as much pending output as possible. All deflate() output goes
414 * through this function so some applications may wish to modify it
415 * to avoid allocating a large strm->next_out buffer and copying into it.
416 * (See also read_buf()).
417 */
418local void flush_pending(strm)
419 z_streamp strm;
420{
421 unsigned len = strm->state->pending;
422
423 if (len > strm->avail_out) len = strm->avail_out;
424 if (len == 0) return;
425
426 zmemcpy(strm->next_out, strm->state->pending_out, len);
427 strm->next_out += len;
428 strm->state->pending_out += len;
429 strm->total_out += len;
430 strm->avail_out -= len;
431 strm->state->pending -= len;
432 if (strm->state->pending == 0) {
433 strm->state->pending_out = strm->state->pending_buf;
434 }
435}
436
437/* ========================================================================= */
438int ZEXPORT deflate (strm, flush)
439 z_streamp strm;
440 int flush;
441{
442 int old_flush; /* value of flush param for previous deflate call */
443 deflate_state *s;
444
445 if (strm == Z_NULL || strm->state == Z_NULL ||
446 flush > Z_INSERT_ONLY || flush < 0) {
447 return Z_STREAM_ERROR;
448 }
449 s = strm->state;
450
451 if (strm->next_out == Z_NULL ||
452 (strm->next_in == Z_NULL && strm->avail_in != 0) ||
453 (s->status == FINISH_STATE && flush != Z_FINISH)) {
454 ERR_RETURN(strm, Z_STREAM_ERROR);
455 }
456 if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
457
458 s->strm = strm; /* just in case */
459 old_flush = s->last_flush;
460 s->last_flush = flush;
461
462 /* Write the zlib header */
463 if (s->status == INIT_STATE) {
464
465 uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
466 uInt level_flags = (s->level-1) >> 1;
467
468 if (level_flags > 3) level_flags = 3;
469 header |= (level_flags << 6);
470 if (s->strstart != 0) header |= PRESET_DICT;
471 header += 31 - (header % 31);
472
473 s->status = BUSY_STATE;
474 putShortMSB(s, header);
475
476 /* Save the adler32 of the preset dictionary: */
477 if (s->strstart != 0) {
478 putShortMSB(s, (uInt)(strm->adler >> 16));
479 putShortMSB(s, (uInt)(strm->adler & 0xffff));
480 }
481 strm->adler = 1L;
482 }
483
484 /* Flush as much pending output as possible */
485 if (s->pending != 0) {
486 flush_pending(strm);
487 if (strm->avail_out == 0) {
488 /* Since avail_out is 0, deflate will be called again with
489 * more output space, but possibly with both pending and
490 * avail_in equal to zero. There won't be anything to do,
491 * but this is not an error situation so make sure we
492 * return OK instead of BUF_ERROR at next call of deflate:
493 */
494 s->last_flush = -1;
495 return Z_OK;
496 }
497
498 /* Make sure there is something to do and avoid duplicate consecutive
499 * flushes. For repeated and useless calls with Z_FINISH, we keep
500 * returning Z_STREAM_END instead of Z_BUF_ERROR.
501 */
502 } else if (strm->avail_in == 0 && flush <= old_flush &&
503 flush != Z_FINISH) {
504 ERR_RETURN(strm, Z_BUF_ERROR);
505 }
506
507 /* User must not provide more input after the first FINISH: */
508 if (s->status == FINISH_STATE && strm->avail_in != 0) {
509 ERR_RETURN(strm, Z_BUF_ERROR);
510 }
511
512 /* Start a new block or continue the current one.
513 */
514 if (strm->avail_in != 0 || s->lookahead != 0 ||
515 (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
516 block_state bstate;
517
518 bstate = (*(configuration_table[s->level].func))(s, flush);
519
520 if (bstate == finish_started || bstate == finish_done) {
521 s->status = FINISH_STATE;
522 }
523 if (bstate == need_more || bstate == finish_started) {
524 if (strm->avail_out == 0) {
525 s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
526 }
527 return Z_OK;
528 /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
529 * of deflate should use the same flush parameter to make sure
530 * that the flush is complete. So we don't have to output an
531 * empty block here, this will be done at next call. This also
532 * ensures that for a very small output buffer, we emit at most
533 * one empty block.
534 */
535 }
536 if (bstate == block_done) {
537 if (flush == Z_PARTIAL_FLUSH) {
538 _tr_align(s);
539 } else { /* FULL_FLUSH or SYNC_FLUSH */
540 _tr_stored_block(s, (char*)0, 0L, 0);
541 /* For a full flush, this empty block will be recognized
542 * as a special marker by inflate_sync().
543 */
544 if (flush == Z_FULL_FLUSH) {
545 CLEAR_HASH(s); /* forget history */
546 }
547 }
548 flush_pending(strm);
549 if (strm->avail_out == 0) {
550 s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
551 return Z_OK;
552 }
553 }
554 }
555 Assert(strm->avail_out > 0, "bug2");
556
557 if (flush != Z_FINISH) return Z_OK;
558 if (s->noheader) return Z_STREAM_END;
559
560 /* Write the zlib trailer (adler32) */
561 putShortMSB(s, (uInt)(strm->adler >> 16));
562 putShortMSB(s, (uInt)(strm->adler & 0xffff));
563 flush_pending(strm);
564 /* If avail_out is zero, the application will call deflate again
565 * to flush the rest.
566 */
567 s->noheader = -1; /* write the trailer only once! */
568 return s->pending != 0 ? Z_OK : Z_STREAM_END;
569}
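/* ===========================================================================
 * Illustrative sketch, excluded from compilation: because deflate() can
 * return with avail_out == 0 while output is still pending, callers normally
 * loop until the requested flush completes.  The stream setup, out_buf and
 * write_out() sink below are hypothetical.
 */
#if 0
    Byte out_buf[16384];
    int err;

    do {
        strm.next_out  = out_buf;
        strm.avail_out = sizeof(out_buf);
        err = deflate(&strm, Z_SYNC_FLUSH);
        write_out(out_buf, sizeof(out_buf) - strm.avail_out);
    } while (err == Z_OK && strm.avail_out == 0);
#endif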
570
571/* ========================================================================= */
572int ZEXPORT deflateEnd (strm)
573 z_streamp strm;
574{
575 int status;
576
577 if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
578
579 status = strm->state->status;
580 if (status != INIT_STATE && status != BUSY_STATE &&
581 status != FINISH_STATE) {
582 return Z_STREAM_ERROR;
583 }
584
585 /* Deallocate in reverse order of allocations: */
586 TRY_FREE(strm, strm->state->pending_buf);
587 TRY_FREE(strm, strm->state->head);
588 TRY_FREE(strm, strm->state->prev);
589 TRY_FREE(strm, strm->state->window);
590
591 ZFREE(strm, strm->state);
592 strm->state = Z_NULL;
593
594 return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
595}
596
597/* =========================================================================
598 * Copy the source state to the destination state.
599 * To simplify the source, this is not supported for 16-bit MSDOS (which
600 * doesn't have enough memory anyway to duplicate compression states).
601 */
602int ZEXPORT deflateCopy (dest, source)
603 z_streamp dest;
604 z_streamp source;
605{
606#ifdef MAXSEG_64K
607 return Z_STREAM_ERROR;
608#else
609 deflate_state *ds;
610 deflate_state *ss;
611 ushf *overlay;
612
613
614 if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) {
615 return Z_STREAM_ERROR;
616 }
617
618 ss = source->state;
619
620 *dest = *source;
621
622 ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
623 if (ds == Z_NULL) return Z_MEM_ERROR;
624 dest->state = (struct internal_state FAR *) ds;
625 *ds = *ss;
626 ds->strm = dest;
627
628 ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
629 ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
630 ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
631 overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
632 ds->pending_buf = (uchf *) overlay;
633
634 if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
635 ds->pending_buf == Z_NULL) {
636 deflateEnd (dest);
637 return Z_MEM_ERROR;
638 }
639 /* the following zmemcpy calls do not work for 16-bit MSDOS */
640 zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
641 zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
642 zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
643 zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
644
645 ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
646 ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
647 ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
648
649 ds->l_desc.dyn_tree = ds->dyn_ltree;
650 ds->d_desc.dyn_tree = ds->dyn_dtree;
651 ds->bl_desc.dyn_tree = ds->bl_tree;
652
653 return Z_OK;
654#endif
655}
656
657/* ===========================================================================
658 * Read a new buffer from the current input stream, update the adler32
659 * and total number of bytes read. All deflate() input goes through
660 * this function so some applications may wish to modify it to avoid
661 * allocating a large strm->next_in buffer and copying from it.
662 * (See also flush_pending()).
663 */
664local int read_buf(strm, buf, size)
665 z_streamp strm;
666 Bytef *buf;
667 unsigned size;
668{
669 unsigned len = strm->avail_in;
670
671 if (len > size) len = size;
672 if (len == 0) return 0;
673
674 strm->avail_in -= len;
675
676 if (!strm->state->noheader) {
677 strm->adler = adler32(strm->adler, strm->next_in, len);
678 }
679 zmemcpy(buf, strm->next_in, len);
680 strm->next_in += len;
681 strm->total_in += len;
682
683 return (int)len;
684}
685
686/* ===========================================================================
687 * Initialize the "longest match" routines for a new zlib stream
688 */
689local void lm_init (s)
690 deflate_state *s;
691{
692 s->window_size = (ulg)2L*s->w_size;
693
694 CLEAR_HASH(s);
695
696 /* Set the default configuration parameters:
697 */
698 s->max_lazy_match = configuration_table[s->level].max_lazy;
699 s->good_match = configuration_table[s->level].good_length;
700 s->nice_match = configuration_table[s->level].nice_length;
701 s->max_chain_length = configuration_table[s->level].max_chain;
702
703 s->strstart = 0;
704 s->block_start = 0L;
705 s->lookahead = 0;
706 s->match_length = s->prev_length = MIN_MATCH-1;
707 s->match_available = 0;
708 s->ins_h = 0;
709#ifdef ASMV
710 match_init(); /* initialize the asm code */
711#endif
712}
713
714/* ===========================================================================
715 * Set match_start to the longest match starting at the given string and
716 * return its length. Matches shorter or equal to prev_length are discarded,
717 * in which case the result is equal to prev_length and match_start is
718 * garbage.
719 * IN assertions: cur_match is the head of the hash chain for the current
720 * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
721 * OUT assertion: the match length is not greater than s->lookahead.
722 */
723#ifndef ASMV
724/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
725 * match.S. The code will be functionally equivalent.
726 */
727#ifndef FASTEST
728local uInt longest_match(s, cur_match)
729 deflate_state *s;
730 IPos cur_match; /* current match */
731{
732 unsigned chain_length = s->max_chain_length;/* max hash chain length */
733 register Bytef *scan = s->window + s->strstart; /* current string */
734 register Bytef *match; /* matched string */
735 register int len; /* length of current match */
736 int best_len = s->prev_length; /* best match length so far */
737 int nice_match = s->nice_match; /* stop if match long enough */
738 IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
739 s->strstart - (IPos)MAX_DIST(s) : NIL;
740 /* Stop when cur_match becomes <= limit. To simplify the code,
741 * we prevent matches with the string of window index 0.
742 */
743 Posf *prev = s->prev;
744 uInt wmask = s->w_mask;
745
746#ifdef UNALIGNED_OK
747 /* Compare two bytes at a time. Note: this is not always beneficial.
748 * Try with and without -DUNALIGNED_OK to check.
749 */
750 register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
751 register ush scan_start = *(ushf*)scan;
752 register ush scan_end = *(ushf*)(scan+best_len-1);
753#else
754 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
755 register Byte scan_end1 = scan[best_len-1];
756 register Byte scan_end = scan[best_len];
757#endif
758
759 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
760 * It is easy to get rid of this optimization if necessary.
761 */
762 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
763
764 /* Do not waste too much time if we already have a good match: */
765 if (s->prev_length >= s->good_match) {
766 chain_length >>= 2;
767 }
768 /* Do not look for matches beyond the end of the input. This is necessary
769 * to make deflate deterministic.
770 */
771 if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
772
773 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
774
775 do {
776 Assert(cur_match < s->strstart, "no future");
777 match = s->window + cur_match;
778
779 /* Skip to next match if the match length cannot increase
780 * or if the match length is less than 2:
781 */
782#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
783 /* This code assumes sizeof(unsigned short) == 2. Do not use
784 * UNALIGNED_OK if your compiler uses a different size.
785 */
786 if (*(ushf*)(match+best_len-1) != scan_end ||
787 *(ushf*)match != scan_start) continue;
788
789 /* It is not necessary to compare scan[2] and match[2] since they are
790 * always equal when the other bytes match, given that the hash keys
791 * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
792 * strstart+3, +5, ... up to strstart+257. We check for insufficient
793 * lookahead only every 4th comparison; the 128th check will be made
794 * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
795 * necessary to put more guard bytes at the end of the window, or
796 * to check more often for insufficient lookahead.
797 */
798 Assert(scan[2] == match[2], "scan[2]?");
799 scan++, match++;
800 do {
801 } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
802 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
803 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
804 *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
805 scan < strend);
806 /* The funny "do {}" generates better code on most compilers */
807
808 /* Here, scan <= window+strstart+257 */
809 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
810 if (*scan == *match) scan++;
811
812 len = (MAX_MATCH - 1) - (int)(strend-scan);
813 scan = strend - (MAX_MATCH-1);
814
815#else /* UNALIGNED_OK */
816
817 if (match[best_len] != scan_end ||
818 match[best_len-1] != scan_end1 ||
819 *match != *scan ||
820 *++match != scan[1]) continue;
821
822 /* The check at best_len-1 can be removed because it will be made
823 * again later. (This heuristic is not always a win.)
824 * It is not necessary to compare scan[2] and match[2] since they
825 * are always equal when the other bytes match, given that
826 * the hash keys are equal and that HASH_BITS >= 8.
827 */
828 scan += 2, match++;
829 Assert(*scan == *match, "match[2]?");
830
831 /* We check for insufficient lookahead only every 8th comparison;
832 * the 256th check will be made at strstart+258.
833 */
834 do {
835 } while (*++scan == *++match && *++scan == *++match &&
836 *++scan == *++match && *++scan == *++match &&
837 *++scan == *++match && *++scan == *++match &&
838 *++scan == *++match && *++scan == *++match &&
839 scan < strend);
840
841 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
842
843 len = MAX_MATCH - (int)(strend - scan);
844 scan = strend - MAX_MATCH;
845
846#endif /* UNALIGNED_OK */
847
848 if (len > best_len) {
849 s->match_start = cur_match;
850 best_len = len;
851 if (len >= nice_match) break;
852#ifdef UNALIGNED_OK
853 scan_end = *(ushf*)(scan+best_len-1);
854#else
855 scan_end1 = scan[best_len-1];
856 scan_end = scan[best_len];
857#endif
858 }
859 } while ((cur_match = prev[cur_match & wmask]) > limit
860 && --chain_length != 0);
861
862 if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
863 return s->lookahead;
864}
865
866#else /* FASTEST */
867/* ---------------------------------------------------------------------------
868 * Optimized version for level == 1 only
869 */
870local uInt longest_match(s, cur_match)
871 deflate_state *s;
872 IPos cur_match; /* current match */
873{
874 register Bytef *scan = s->window + s->strstart; /* current string */
875 register Bytef *match; /* matched string */
876 register int len; /* length of current match */
877 register Bytef *strend = s->window + s->strstart + MAX_MATCH;
878
879 /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
880 * It is easy to get rid of this optimization if necessary.
881 */
882 Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
883
884 Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
885
886 Assert(cur_match < s->strstart, "no future");
887
888 match = s->window + cur_match;
889
890 /* Return failure if the match length is less than 2:
891 */
892 if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1;
893
894 /* The check at best_len-1 can be removed because it will be made
895 * again later. (This heuristic is not always a win.)
896 * It is not necessary to compare scan[2] and match[2] since they
897 * are always equal when the other bytes match, given that
898 * the hash keys are equal and that HASH_BITS >= 8.
899 */
900 scan += 2, match += 2;
901 Assert(*scan == *match, "match[2]?");
902
903 /* We check for insufficient lookahead only every 8th comparison;
904 * the 256th check will be made at strstart+258.
905 */
906 do {
907 } while (*++scan == *++match && *++scan == *++match &&
908 *++scan == *++match && *++scan == *++match &&
909 *++scan == *++match && *++scan == *++match &&
910 *++scan == *++match && *++scan == *++match &&
911 scan < strend);
912
913 Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
914
915 len = MAX_MATCH - (int)(strend - scan);
916
917 if (len < MIN_MATCH) return MIN_MATCH - 1;
918
919 s->match_start = cur_match;
920 return len <= s->lookahead ? len : s->lookahead;
921}
922#endif /* FASTEST */
923#endif /* ASMV */
924
925#ifdef DEBUG
926/* ===========================================================================
927 * Check that the match at match_start is indeed a match.
928 */
929local void check_match(s, start, match, length)
930 deflate_state *s;
931 IPos start, match;
932 int length;
933{
934 /* check that the match is indeed a match */
935 if (zmemcmp(s->window + match,
936 s->window + start, length) != EQUAL) {
937 fprintf(stderr, " start %u, match %u, length %d\n",
938 start, match, length);
939 do {
940 fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
941 } while (--length != 0);
942 z_error("invalid match");
943 }
944 if (z_verbose > 1) {
945 fprintf(stderr,"\\[%d,%d]", start-match, length);
946 do { putc(s->window[start++], stderr); } while (--length != 0);
947 }
948}
949#else
950# define check_match(s, start, match, length)
951#endif
952
953/* ===========================================================================
954 * Fill the window when the lookahead becomes insufficient.
955 * Updates strstart and lookahead.
956 *
957 * IN assertion: lookahead < MIN_LOOKAHEAD
958 * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
959 * At least one byte has been read, or avail_in == 0; reads are
960 * performed for at least two bytes (required for the zip translate_eol
961 * option -- not supported here).
962 */
963local void fill_window(s)
964 deflate_state *s;
965{
966 register unsigned n, m;
967 register Posf *p;
968 unsigned more; /* Amount of free space at the end of the window. */
969 uInt wsize = s->w_size;
970
971 do {
972 more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
973
974 /* Deal with !@#$% 64K limit: */
975 if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
976 more = wsize;
977
978 } else if (more == (unsigned)(-1)) {
979 /* Very unlikely, but possible on 16 bit machine if strstart == 0
980 * and lookahead == 1 (input done one byte at time)
981 */
982 more--;
983
984 /* If the window is almost full and there is insufficient lookahead,
985 * move the upper half to the lower one to make room in the upper half.
986 */
987 } else if (s->strstart >= wsize+MAX_DIST(s)) {
988
989 zmemcpy(s->window, s->window+wsize, (unsigned)wsize);
990 s->match_start -= wsize;
991 s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
992 s->block_start -= (long) wsize;
993
994 /* Slide the hash table (could be avoided with 32 bit values
995 at the expense of memory usage). We slide even when level == 0
996 to keep the hash table consistent if we switch back to level > 0
997 later. (Using level 0 permanently is not an optimal usage of
998 zlib, so we don't care about this pathological case.)
999 */
1000 n = s->hash_size;
1001 p = &s->head[n];
1002 do {
1003 m = *--p;
1004 *p = (Pos)(m >= wsize ? m-wsize : NIL);
1005 } while (--n);
1006
1007 n = wsize;
1008#ifndef FASTEST
1009 p = &s->prev[n];
1010 do {
1011 m = *--p;
1012 *p = (Pos)(m >= wsize ? m-wsize : NIL);
1013 /* If n is not on any hash chain, prev[n] is garbage but
1014 * its value will never be used.
1015 */
1016 } while (--n);
1017#endif
1018 more += wsize;
1019 }
1020 if (s->strm->avail_in == 0) return;
1021
1022 /* If there was no sliding:
1023 * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
1024 * more == window_size - lookahead - strstart
1025 * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
1026 * => more >= window_size - 2*WSIZE + 2
1027 * In the BIG_MEM or MMAP case (not yet supported),
1028 * window_size == input_size + MIN_LOOKAHEAD &&
1029 * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
1030 * Otherwise, window_size == 2*WSIZE so more >= 2.
1031 * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
1032 */
1033 Assert(more >= 2, "more < 2");
1034
1035 n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more);
1036 s->lookahead += n;
1037
1038 /* Initialize the hash value now that we have some input: */
1039 if (s->lookahead >= MIN_MATCH) {
1040 s->ins_h = s->window[s->strstart];
1041 UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1042#if MIN_MATCH != 3
1043 Call UPDATE_HASH() MIN_MATCH-3 more times
1044#endif
1045 }
1046 /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
1047 * but this is not important since only literal bytes will be emitted.
1048 */
1049
1050 } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
1051}
1052
1053/* ===========================================================================
1054 * Flush the current block, with given end-of-file flag.
1055 * IN assertion: strstart is set to the end of the current match.
1056 */
1057#define FLUSH_BLOCK_ONLY(s, eof) { \
1058 _tr_flush_block(s, (s->block_start >= 0L ? \
1059 (charf *)&s->window[(unsigned)s->block_start] : \
1060 (charf *)Z_NULL), \
1061 (ulg)((long)s->strstart - s->block_start), \
1062 (eof)); \
1063 s->block_start = s->strstart; \
1064 flush_pending(s->strm); \
1065 Tracev((stderr,"[FLUSH]")); \
1066}
1067
1068/* Same but force premature exit if necessary. */
1069#define FLUSH_BLOCK(s, eof) { \
1070 FLUSH_BLOCK_ONLY(s, eof); \
1071 if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
1072}
1073
1074/* ===========================================================================
1075 * Copy without compression as much as possible from the input stream, return
1076 * the current block state.
1077 * This function does not insert new strings in the dictionary since
1078 * uncompressible data is probably not useful. This function is used
1079 * only for the level=0 compression option.
1080 * NOTE: this function should be optimized to avoid extra copying from
1081 * window to pending_buf.
1082 */
1083local block_state deflate_stored(s, flush)
1084 deflate_state *s;
1085 int flush;
1086{
1087 /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
1088 * to pending_buf_size, and each stored block has a 5 byte header:
1089 */
1090 ulg max_block_size = 0xffff;
1091 ulg max_start;
1092
1093 if (max_block_size > s->pending_buf_size - 5) {
1094 max_block_size = s->pending_buf_size - 5;
1095 }
1096
1097 /* Copy as much as possible from input to output: */
1098 for (;;) {
1099 /* Fill the window as much as possible: */
1100 if (s->lookahead <= 1) {
1101
1102 Assert(s->strstart < s->w_size+MAX_DIST(s) ||
1103 s->block_start >= (long)s->w_size, "slide too late");
1104
1105 fill_window(s);
1106 if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
1107
1108 if (s->lookahead == 0) break; /* flush the current block */
1109 }
1110 Assert(s->block_start >= 0L, "block gone");
1111
1112 s->strstart += s->lookahead;
1113 s->lookahead = 0;
1114
1115 if (flush == Z_INSERT_ONLY) {
1116 s->block_start = s->strstart;
1117 continue;
1118 }
1119
1120 /* Emit a stored block if pending_buf will be full: */
1121 max_start = s->block_start + max_block_size;
1122 if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
1123 /* strstart == 0 is possible when wraparound on 16-bit machine */
1124 s->lookahead = (uInt)(s->strstart - max_start);
1125 s->strstart = (uInt)max_start;
1126 FLUSH_BLOCK(s, 0);
1127 }
1128 /* Flush if we may have to slide, otherwise block_start may become
1129 * negative and the data will be gone:
1130 */
1131 if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
1132 FLUSH_BLOCK(s, 0);
1133 }
1134 }
1135 if (flush == Z_INSERT_ONLY) {
1136 s->block_start = s->strstart;
1137 return need_more;
1138 }
1139
1140 FLUSH_BLOCK(s, flush == Z_FINISH);
1141 return flush == Z_FINISH ? finish_done : block_done;
1142}
1143
1144/* ===========================================================================
1145 * Compress as much as possible from the input stream, return the current
1146 * block state.
1147 * This function does not perform lazy evaluation of matches and inserts
1148 * new strings in the dictionary only for unmatched strings or for short
1149 * matches. It is used only for the fast compression options.
1150 */
1151local block_state deflate_fast(s, flush)
1152 deflate_state *s;
1153 int flush;
1154{
1155 IPos hash_head = NIL; /* head of the hash chain */
1156 int bflush; /* set if current block must be flushed */
1157
1158 for (;;) {
1159 /* Make sure that we always have enough lookahead, except
1160 * at the end of the input file. We need MAX_MATCH bytes
1161 * for the next match, plus MIN_MATCH bytes to insert the
1162 * string following the next match.
1163 */
1164 if (s->lookahead < MIN_LOOKAHEAD) {
1165 fill_window(s);
1166 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1167 return need_more;
1168 }
1169 if (s->lookahead == 0) break; /* flush the current block */
1170 }
1171
1172 /* Insert the string window[strstart .. strstart+2] in the
1173 * dictionary, and set hash_head to the head of the hash chain:
1174 */
1175 if (s->lookahead >= MIN_MATCH) {
1176 INSERT_STRING(s, s->strstart, hash_head);
1177 }
1178
1179 if (flush == Z_INSERT_ONLY) {
1180 s->strstart++;
1181 s->lookahead--;
1182 continue;
1183 }
1184
1185 /* Find the longest match, discarding those <= prev_length.
1186 * At this point we have always match_length < MIN_MATCH
1187 */
1188 if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
1189 /* To simplify the code, we prevent matches with the string
1190 * of window index 0 (in particular we have to avoid a match
1191 * of the string with itself at the start of the input file).
1192 */
1193 if (s->strategy != Z_HUFFMAN_ONLY) {
1194 s->match_length = longest_match (s, hash_head);
1195 }
1196 /* longest_match() sets match_start */
1197 }
1198 if (s->match_length >= MIN_MATCH) {
1199 check_match(s, s->strstart, s->match_start, s->match_length);
1200
1201 _tr_tally_dist(s, s->strstart - s->match_start,
1202 s->match_length - MIN_MATCH, bflush);
1203
1204 s->lookahead -= s->match_length;
1205
1206 /* Insert new strings in the hash table only if the match length
1207 * is not too large. This saves time but degrades compression.
1208 */
1209#ifndef FASTEST
1210 if (s->match_length <= s->max_insert_length &&
1211 s->lookahead >= MIN_MATCH) {
1212 s->match_length--; /* string at strstart already in hash table */
1213 do {
1214 s->strstart++;
1215 INSERT_STRING(s, s->strstart, hash_head);
1216 /* strstart never exceeds WSIZE-MAX_MATCH, so there are
1217 * always MIN_MATCH bytes ahead.
1218 */
1219 } while (--s->match_length != 0);
1220 s->strstart++;
1221 } else
1222#endif
1223 {
1224 s->strstart += s->match_length;
1225 s->match_length = 0;
1226 s->ins_h = s->window[s->strstart];
1227 UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
1228#if MIN_MATCH != 3
1229 Call UPDATE_HASH() MIN_MATCH-3 more times
1230#endif
1231 /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
1232 * matter since it will be recomputed at next deflate call.
1233 */
1234 }
1235 } else {
1236 /* No match, output a literal byte */
1237 Tracevv((stderr,"%c", s->window[s->strstart]));
1238 _tr_tally_lit (s, s->window[s->strstart], bflush);
1239 s->lookahead--;
1240 s->strstart++;
1241 }
1242 if (bflush) FLUSH_BLOCK(s, 0);
1243 }
1244 if (flush == Z_INSERT_ONLY) {
1245 s->block_start = s->strstart;
1246 return need_more;
1247 }
1248 FLUSH_BLOCK(s, flush == Z_FINISH);
1249 return flush == Z_FINISH ? finish_done : block_done;
1250}
1251
1252/* ===========================================================================
1253 * Same as above, but achieves better compression. We use a lazy
1254 * evaluation for matches: a match is finally adopted only if there is
1255 * no better match at the next window position.
1256 */
1257local block_state deflate_slow(s, flush)
1258 deflate_state *s;
1259 int flush;
1260{
1261 IPos hash_head = NIL; /* head of hash chain */
1262 int bflush; /* set if current block must be flushed */
1263
1264 /* Process the input block. */
1265 for (;;) {
1266 /* Make sure that we always have enough lookahead, except
1267 * at the end of the input file. We need MAX_MATCH bytes
1268 * for the next match, plus MIN_MATCH bytes to insert the
1269 * string following the next match.
1270 */
1271 if (s->lookahead < MIN_LOOKAHEAD) {
1272 fill_window(s);
1273 if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
1274 return need_more;
1275 }
1276 if (s->lookahead == 0) break; /* flush the current block */
1277 }
1278
1279 /* Insert the string window[strstart .. strstart+2] in the
1280 * dictionary, and set hash_head to the head of the hash chain:
1281 */
1282 if (s->lookahead >= MIN_MATCH) {
1283 INSERT_STRING(s, s->strstart, hash_head);
1284 }
1285
1286 if (flush == Z_INSERT_ONLY) {
1287 s->strstart++;
1288 s->lookahead--;
1289 continue;
1290 }
1291
1292 /* Find the longest match, discarding those <= prev_length.
1293 */
1294 s->prev_length = s->match_length, s->prev_match = s->match_start;
1295 s->match_length = MIN_MATCH-1;
1296
1297 if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
1298 s->strstart - hash_head <= MAX_DIST(s)) {
1299 /* To simplify the code, we prevent matches with the string
1300 * of window index 0 (in particular we have to avoid a match
1301 * of the string with itself at the start of the input file).
1302 */
1303 if (s->strategy != Z_HUFFMAN_ONLY) {
1304 s->match_length = longest_match (s, hash_head);
1305 }
1306 /* longest_match() sets match_start */
1307
1308 if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
1309 (s->match_length == MIN_MATCH &&
1310 s->strstart - s->match_start > TOO_FAR))) {
1311
1312 /* If prev_match is also MIN_MATCH, match_start is garbage
1313 * but we will ignore the current match anyway.
1314 */
1315 s->match_length = MIN_MATCH-1;
1316 }
1317 }
1318 /* If there was a match at the previous step and the current
1319 * match is not better, output the previous match:
1320 */
1321 if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
1322 uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
1323 /* Do not insert strings in hash table beyond this. */
1324
1325 check_match(s, s->strstart-1, s->prev_match, s->prev_length);
1326
1327 _tr_tally_dist(s, s->strstart -1 - s->prev_match,
1328 s->prev_length - MIN_MATCH, bflush);
1329
1330 /* Insert in hash table all strings up to the end of the match.
1331 * strstart-1 and strstart are already inserted. If there is not
1332 * enough lookahead, the last two strings are not inserted in
1333 * the hash table.
1334 */
1335 s->lookahead -= s->prev_length-1;
1336 s->prev_length -= 2;
1337 do {
1338 if (++s->strstart <= max_insert) {
1339 INSERT_STRING(s, s->strstart, hash_head);
1340 }
1341 } while (--s->prev_length != 0);
1342 s->match_available = 0;
1343 s->match_length = MIN_MATCH-1;
1344 s->strstart++;
1345
1346 if (bflush) FLUSH_BLOCK(s, 0);
1347
1348 } else if (s->match_available) {
1349 /* If there was no match at the previous position, output a
1350 * single literal. If there was a match but the current match
1351 * is longer, truncate the previous match to a single literal.
1352 */
1353 Tracevv((stderr,"%c", s->window[s->strstart-1]));
1354 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1355 if (bflush) {
1356 FLUSH_BLOCK_ONLY(s, 0);
1357 }
1358 s->strstart++;
1359 s->lookahead--;
1360 if (s->strm->avail_out == 0) return need_more;
1361 } else {
1362 /* There is no previous match to compare with, wait for
1363 * the next step to decide.
1364 */
1365 s->match_available = 1;
1366 s->strstart++;
1367 s->lookahead--;
1368 }
1369 }
1370 if (flush == Z_INSERT_ONLY) {
1371 s->block_start = s->strstart;
1372 return need_more;
1373 }
1374 Assert (flush != Z_NO_FLUSH, "no flush?");
1375 if (s->match_available) {
1376 Tracevv((stderr,"%c", s->window[s->strstart-1]));
1377 _tr_tally_lit(s, s->window[s->strstart-1], bflush);
1378 s->match_available = 0;
1379 }
1380 FLUSH_BLOCK(s, flush == Z_FINISH);
1381 return flush == Z_FINISH ? finish_done : block_done;
1382}