X-Git-Url: https://mattmccutchen.net/rsync/rsync.git/blobdiff_plain/a3221d2ac14255c31109a617c4d62b949cd910de..a255c592e8fa9e4570ebbf333e95a402f9e84a03:/match.c

diff --git a/match.c b/match.c
index f3858e5b..b9336b86 100644
--- a/match.c
+++ b/match.c
@@ -24,6 +24,7 @@
 extern int am_server;
 extern int do_progress;
 extern int checksum_seed;
 extern int inplace;
+extern int make_backups;
 
 typedef unsigned short tag;
@@ -141,11 +142,12 @@ static void matched(int f,struct sum_struct *s,struct map_struct *buf,
 static void hash_search(int f,struct sum_struct *s,
 			struct map_struct *buf, OFF_T len)
 {
-	OFF_T offset, end;
+	OFF_T offset, end, backup;
 	unsigned int k;
 	size_t want_i;
 	char sum2[SUM_LENGTH];
 	uint32 s1, s2, sum;
+	int more;
 	schar *map;
 
 	/* want_i is used to encourage adjacent matches, allowing the RLL
@@ -203,7 +205,7 @@ static void hash_search(int f,struct sum_struct *s,
 
 			/* inplace: ensure chunk's offset is either >= our
 			 * offset or that the data didn't move. */
-			if (inplace && s->sums[i].offset < offset
+			if (inplace && !make_backups && s->sums[i].offset < offset
 			    && !(s->sums[i].flags & SUMFLG_SAME_OFFSET))
 				continue;
 
@@ -225,7 +227,7 @@ static void hash_search(int f,struct sum_struct *s,
 		/* If inplace is enabled, the best possible match is
 		 * one with an identical offset, so we prefer that over
 		 * the following want_i optimization. */
-		if (inplace) {
+		if (inplace && !make_backups) {
 			do {
 				size_t i2 = targets[j].i;
 				if (s->sums[i2].offset != offset)
@@ -248,7 +250,7 @@ static void hash_search(int f,struct sum_struct *s,
 			/* we've found a match, but now check to see
 			 * if want_i can hint at a better match. */
 			if (i != want_i && want_i < s->count
-			    && (!inplace || s->sums[want_i].offset >= offset
+			    && (!inplace || make_backups || s->sums[want_i].offset >= offset
				|| s->sums[want_i].flags & SUMFLG_SAME_OFFSET)
 			    && sum == s->sums[want_i].sum1
 			    && memcmp(sum2, s->sums[want_i].sum2, s->s2length) == 0) {
@@ -271,14 +273,21 @@ static void hash_search(int f,struct sum_struct *s,
 		} while (++j < s->count && targets[j].t == t);
 
 	null_tag:
+		backup = offset - last_match;
+		/* We sometimes read 1 byte prior to last_match... */
+		if (backup < 0)
+			backup = 0;
+
 		/* Trim off the first byte from the checksum */
-		map = (schar *)map_ptr(buf, offset, k+1);
+		more = offset + k < len;
+		map = (schar *)map_ptr(buf, offset - backup, k + more + backup)
+		    + backup;
 		s1 -= map[0] + CHAR_OFFSET;
 		s2 -= k * (map[0]+CHAR_OFFSET);
 
 		/* Add on the next byte (if there is one) to the checksum */
-		if (k < (len-offset)) {
-			s1 += (map[k]+CHAR_OFFSET);
+		if (more) {
+			s1 += map[k] + CHAR_OFFSET;
 			s2 += s1;
 		} else
 			--k;
@@ -289,9 +298,8 @@ static void hash_search(int f,struct sum_struct *s,
 		   match. The 3 reads are caused by the
 		   running match, the checksum update and the
 		   literal send. */
-		if (offset > last_match
-		    && offset-last_match >= CHUNK_SIZE+s->blength
-		    && end-offset > CHUNK_SIZE) {
+		if (backup >= CHUNK_SIZE + s->blength
+		    && end - offset > CHUNK_SIZE) {
			matched(f,s,buf,offset - s->blength, -2);
 		}
 	} while (++offset < end);
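
The arithmetic the later hunks rework is the standard rolling-checksum slide: when the
search window advances one byte, the byte leaving the window is subtracted from both
halves of the weak checksum and the byte entering it is added. Below is a minimal
stand-alone sketch of that update, not rsync's actual code; roll_one() is a hypothetical
helper name, and CHAR_OFFSET follows the definition in rsync.h (where it is 0).

	#include <stdint.h>
	#include <stddef.h>

	#define CHAR_OFFSET 0	/* as defined in rsync.h */

	/* Slide a k-byte weak checksum (s1, s2) one byte to the right:
	 * `out` is the byte leaving the window, `in` the byte entering it.
	 * Mirrors the "trim off the first byte" and "add on the next byte"
	 * steps in hash_search() above. */
	static void roll_one(uint32_t *s1, uint32_t *s2,
			     unsigned char out, unsigned char in, size_t k)
	{
		*s1 -= out + CHAR_OFFSET;	/* drop the departing byte */
		*s2 -= k * (out + CHAR_OFFSET);	/* it contributed k times to s2 */
		*s1 += in + CHAR_OFFSET;	/* admit the arriving byte */
		*s2 += *s1;			/* s2 accumulates the new s1 */
	}

The new `backup` variable does not change that math: the patch asks map_ptr() for a
window that starts `backup` bytes earlier (back toward last_match) and then indexes past
those extra bytes, which appears intended to keep the not-yet-transmitted data between
last_match and offset mapped while the checksum update itself reads the same bytes as
before.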