This patch makes the processing of really large files more efficient by
making sure that the sender's hash table is large enough to hold all the
checksum entries without being overloaded.  Unfortunately, the code adds
a modulus calculation for (up to) every byte of the source file, which
slows down the code for normal file sizes (e.g. about 4 CPU seconds
slower on a Pentium III when copying a 65 MB file with very little
matching data).

This was updated for the latest codebase from a patch written by
Shachar Shemesh.

To use this patch, run these commands for a successful build:

    patch -p1 <patches/dynamic_hash.diff
    ./configure                         (optional if already run)
    make

--- old/match.c
+++ new/match.c
@@ ... @@
-#define TABLESIZE (1<<16)
 
 static int32 *hash_table;
+static uint32 tablesize;
 
-#define SUM2HASH2(s1,s2) (((s1)+(s2)) & 0xFFFF)
-#define SUM2HASH(sum) SUM2HASH2((sum)&0xFFFF,(sum)>>16)
+#define SUM2HASH(sum) ((sum)%tablesize)
 
 static void build_hash_table(struct sum_struct *s)
 {
 	int32 i;
+	uint32 prior_size = tablesize;
 
-	if (!hash_table) {
-		hash_table = new_array(int32, TABLESIZE);
+	/* Dynamically calculate the hash table size so that the hash load
+	 * for big files is about 80%.  This number must be odd or s2 will
+	 * not be able to span the entire set. */
+	tablesize = (uint32)(s->count/8) * 10 + 11;
+	if (tablesize < 65537)
+		tablesize = 65537; /* a prime number */
+	if (tablesize != prior_size) {
+		if (hash_table)
+			free(hash_table);
+		hash_table = new_array(int32, tablesize);
 		if (!hash_table)
 			out_of_memory("build_hash_table");
 	}
 
-	memset(hash_table, 0xFF, TABLESIZE * sizeof hash_table[0]);
+	memset(hash_table, 0xFF, tablesize * sizeof hash_table[0]);
 
 	for (i = 0; i < s->count; i++) {
 		uint32 t = SUM2HASH(s->sums[i].sum1);
@@ -165,11 +172,11 @@ static void hash_search(int f,struct sum
 			(double)offset, s2 & 0xFFFF, s1 & 0xFFFF);
 	}
 
-	i = hash_table[SUM2HASH2(s1,s2)];
+	sum = (s1 & 0xffff) | (s2 << 16);
+	i = hash_table[SUM2HASH(sum)];
 	if (i < 0)
 		goto null_hash;
 
-	sum = (s1 & 0xffff) | (s2 << 16);
 	hash_hits++;
 	do {
 		int32 l;
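
As an illustration of the sizing formula in build_hash_table (not part of
the patch itself), the standalone sketch below shows why the table ends up
roughly 25% larger than the checksum count, i.e. about an 80% hash load,
once a file is big enough to exceed the 65537 minimum.  The names
choose_tablesize() and the demo main() are hypothetical; only the formula
and the 65537 floor come from the patch.

/* Sketch: the formula gives roughly count * 1.25 slots, so the load
 * settles near count / tablesize ~= 0.8 for large files. */
#include <stdio.h>
#include <inttypes.h>

static uint32_t choose_tablesize(uint32_t count)
{
	/* (count/8)*10 is a multiple of 10, so +11 keeps the result odd. */
	uint32_t tablesize = (count / 8) * 10 + 11;
	if (tablesize < 65537)
		tablesize = 65537;	/* lower bound from the patch; a prime */
	return tablesize;
}

int main(void)
{
	const uint32_t counts[] = { 1000, 100000, 10000000 };
	for (int i = 0; i < 3; i++) {
		uint32_t t = choose_tablesize(counts[i]);
		printf("count=%" PRIu32 "  tablesize=%" PRIu32 "  load=%.2f\n",
		       counts[i], t, (double)counts[i] / t);
	}
	return 0;
}

For the two larger counts this prints a load of about 0.80; small files
stay on the 65537-entry table, so only really large files pay for a bigger
allocation.  The always-odd result is what the comment in build_hash_table
means when it says the number must be odd so s2 can span the entire set.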