6 * Idea here is very simple.
8 * We have total of (sz-N+1) N-byte overlapping sequences in buf whose
9 * size is sz. If the same N-byte sequence appears in both source and
10 * destination, we say the byte that starts that sequence is shared
11 * between them (i.e. copied from source to destination).
13 * For each possible N-byte sequence, if the source buffer has more
14 * instances of it than the destination buffer, that means the
15 * difference is the number of bytes not copied from source to
16 * destination. If the counts are the same, everything was copied
17 * from source to destination. If the destination has more,
18 * everything was copied, and destination added more.
20 * We are doing an approximation so we do not really have to waste
21 * memory by actually storing the sequence. We just hash them into
22 * somewhere around 2^16 hash buckets and count the occurrences.
24 * The length of the sequence is arbitrarily set to 8 for now.
27 /* Wild guess at the initial hash size */
28 #define INITIAL_HASH_SIZE 9
29 #define HASHBASE 65537 /* next_prime(2^16) */
30 /* We leave more room in smaller hash but do not let it
31 * grow to have too much unused space.
 */
33 #define INITIAL_FREE(sz_log2) ((1<<(sz_log2))*(sz_log2-3)/(sz_log2))
/* NOTE(review): the struct definitions enclosing these two members are
 * missing from this view of the file.  From the uses below, `hashval`
 * appears to be the key a bucket holds, and `data[]` the flexible-array
 * bucket table of 1 << alloc_log2 entries — confirm against the full file. */
36 unsigned long hashval;
42 struct spanhash data[FLEX_ARRAY];
/*
 * Look up the bucket holding `hashval` in the open-addressed,
 * power-of-two sized table.  NOTE(review): the probe loop's body, the
 * hit/miss return paths and the wrap-around are missing from this view
 * of the file.
 */
45 static struct spanhash *spanhash_find(struct spanhash_top *top,
46 unsigned long hashval)
48 int sz = 1 << top->alloc_log2;	/* table holds 1 << alloc_log2 buckets */
49 int bucket = hashval & (sz - 1);	/* home bucket; mask is valid because sz is a power of two */
51 struct spanhash *h = &(top->data[bucket++]);	/* probe, advancing linearly */
54 if (h->hashval == hashval)	/* hit: this bucket holds the sequence we want */
/*
 * Grow the hash table to twice its size and reinsert every populated
 * bucket from `orig`.  NOTE(review): the declarations of `sz`, `i` and
 * `bucket`, the empty-bucket skip, the probe loop's wrap-around and the
 * freeing of the old table / return of the new one are missing from
 * this view of the file.
 */
61 static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
63 struct spanhash_top *new;
65 int osz = 1 << orig->alloc_log2;	/* number of buckets in the old table */
68 new = xmalloc(sizeof(*orig) + sizeof(struct spanhash) * sz);	/* header + flexible bucket array */
69 new->alloc_log2 = orig->alloc_log2 + 1;	/* double the bucket count */
70 new->free = INITIAL_FREE(new->alloc_log2);	/* presumably the budget of insertions before the next rehash */
71 memset(new->data, 0, sizeof(struct spanhash) * sz);	/* all buckets start empty */
72 for (i = 0; i < osz; i++) {	/* walk every old bucket */
73 struct spanhash *o = &(orig->data[i]);
77 bucket = o->hashval & (sz - 1);	/* recompute the home bucket for the larger table */
79 struct spanhash *h = &(new->data[bucket++]);	/* linear probe for a free slot */
81 h->hashval = o->hashval;	/* move the entry into the new table */
/*
 * Record one more occurrence of `hashval`, either claiming an empty
 * bucket or bumping an existing entry; returns the (possibly rehashed)
 * table.  NOTE(review): the local declarations, the probe loop, the
 * count updates and most return paths are missing from this view of
 * the file.
 */
94 static struct spanhash_top *add_spanhash(struct spanhash_top *top,
95 unsigned long hashval)
100 lim = (1 << top->alloc_log2);	/* current bucket count */
101 bucket = hashval & (lim - 1);	/* home bucket for this hash */
103 h = &(top->data[bucket++]);	/* linear probe */
105 h->hashval = hashval;	/* claim an empty bucket for this sequence */
/* grow and retry once the table runs out of free slots */
109 return spanhash_rehash(top);
112 if (h->hashval == hashval) {	/* already present — presumably bumps its count (not visible here) */
/*
 * Hash every overlapping 8-byte sequence of buf[0..sz-1] into a freshly
 * allocated spanhash table and count occurrences.  NOTE(review): the
 * declaration of `i`, the main loop's header/closing braces and the
 * return of the table are missing from this view of the file.
 */
121 static struct spanhash_top *hash_chars(unsigned char *buf, unsigned long sz)
124 unsigned long accum1, accum2, hashval;
125 struct spanhash_top *hash;
127 i = INITIAL_HASH_SIZE;
128 hash = xmalloc(sizeof(*hash) + sizeof(struct spanhash) * (1<<i));	/* header + 1<<i buckets */
129 hash->alloc_log2 = i;
130 hash->free = INITIAL_FREE(i);	/* insertions allowed before the first rehash */
131 memset(hash->data, 0, sizeof(struct spanhash) * (1<<i));	/* start with all buckets empty */
133 /* an 8-byte shift register made of accum1 and accum2. New
134 * bytes come at LSB of accum2, and shifted up to accum1
 * (NOTE(review): the `>> 24` below implies each accumulator is
 * used as a 32-bit half — confirm on platforms with 64-bit long)
 */
/* prime the register with the first 7 bytes... */
136 for (i = accum1 = accum2 = 0; i < 7; i++, sz--) {
137 accum1 = (accum1 << 8) | (accum2 >> 24);
138 accum2 = (accum2 << 8) | *buf++;
/* ...then shift in each remaining byte and hash the 8-byte window */
141 accum1 = (accum1 << 8) | (accum2 >> 24);
142 accum2 = (accum2 << 8) | *buf++;
143 hashval = (accum1 + accum2 * 0x61) % HASHBASE;	/* fold the window into [0, HASHBASE) */
144 hash = add_spanhash(hash, hashval);	/* may return a rehashed (larger) table */
/*
 * Estimate how many bytes of `src` were copied into `dst` and how many
 * bytes of `dst` are newly added, by comparing per-sequence occurrence
 * counts of every 8-byte window in the two buffers; results are
 * reported through *src_copied and *literal_added.  NOTE(review): this
 * view of the file is missing the src_count_p/dst_count_p parameter
 * lines, several local declarations, the early-return for tiny inputs,
 * and everything past the comparison loop's first branch — the comments
 * below are limited to what is visible.
 */
150 int diffcore_count_changes(void *src, unsigned long src_size,
151 void *dst, unsigned long dst_size,
154 unsigned long delta_limit,
155 unsigned long *src_copied,
156 unsigned long *literal_added)
159 struct spanhash_top *src_count, *dst_count;
160 unsigned long sc, la;	/* running "source copied" / "literal added" totals */
/* too small to contain even one 8-byte sequence */
162 if (src_size < 8 || dst_size < 8)
165 src_count = dst_count = NULL;
/* presumably reuses a caller-cached table when one was already built */
167 src_count = *src_count_p;
169 src_count = hash_chars(src, src_size);
171 *src_count_p = src_count;	/* hand the freshly built table back for caching */
174 dst_count = *dst_count_p;	/* same caching pattern for the destination buffer */
176 dst_count = hash_chars(dst, dst_size);
178 *dst_count_p = dst_count;
182 ssz = 1 << src_count->alloc_log2;	/* bucket count of the source table */
183 for (i = 0; i < ssz; i++) {	/* compare every populated source bucket */
184 struct spanhash *s = &(src_count->data[i]);
186 unsigned dst_cnt, src_cnt;
190 d = spanhash_find(dst_count, s->hashval);	/* occurrences of the same sequence in dst */
191 dst_cnt = d ? d->cnt : 0;
192 if (src_cnt < dst_cnt) {	/* dst has extra copies: count them as added literal */
193 la += dst_cnt - src_cnt;