We now pass the POOL_QALIGN flag to pool_create(). Also optimized
[rsync/rsync.git] / lib / pool_alloc.c
#include "rsync.h"

#define POOL_DEF_EXTENT	(32 * 1024)

struct alloc_pool
{
	size_t size;			/* extent size */
	size_t quantum;			/* allocation quantum */
	struct pool_extent *extents;	/* top extent is "live" */
	void (*bomb)(const char *);	/* function to call if
					 * malloc fails */
	int flags;

	/* statistical data */
	unsigned long e_created;	/* extents created */
	unsigned long e_freed;		/* extents destroyed */
	int64 n_allocated;		/* calls to alloc */
	int64 n_freed;			/* calls to free */
	int64 b_allocated;		/* cum. bytes allocated */
	int64 b_freed;			/* cum. bytes freed */
};

struct pool_extent
{
	void *start;			/* starting address */
	size_t free;			/* free bytecount */
	size_t bound;			/* bytes bound by padding,
					 * overhead and freed */
	struct pool_extent *next;
};
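
/* Allocation note: pool_alloc() carves space from the top of the live
 * extent downward (it subtracts the quantum-rounded length from "free" and
 * returns start + free), so the newest allocation sits at the lowest
 * in-use address of its extent and older data sits above it. */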

struct align_test {
	void *foo;
	int64 bar;
};

#define MINALIGN offsetof(struct align_test, bar)
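
/* MINALIGN is the offset of an int64 that follows a pointer in align_test,
 * which serves as an estimate of the strictest alignment the pool must
 * honor: extent sizes are rounded up to it and it is the default quantum. */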

/* Temporarily cast a void* var into a char* var when adding an offset (to
 * keep some compilers from complaining about the pointer arithmetic). */
#define PTR_ADD(b,o) ( (void*) ((char*)(b) + (o)) )

alloc_pool_t
pool_create(size_t size, size_t quantum, void (*bomb)(const char *), int flags)
{
	struct alloc_pool *pool;

	if (!(pool = new(struct alloc_pool)))
		return pool;
	memset(pool, 0, sizeof (struct alloc_pool));

	pool->size = size	/* round extent size to min alignment reqs */
	    ? (size + MINALIGN - 1) & ~(MINALIGN - 1)
	    : POOL_DEF_EXTENT;
	if (flags & POOL_INTERN) {
		pool->size -= sizeof (struct pool_extent);
		flags |= POOL_APPEND;
	}
	pool->quantum = quantum ? quantum : MINALIGN;
	pool->bomb = bomb;
	pool->flags = flags;

	return pool;
}
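
/* Illustrative sketch (not part of rsync; compiled out): one way a caller
 * might create, use, and destroy a pool. The names example_bomb and
 * example_usage are hypothetical. Passing 0 for the size selects
 * POOL_DEF_EXTENT, the quantum is 16, and POOL_QALIGN asks for every
 * returned address to be quantum-aligned. */
#if 0
static void example_bomb(const char *msg)
{
	fprintf(stderr, "pool failure: %s\n", msg);
	exit(1);
}

static void example_usage(void)
{
	alloc_pool_t pool = pool_create(0, 16, example_bomb, POOL_QALIGN);
	void *item = pool_alloc(pool, 100, "example item");	/* rounded up to 112 bytes */

	pool_free(pool, 100, item);
	pool_destroy(pool);
}
#endif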

void
pool_destroy(alloc_pool_t p)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur, *next;

	if (!pool)
		return;

	for (cur = pool->extents; cur; cur = next) {
		next = cur->next;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
	}
	free(pool);
}

void *
pool_alloc(alloc_pool_t p, size_t len, const char *bomb_msg)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	if (!pool)
		return NULL;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	if (len > pool->size)
		goto bomb_out;

	if (!pool->extents || len > pool->extents->free) {
		void *start;
		size_t free;
		size_t bound;
		size_t skew;
		size_t asize;
		struct pool_extent *ext;

		free = pool->size;
		bound = 0;

		asize = pool->size;
		if (pool->flags & POOL_APPEND)
			asize += sizeof (struct pool_extent);

		if (!(start = new_array(char, asize)))
			goto bomb_out;

		if (pool->flags & POOL_CLEAR)
			memset(start, 0, free);

		if (pool->flags & POOL_APPEND)
			ext = PTR_ADD(start, free);
		else if (!(ext = new(struct pool_extent)))
			goto bomb_out;
		if (pool->flags & POOL_QALIGN && pool->quantum > 1
		 && (skew = (size_t)PTR_ADD(start, free) % pool->quantum)) {
			bound += skew;
			free -= skew;
		}
		ext->start = start;
		ext->free = free;
		ext->bound = bound;
		ext->next = pool->extents;
		pool->extents = ext;

		pool->e_created++;
	}

	pool->n_allocated++;
	pool->b_allocated += len;

	pool->extents->free -= len;

	return PTR_ADD(pool->extents->start, pool->extents->free);

  bomb_out:
	if (pool->bomb)
		(*pool->bomb)(bomb_msg);
	return NULL;
}
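
/* Illustrative sketch (compiled out): quantum rounding as performed above.
 * The function name and the quantum of 8 are hypothetical; a zero-length
 * request consumes one full quantum, other requests are rounded up to a
 * quantum multiple, and a request larger than the extent size goes straight
 * to bomb_out. */
#if 0
static void example_rounding(alloc_pool_t pool)
{
	/* Assumes the pool was created with quantum == 8. */
	void *a = pool_alloc(pool, 13, "a");	/* consumes 16 bytes of the extent */
	void *b = pool_alloc(pool, 0, "b");	/* consumes 8 bytes (one quantum) */

	pool_free(pool, 0, b);	/* b was the newest allocation, so "free" grows back */
	pool_free(pool, 13, a);
}
#endif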

/* This function allows you to declare memory in the pool that you are done
 * using. If you free all the memory in a pool's extent, that extent will
 * be freed. */
void
pool_free(alloc_pool_t p, size_t len, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev;

	if (!pool)
		return;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	pool->n_freed++;
	pool->b_freed += len;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (!cur)
		return;

	if (!prev) {
		/* The "live" extent is kept ready for more allocations. */
		if (cur->free + cur->bound + len >= pool->size) {
			size_t skew;

			if (pool->flags & POOL_CLEAR) {
				memset(PTR_ADD(cur->start, cur->free), 0,
				       pool->size - cur->free);
			}
			cur->free = pool->size;
			cur->bound = 0;
			if (pool->flags & POOL_QALIGN && pool->quantum > 1
			 && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
				cur->bound += skew;
				cur->free -= skew;
			}
		} else if (addr == PTR_ADD(cur->start, cur->free)) {
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, len);
			cur->free += len;
		} else
			cur->bound += len;
	} else {
		cur->bound += len;

		if (cur->free + cur->bound >= pool->size) {
			prev->next = cur->next;
			free(cur->start);
			if (!(pool->flags & POOL_APPEND))
				free(cur);
			pool->e_freed++;
		} else if (prev != pool->extents) {
			/* Move the extent to be the first non-live extent. */
			prev->next = cur->next;
			cur->next = pool->extents->next;
			pool->extents->next = cur;
		}
	}
}
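
/* Summary of the cases above: a free in the live extent that exactly undoes
 * the newest allocation grows "free" back; any other free just accumulates
 * in "bound". Once free + bound accounts for the whole extent, a non-live
 * extent is released entirely, while the live extent is merely reset so it
 * can keep serving allocations. */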

/* This allows you to declare that the given address marks the edge of some
 * pool memory that is no longer needed. Any extents that hold only data
 * older than the boundary address are freed. NOTE: You MUST NOT USE BOTH
 * pool_free() and pool_free_old() on the same pool!! */
void
pool_free_old(alloc_pool_t p, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev, *next;

	if (!pool || !addr)
		return;

	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		 && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (!cur)
		return;

	if (addr == PTR_ADD(cur->start, cur->free)) {
		if (prev) {
			prev->next = NULL;
			next = cur;
		} else {
			size_t skew;

			/* The most recent live extent can just be reset. */
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, pool->size - cur->free);
			cur->free = pool->size;
			cur->bound = 0;
			if (pool->flags & POOL_QALIGN && pool->quantum > 1
			 && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
				cur->bound += skew;
				cur->free -= skew;
			}
			next = cur->next;
			cur->next = NULL;
		}
	} else {
		next = cur->next;
		cur->next = NULL;
	}

	while ((cur = next) != NULL) {
		next = cur->next;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
		pool->e_freed++;
	}
}

/* If the current extent doesn't have "len" free space in it, mark it as full
 * so that the next alloc will start a new extent. If len is (size_t)-1, this
 * bump will always occur. The function returns a boundary address that can
 * be used with pool_free_old(), or a NULL if no memory is allocated. */
void *
pool_boundary(alloc_pool_t p, size_t len)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur;

	if (!pool || !pool->extents)
		return NULL;

	cur = pool->extents;

	if (cur->free < len) {
		cur->bound += cur->free;
		cur->free = 0;
	}

	return PTR_ADD(cur->start, cur->free);
}
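
/* Illustrative sketch (compiled out): the pool_boundary()/pool_free_old()
 * pattern. The function name, the batch sizes, and the 4K bump threshold
 * are hypothetical. A boundary recorded between two generations of
 * allocations can later be handed to pool_free_old() to release every
 * extent that holds only the older generation (remember that a pool must
 * use either pool_free() or pool_free_old(), never both). */
#if 0
static void example_generations(alloc_pool_t pool)
{
	void *gen1_end;
	int i;

	for (i = 0; i < 1000; i++)
		(void)pool_alloc(pool, 64, "generation 1");

	/* Mark where generation 1 ends; if less than 4K remains in the
	 * live extent, mark it full so generation 2 starts a new extent. */
	gen1_end = pool_boundary(pool, 4 * 1024);

	for (i = 0; i < 1000; i++)
		(void)pool_alloc(pool, 64, "generation 2");

	/* Generation 1 is no longer needed. */
	pool_free_old(pool, gen1_end);
}
#endif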

#define FDPRINT(label, value) \
	snprintf(buf, sizeof buf, label, value), \
	write(fd, buf, strlen(buf))

#define FDEXTSTAT(ext) \
	snprintf(buf, sizeof buf, " %12ld %5ld\n", \
		 (long) ext->free, \
		 (long) ext->bound), \
	write(fd, buf, strlen(buf))

void
pool_stats(alloc_pool_t p, int fd, int summarize)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur;
	char buf[BUFSIZ];

	if (!pool)
		return;

	FDPRINT(" Extent size: %12ld\n", (long) pool->size);
	FDPRINT(" Alloc quantum: %12ld\n", (long) pool->quantum);
	FDPRINT(" Extents created: %12ld\n", pool->e_created);
	FDPRINT(" Extents freed: %12ld\n", pool->e_freed);
	FDPRINT(" Alloc count: %12.0f\n", (double) pool->n_allocated);
	FDPRINT(" Free Count: %12.0f\n", (double) pool->n_freed);
	FDPRINT(" Bytes allocated: %12.0f\n", (double) pool->b_allocated);
	FDPRINT(" Bytes freed: %12.0f\n", (double) pool->b_freed);

	if (summarize)
		return;

	if (!pool->extents)
		return;

	write(fd, "\n", 1);

	for (cur = pool->extents; cur; cur = cur->next)
		FDEXTSTAT(cur);
}
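
/* Illustrative sketch (compiled out): dumping the counters above. The fd
 * argument is a raw file descriptor (2 is stderr here), and a nonzero
 * summarize argument would suppress the trailing per-extent lines. */
#if 0
static void example_stats(alloc_pool_t pool)
{
	pool_stats(pool, 2, 0);
}
#endif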