/* lib/pool_alloc.c (from the rsync source tree) */

#include "rsync.h"

#define POOL_DEF_EXTENT	(32 * 1024)

struct alloc_pool
{
	size_t size;			/* extent size */
	size_t quantum;			/* allocation quantum */
	struct pool_extent *live;	/* current extent for
					 * allocations */
	struct pool_extent *free;	/* unfreed extent list */
	void (*bomb)(const char *);	/* function to call if
					 * malloc fails */
	int flags;

	/* statistical data */
	unsigned long e_created;	/* extents created */
	unsigned long e_freed;		/* extents destroyed */
	int64 n_allocated;		/* calls to alloc */
	int64 n_freed;			/* calls to free */
	int64 b_allocated;		/* cum. bytes allocated */
	int64 b_freed;			/* cum. bytes freed */
};

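/* Each extent is a single malloc'd block that objects are carved from.  The
 * "free" count is both the number of bytes still available and the offset of
 * the next object to hand out (objects are taken from the top downward);
 * "bound" tracks bytes lost to alignment padding and to pool_free() calls. */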
struct pool_extent
{
	void *start;			/* starting address */
	size_t free;			/* free bytecount */
	size_t bound;			/* bytes bound by padding,
					 * overhead and freed */
	struct pool_extent *next;
};

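/* offsetof(struct align_test, bar) yields the strictest alignment the pool
 * must honor: the padding the compiler inserts before an int64 that follows
 * a pointer. */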
struct align_test {
	void *foo;
	int64 bar;
};

#define MINALIGN	offsetof(struct align_test, bar)

/* Temporarily cast a void* var into a char* var when adding an offset (to
 * keep some compilers from complaining about the pointer arithmetic). */
#define PTR_ADD(b,o)	( (void*) ((char*)(b) + (o)) )

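/* Create a pool.  A size of 0 selects the default extent size and a quantum
 * of 0 selects MINALIGN; "bomb" (which may be NULL) is called with a message
 * when an allocation cannot be satisfied. */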
alloc_pool_t
pool_create(size_t size, size_t quantum, void (*bomb)(const char *), int flags)
{
	struct alloc_pool *pool;

	if (!(pool = new(struct alloc_pool)))
		return pool;
	memset(pool, 0, sizeof (struct alloc_pool));

	pool->size = size	/* round extent size to min alignment reqs */
	    ? (size + MINALIGN - 1) & ~(MINALIGN - 1)
	    : POOL_DEF_EXTENT;
	if (flags & POOL_INTERN) {
		pool->size -= sizeof (struct pool_extent);
		flags |= POOL_APPEND;
	}
	pool->quantum = quantum ? quantum : MINALIGN;
	pool->bomb = bomb;
	pool->flags = flags;

	return pool;
}

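/* Release every extent in the pool (live and free lists) and the pool
 * descriptor itself. */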
void
pool_destroy(alloc_pool_t p)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur, *next;

	if (!pool)
		return;

	if (pool->live) {
		cur = pool->live;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
	}
	for (cur = pool->free; cur; cur = next) {
		next = cur->next;
		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
	}
	free(pool);
}

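/* Allocate "len" bytes (rounded up to the pool's quantum) from the pool.
 * If the request cannot be satisfied, the pool's bomb function is called
 * with the "bomb" message and NULL is returned. */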
void *
pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	if (!pool)
		return NULL;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	if (len > pool->size)
		goto bomb;

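	/* The live extent can't hold the request (or doesn't exist yet):
	 * retire it to the free list and start a fresh one. */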
	if (!pool->live || len > pool->live->free) {
		void *start;
		size_t free;
		size_t bound;
		size_t skew;
		size_t asize;
		struct pool_extent *ext;

		if (pool->live) {
			pool->live->next = pool->free;
			pool->free = pool->live;
		}

		free = pool->size;
		bound = 0;

		asize = pool->size;
		if (pool->flags & POOL_APPEND)
			asize += sizeof (struct pool_extent);

		if (!(start = new_array(char, asize)))
			goto bomb;

		if (pool->flags & POOL_CLEAR)
			memset(start, 0, free);

		if (pool->flags & POOL_APPEND)
			ext = PTR_ADD(start, free);
		else if (!(ext = new(struct pool_extent)))
			goto bomb;
		if (pool->flags & POOL_QALIGN && pool->quantum > 1
		    && (skew = (size_t)PTR_ADD(start, free) % pool->quantum)) {
			bound += skew;
			free -= skew;
		}
		ext->start = start;
		ext->free = free;
		ext->bound = bound;
		ext->next = NULL;
		pool->live = ext;

		pool->e_created++;
	}

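	/* Carve the object off the top of the live extent: ->free is both the
	 * number of bytes still available and the offset of the object that
	 * is about to be returned. */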
	pool->n_allocated++;
	pool->b_allocated += len;

	pool->live->free -= len;

	return PTR_ADD(pool->live->start, pool->live->free);

  bomb:
	if (pool->bomb)
		(*pool->bomb)(bomb);
	return NULL;
}

/* This function allows you to declare memory in the pool that you are done
 * using.  If you free all the memory in a pool's extent, that extent will
 * be freed. */
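/* Passing a NULL addr simply retires the current live extent to the free
 * list without doing any byte accounting for it. */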
void
pool_free(alloc_pool_t p, size_t len, void *addr)
{
	struct alloc_pool *pool = (struct alloc_pool *)p;
	struct pool_extent *cur, *prev;

	if (!pool)
		return;

	if (!len)
		len = pool->quantum;
	else if (pool->quantum > 1 && len % pool->quantum)
		len += pool->quantum - len % pool->quantum;

	if (!addr && pool->live) {
		pool->live->next = pool->free;
		pool->free = pool->live;
		pool->live = NULL;
		return;
	}
	pool->n_freed++;
	pool->b_freed += len;

	cur = pool->live;
	if (cur && addr >= cur->start
	    && addr < PTR_ADD(cur->start, pool->size)) {
		if (addr == PTR_ADD(cur->start, cur->free)) {
			if (pool->flags & POOL_CLEAR)
				memset(addr, 0, len);
			cur->free += len;
		} else
			cur->bound += len;
		if (cur->free + cur->bound >= pool->size) {
			size_t skew;

			if (pool->flags & POOL_CLEAR) {
				memset(PTR_ADD(cur->start, cur->free), 0,
				       pool->size - cur->free);
			}
			cur->free = pool->size;
			cur->bound = 0;
			if (pool->flags & POOL_QALIGN && pool->quantum > 1
			    && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
				cur->bound += skew;
				cur->free -= skew;
			}
		}
		return;
	}
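	/* The address wasn't in the live extent, so search the free list for
	 * the extent that contains it. */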
	for (prev = NULL, cur = pool->free; cur; prev = cur, cur = cur->next) {
		if (addr >= cur->start
		    && addr < PTR_ADD(cur->start, pool->size))
			break;
	}
	if (!cur)
		return;

	if (prev) {
		prev->next = cur->next;
		cur->next = pool->free;
		pool->free = cur;
	}
	cur->bound += len;

	if (cur->free + cur->bound >= pool->size) {
		pool->free = cur->next;

		free(cur->start);
		if (!(pool->flags & POOL_APPEND))
			free(cur);
		pool->e_freed++;
	}
}

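/* Helpers for pool_stats(): format one line into buf and write it to fd. */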
#define FDPRINT(label, value) \
	snprintf(buf, sizeof buf, label, value), \
	write(fd, buf, strlen(buf))

#define FDEXTSTAT(ext) \
	snprintf(buf, sizeof buf, "  %12ld  %5ld\n", \
		 (long) ext->free, \
		 (long) ext->bound), \
	write(fd, buf, strlen(buf))

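/* Write a human-readable report on the pool to fd.  With "summarize" set only
 * the totals are printed; otherwise the free/bound counts of every extent are
 * dumped as well. */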
void
pool_stats(alloc_pool_t p, int fd, int summarize)
{
	struct alloc_pool *pool = (struct alloc_pool *) p;
	struct pool_extent *cur;
	char buf[BUFSIZ];

	if (!pool)
		return;

	FDPRINT("  Extent size:       %12ld\n", (long) pool->size);
	FDPRINT("  Alloc quantum:     %12ld\n", (long) pool->quantum);
	FDPRINT("  Extents created:   %12ld\n", pool->e_created);
	FDPRINT("  Extents freed:     %12ld\n", pool->e_freed);
	FDPRINT("  Alloc count:       %12.0f\n", (double) pool->n_allocated);
	FDPRINT("  Free Count:        %12.0f\n", (double) pool->n_freed);
	FDPRINT("  Bytes allocated:   %12.0f\n", (double) pool->b_allocated);
	FDPRINT("  Bytes freed:       %12.0f\n", (double) pool->b_freed);

	if (summarize)
		return;

	if (!pool->live && !pool->free)
		return;

	write(fd, "\n", 1);

	if (pool->live)
		FDEXTSTAT(pool->live);
	strlcpy(buf, "   FREE    BOUND\n", sizeof buf);
	write(fd, buf, strlen(buf));

	for (cur = pool->free; cur; cur = cur->next)
		FDEXTSTAT(cur);
}
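
/* A minimal usage sketch of the API above (not part of rsync itself; the
 * error handler and the sizes are made up for illustration, and alloc_pool_t
 * plus the POOL_* flags come from the headers pulled in via rsync.h):
 *
 *	static void pool_overflow(const char *msg)
 *	{
 *		fprintf(stderr, "pool_alloc failure: %s\n", msg);
 *		exit(1);
 *	}
 *
 *	alloc_pool_t names = pool_create(0, 0, pool_overflow, POOL_CLEAR);
 *	char *buf = pool_alloc(names, 64, "name buffer");
 *	...
 *	pool_free(names, 64, buf);
 *	pool_destroy(names);
 */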