X-Git-Url: https://mattmccutchen.net/rsync/rsync.git/blobdiff_plain/e3d27df44468267e7086e63307a61a72c0e60a1e..a02348b5df636914e356b539d91ed21f7b3a1ab5:/lib/pool_alloc.c

diff --git a/lib/pool_alloc.c b/lib/pool_alloc.c
index fd9c239b..0fb31225 100644
--- a/lib/pool_alloc.c
+++ b/lib/pool_alloc.c
@@ -6,9 +6,7 @@ struct alloc_pool
 {
 	size_t size;			/* extent size */
 	size_t quantum;			/* allocation quantum */
-	struct pool_extent *live;	/* current extent for
-					 * allocations */
-	struct pool_extent *free;	/* unfreed extent list */
+	struct pool_extent *extents;	/* top extent is "live" */
 	void (*bomb)();			/* function to call if
 					 * malloc fails */
 	int flags;
@@ -54,7 +52,7 @@ pool_create(size_t size, size_t quantum, void (*bomb)(const char *), int flags)
 	pool->size = size	/* round extent size to min alignment reqs */
 	    ? (size + MINALIGN - 1) & ~(MINALIGN - 1)
 	    : POOL_DEF_EXTENT;
-	if (pool->flags & POOL_INTERN) {
+	if (flags & POOL_INTERN) {
 		pool->size -= sizeof (struct pool_extent);
 		flags |= POOL_APPEND;
 	}
@@ -74,13 +72,7 @@ pool_destroy(alloc_pool_t p)
 	if (!pool)
 		return;
-	if (pool->live) {
-		cur = pool->live;
-		free(cur->start);
-		if (!(pool->flags & POOL_APPEND))
-			free(cur);
-	}
-	for (cur = pool->free; cur; cur = next) {
+	for (cur = pool->extents; cur; cur = next) {
 		next = cur->next;
 		free(cur->start);
 		if (!(pool->flags & POOL_APPEND))
 			free(cur);
@@ -90,7 +82,7 @@
 
 void *
-pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
+pool_alloc(alloc_pool_t p, size_t len, const char *bomb_msg)
 {
 	struct alloc_pool *pool = (struct alloc_pool *) p;
 
 	if (!pool)
@@ -102,9 +94,9 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 		len += pool->quantum - len % pool->quantum;
 
 	if (len > pool->size)
-		goto bomb;
+		goto bomb_out;
 
-	if (!pool->live || len > pool->live->free) {
+	if (!pool->extents || len > pool->extents->free) {
 		void *start;
 		size_t free;
 		size_t bound;
@@ -112,11 +104,6 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 		size_t asize;
 		struct pool_extent *ext;
 
-		if (pool->live) {
-			pool->live->next = pool->free;
-			pool->free = pool->live;
-		}
-
 		free = pool->size;
 		bound = 0;
 
@@ -125,7 +112,7 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 			asize += sizeof (struct pool_extent);
 
 		if (!(start = new_array(char, asize)))
-			goto bomb;
+			goto bomb_out;
 
 		if (pool->flags & POOL_CLEAR)
 			memset(start, 0, free);
@@ -133,7 +120,7 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 		if (pool->flags & POOL_APPEND)
 			ext = PTR_ADD(start, free);
 		else if (!(ext = new(struct pool_extent)))
-			goto bomb;
+			goto bomb_out;
 		if (pool->flags & POOL_QALIGN && pool->quantum > 1
 		    && (skew = (size_t)PTR_ADD(start, free) % pool->quantum)) {
 			bound += skew;
@@ -142,8 +129,8 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 		ext->start = start;
 		ext->free = free;
 		ext->bound = bound;
-		ext->next = NULL;
-		pool->live = ext;
+		ext->next = pool->extents;
+		pool->extents = ext;
 
 		pool->e_created++;
 	}
@@ -151,13 +138,13 @@ pool_alloc(alloc_pool_t p, size_t len, const char *bomb)
 	pool->n_allocated++;
 	pool->b_allocated += len;
 
-	pool->live->free -= len;
+	pool->extents->free -= len;
 
-	return PTR_ADD(pool->live->start, pool->live->free);
+	return PTR_ADD(pool->extents->start, pool->extents->free);
 
-bomb:
+  bomb_out:
 	if (pool->bomb)
-		(*pool->bomb)(bomb);
+		(*pool->bomb)(bomb_msg);
 	return NULL;
 }
 
@@ -178,25 +165,20 @@ pool_free(alloc_pool_t p, size_t len, void *addr)
 	else if (pool->quantum > 1 && len % pool->quantum)
 		len += pool->quantum - len % pool->quantum;
 
-	if (!addr && pool->live) {
-		pool->live->next = pool->free;
-		pool->free = pool->live;
-		pool->live = NULL;
-		return;
-	}
 	pool->n_freed++;
 	pool->b_freed += len;
 
-	cur = pool->live;
-	if (cur && addr >= cur->start
-	    && addr < PTR_ADD(cur->start, pool->size)) {
-		if (addr == PTR_ADD(cur->start, cur->free)) {
-			if (pool->flags & POOL_CLEAR)
-				memset(addr, 0, len);
-			cur->free += len;
-		} else
-			cur->bound += len;
-		if (cur->free + cur->bound >= pool->size) {
+	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
+		if (addr >= cur->start
+		 && addr < PTR_ADD(cur->start, pool->size))
+			break;
+	}
+	if (!cur)
+		return;
+
+	if (!prev) {
+		/* The "live" extent is kept ready for more allocations. */
+		if (cur->free + cur->bound + len >= pool->size) {
 			size_t skew;
 
 			if (pool->flags & POOL_CLEAR) {
@@ -210,10 +192,44 @@ pool_free(alloc_pool_t p, size_t len, void *addr)
 				cur->bound += skew;
 				cur->free -= skew;
 			}
+		} else if (addr == PTR_ADD(cur->start, cur->free)) {
+			if (pool->flags & POOL_CLEAR)
+				memset(addr, 0, len);
+			cur->free += len;
+		} else
+			cur->bound += len;
+	} else {
+		cur->bound += len;
+
+		if (cur->free + cur->bound >= pool->size) {
+			prev->next = cur->next;
+			free(cur->start);
+			if (!(pool->flags & POOL_APPEND))
+				free(cur);
+			pool->e_freed++;
+		} else if (prev != pool->extents) {
+			/* Move the extent to be the first non-live extent. */
+			prev->next = cur->next;
+			cur->next = pool->extents->next;
+			pool->extents->next = cur;
 		}
-		return;
 	}
-	for (prev = NULL, cur = pool->free; cur; prev = cur, cur = cur->next) {
+}
+
+/* This allows you to declare that the given address marks the edge of some
+ * pool memory that is no longer needed. Any extents that hold only data
+ * older than the boundary address are freed. NOTE: You MUST NOT USE BOTH
+ * pool_free() and pool_free_old() on the same pool!! */
+void
+pool_free_old(alloc_pool_t p, void *addr)
+{
+	struct alloc_pool *pool = (struct alloc_pool *)p;
+	struct pool_extent *cur, *prev, *next;
+
+	if (!pool || !addr)
+		return;
+
+	for (prev = NULL, cur = pool->extents; cur; prev = cur, cur = cur->next) {
 		if (addr >= cur->start
 		    && addr < PTR_ADD(cur->start, pool->size))
 			break;
@@ -221,16 +237,33 @@ pool_free(alloc_pool_t p, size_t len, void *addr)
 	if (!cur)
 		return;
 
-	if (prev) {
-		prev->next = cur->next;
-		cur->next = pool->free;
-		pool->free = cur;
-	}
-	cur->bound += len;
+	if (addr == PTR_ADD(cur->start, cur->free)) {
+		if (prev) {
+			prev->next = NULL;
+			next = cur;
+		} else {
+			size_t skew;
 
-	if (cur->free + cur->bound >= pool->size) {
-		pool->free = cur->next;
+			/* The most recent live extent can just be reset. */
+			if (pool->flags & POOL_CLEAR)
+				memset(addr, 0, pool->size - cur->free);
+			cur->free = pool->size;
+			cur->bound = 0;
+			if (pool->flags & POOL_QALIGN && pool->quantum > 1
+			 && (skew = (size_t)PTR_ADD(cur->start, cur->free) % pool->quantum)) {
+				cur->bound += skew;
+				cur->free -= skew;
+			}
+			next = cur->next;
+			cur->next = NULL;
+		}
+	} else {
+		next = cur->next;
+		cur->next = NULL;
+	}
+	while ((cur = next) != NULL) {
+		next = cur->next;
 		free(cur->start);
 		if (!(pool->flags & POOL_APPEND))
 			free(cur);
@@ -238,6 +271,29 @@ pool_free(alloc_pool_t p, size_t len, void *addr)
 	}
 }
 
+/* If the current extent doesn't have "len" free space in it, mark it as full
+ * so that the next alloc will start a new extent. If len is (size_t)-1, this
+ * bump will always occur. The function returns a boundary address that can
+ * be used with pool_free_old(), or a NULL if no memory is allocated. */
+void *
+pool_boundary(alloc_pool_t p, size_t len)
+{
+	struct alloc_pool *pool = (struct alloc_pool *)p;
+	struct pool_extent *cur;
+
+	if (!pool || !pool->extents)
+		return NULL;
+
+	cur = pool->extents;
+
+	if (cur->free < len) {
+		cur->bound += cur->free;
+		cur->free = 0;
+	}
+
+	return PTR_ADD(cur->start, cur->free);
+}
+
 #define FDPRINT(label, value) \
 	snprintf(buf, sizeof buf, label, value), \
 	write(fd, buf, strlen(buf))
@@ -270,16 +326,11 @@ pool_stats(alloc_pool_t p, int fd, int summarize)
 	if (summarize)
 		return;
 
-	if (!pool->live && !pool->free)
+	if (!pool->extents)
 		return;
 
 	write(fd, "\n", 1);
 
-	if (pool->live)
-		FDEXTSTAT(pool->live);
-	strlcpy(buf, "       FREE    BOUND\n", sizeof buf);
-	write(fd, buf, strlen(buf));
-
-	for (cur = pool->free; cur; cur = cur->next)
+	for (cur = pool->extents; cur; cur = cur->next)
 		FDEXTSTAT(cur);
 }
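
For orientation only, a minimal usage sketch of the pool API as it stands after this patch follows. It is not part of the diff above; it assumes the declarations that rsync's pool_alloc header provides for pool_create(), pool_alloc(), pool_boundary(), pool_free_old() and pool_destroy(), plus the POOL_CLEAR flag seen in the hunks. The error handler name "oom" and the allocation sizes are hypothetical placeholders.

#include <stdio.h>
#include <stdlib.h>
#include "pool_alloc.h"		/* assumed header for the pool API shown above */

/* Hypothetical out-of-memory handler passed to pool_create(). */
static void oom(const char *msg)
{
	fprintf(stderr, "pool allocation failed: %s\n", msg);
	exit(1);
}

int main(void)
{
	/* Default extent size, 16-byte quantum, cleared memory. */
	alloc_pool_t pool = pool_create(0, 16, oom, POOL_CLEAR);
	char *a, *b;
	void *bound;

	/* Allocations are carved out of the top ("live") extent. */
	a = pool_alloc(pool, 100, "first chunk");

	/* Remember the edge of everything allocated so far. */
	bound = pool_boundary(pool, 0);

	b = pool_alloc(pool, 200, "second chunk");
	(void)a; (void)b;

	/* Release extents that hold only data older than the boundary.
	 * Per the comment in the patch, don't mix this with pool_free(). */
	pool_free_old(pool, bound);

	pool_destroy(pool);
	return 0;
}

The point of the pool_boundary()/pool_free_old() pair introduced here is that a caller can discard everything older than a remembered boundary in one call, instead of issuing a pool_free() per allocation; the single "extents" list (top extent live, older extents behind it) is what makes that sweep possible.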