From 7efdcf3218fe580476608f3b3c53bce35fc5a2a9 Mon Sep 17 00:00:00 2001
From: "J.W. Schultz"
Date: Tue, 10 Feb 2004 03:26:41 +0000
Subject: [PATCH] Added allocation pool code.

---
 lib/pool_alloc.3 | 199 +++++++++++++++++++++++++++++++
 lib/pool_alloc.c | 297 +++++++++++++++++++++++++++++++++++++++++++++++
 lib/pool_alloc.h |  20 ++++
 3 files changed, 516 insertions(+)
 create mode 100644 lib/pool_alloc.3
 create mode 100644 lib/pool_alloc.c
 create mode 100644 lib/pool_alloc.h

diff --git a/lib/pool_alloc.3 b/lib/pool_alloc.3
new file mode 100644
index 00000000..b9d338b1
--- /dev/null
+++ b/lib/pool_alloc.3
@@ -0,0 +1,199 @@
.ds d \-\^\-
.ds o \fR[\fP
.ds c \fR]\fP
.ds | \fR|\fP
.de D
\\.B \*d\\$1
..
.de DI
\\.BI \*d\\$1 \\$2
..
.de DR
\\.BR \*d\\$1 \\$2
..
.de Di
\\.BI \*d\\$1 " \\$2"
..
.de Db
\\.B \*d\\$1 " \\$2"
..
.de Df
\\.B \*d\*ono\*c\\$1
..
.de See
See \fB\\$1\fP for details.
..
.de SeeIn
See \fB\\$1\fP in \fB\\$2\fP for details.
..
.TH POOL_ALLOC 3
.SH NAME
pool_alloc, pool_free, pool_talloc, pool_tfree, pool_create, pool_destroy
\- Allocate and free memory in managed allocation pools.
.SH SYNOPSIS
.B #include "pool_alloc.h"

\fBstruct alloc_pool *pool_create(size_t \fIsize\fB, size_t \fIquantum\fB, void (*\fIbomb\fB)(char *), int \fIflags\fB);

\fBvoid pool_destroy(struct alloc_pool *\fIpool\fB);

\fBvoid *pool_alloc(struct alloc_pool *\fIpool\fB, size_t \fIsize\fB, char *\fImsg\fB);

\fBvoid pool_free(struct alloc_pool *\fIpool\fB, size_t \fIsize\fB, void *\fIaddr\fB);

\fBvoid *pool_talloc(struct alloc_pool *\fIpool\fB, \fItype\fB, int \fIcount\fB, char *\fImsg\fB);

\fBvoid pool_tfree(struct alloc_pool *\fIpool\fB, \fItype\fB, int \fIcount\fB, void *\fIaddr\fB);
.SH DESCRIPTION
.P
The pool allocation routines use
.B malloc()
for underlying memory management.
What allocation pools do is cause
memory within a given pool to be in large contiguous blocks
(called extents) that when freed will be reusable.
Unlike
.B malloc()
the allocations are not managed individually.
Instead each extent tracks the total free memory within the
extent. Each extent can either be used to allocate memory
or to manage the freeing of memory within that extent.
When an extent has less free memory than a given
allocation request or when the first request to free
memory within that extent is received the extent ceases to
be used for allocation.
.P
This form of memory management is suited to large numbers of small
related allocations that are held for a while
and then freed as a group.
Because the
underlying allocations are done in large contiguous extents
when an extent is freed it releases a large enough
contiguous block of memory to be useful to subsequent
.B malloc()
and
.B pool_alloc()
calls even if allocations from other pools or from
.B malloc()
are made between allocations from a given pool.
.P
.B pool_create()
creates an allocation pool for subsequent calls to the pool
allocation functions.
When an extent is created for allocations it will be
.I size
bytes.
Allocations from the pool have their sizes rounded up to a
multiple of
.I quantum
bytes in length.
Specifying
.B 0
for
.I quantum
will produce a quantum that should meet maximal alignment
on most platforms.
If the
.B POOL_QALIGN
.I flag
is set allocations will be aligned to addresses that are a
multiple of
.IR quantum .
If the
.B POOL_CLEAR
.I flag
is set all allocations from the pool will be zero filled.
.P
.B pool_destroy()
destroys an allocation pool and frees all memory allocated
in that pool.
.P
.B pool_alloc()
allocates
.I size
bytes from the specified
.IR pool .
If
.I size
is
.B 0
.I quantum
bytes will be allocated.
If the requested memory cannot be allocated
.B pool_alloc()
will call the
.I bomb()
function, if defined, with
.I msg
as its sole argument and
.B NULL
will be returned.
.P
.B pool_free()
frees
.I size
bytes pointed to by
.I addr
previously allocated in the specified
.IR pool .
The memory freed within an extent will not be reusable until
all of the memory in that extent has been freed but
depending on the order in which the
allocations are freed some extents may be released for reuse
while others are still in use.
If
.I size
is
.B 0
.I quantum
bytes will be freed.
If
.I addr
is
.B 0
no memory will be freed but subsequent allocations will come
from a new extent.
.P
.B pool_talloc()
is a macro that takes a
.I type
and
.I count
instead of
.I size
and will cast the return value to the correct type.
.P
.B pool_tfree
is a macro to free memory previously allocated in the
specified
.IR pool .
.SH RETURN VALUE
.B pool_create()
returns a pointer to a
.BR "struct alloc_pool" .
.P
.B pool_alloc()
and
.B pool_talloc()
return pointers to the allocated memory,
or NULL if the request fails.
For each extent so long as no allocations are smaller than variable
alignment requirements this pointer will be suitably
aligned for any kind of variable.
The return type of
.B pool_alloc()
will normally require casting to the desired type but
.B pool_talloc()
will return a pointer of the requested
.IR type .
.P
.BR pool_free() ,
.B pool_tfree()
and
.B pool_destroy()
return no value.
.SH SEE ALSO
.nf
malloc(3)
.SH AUTHOR
pool_alloc was created by J.W. Schultz of Pegasystems Technologies.
+.SH BUGS AND ISSUES diff --git a/lib/pool_alloc.c b/lib/pool_alloc.c new file mode 100644 index 00000000..acb356a9 --- /dev/null +++ b/lib/pool_alloc.c @@ -0,0 +1,297 @@ +#include "rsync.h" + +#define POOL_DEF_EXTENT (32 * 1024) + +struct alloc_pool +{ + size_t size; /* extent size */ + size_t quantum; /* allocation quantum */ + struct pool_extent *live; /* current extent for + * allocations */ + struct pool_extent *free; /* unfreed extent list */ + void (*bomb)(); + /* function to call if + * malloc fails */ + int flags; + + /* statistical data */ + unsigned long e_created; /* extents created */ + unsigned long e_freed; /* extents detroyed */ + uint64 n_allocated; /* calls to alloc */ + uint64 n_freed; /* calls to free */ + uint64 b_allocated; /* cum. bytes allocated */ + uint64 b_freed; /* cum. bytes freed */ +}; + +struct pool_extent +{ + void *start; /* starting address */ + size_t free; /* free bytecount */ + size_t bound; /* bytes bound by padding, + * overhead and freed */ + struct pool_extent *next; +}; + +#define MINALIGN (sizeof (void *)) + +alloc_pool_t +pool_create(size_t size, size_t quantum, + void (*bomb)(char *), int flags) +{ + struct alloc_pool *pool; + + if (!(pool = (struct alloc_pool*) malloc(sizeof (struct alloc_pool)))) + return pool; + memset(pool, 0, sizeof (struct alloc_pool)); + + pool->size = size /* round extent size to min alignment reqs */ + ? (size + MINALIGN - 1) & ~(MINALIGN - 1) + : POOL_DEF_EXTENT; + if (pool->flags & POOL_INTERN) + { + pool->size -= sizeof (struct pool_extent); + flags |= POOL_APPEND; + } + pool->quantum = quantum ? 
quantum : MINALIGN; + pool->bomb = bomb; + pool->flags = flags; + + return pool; +} + +void +pool_destroy(alloc_pool_t p) +{ + struct alloc_pool *pool = (struct alloc_pool *) p; + struct pool_extent *cur, *next; + + if (!pool) + return; + + if (pool->live) + { + cur = pool->live; + free(cur->start); + if (!(pool->flags & POOL_APPEND)) + free(cur); + } + for (cur = pool->free; cur; cur = next) + { + next = cur->next; + free(cur->start); + if (!(pool->flags & POOL_APPEND)) + free(cur); + } + free(pool); +} + +void *pool_alloc(alloc_pool_t p, size_t len, char *bomb) +{ + struct alloc_pool *pool = (struct alloc_pool *) p; + if (!pool) + return NULL; + + if (!len) + len = pool->quantum; + else if (pool->quantum > 1 && len % pool->quantum) + len += pool->quantum - len % pool->quantum; + + if (len > pool->size) + goto bomb; + + if (!pool->live || len > pool->live->free) + { + void *start; + size_t free; + size_t bound; + size_t sqew; + size_t asize; + + if (pool->live) + { + pool->live->next = pool->free; + pool->free = pool->live; + } + + free = pool->size; + bound = 0; + + asize = pool->size; + if (pool->flags & POOL_APPEND) + asize += sizeof (struct pool_extent); + + if (!(start = (void *) malloc(asize))) + goto bomb; + + if (pool->flags & POOL_CLEAR) + memset(start, 0, pool->size); + + if (pool->flags & POOL_APPEND) + { + pool->live = start + free; + } + else if (!(pool->live = (struct pool_extent *) malloc(sizeof (struct pool_extent)))) + { + goto bomb; + } + if (pool->flags & POOL_QALIGN && pool->quantum > 1 + && (sqew = (size_t)(start + free) % pool->quantum)) + { + bound += sqew; + free -= sqew; + } + pool->live->start = start; + pool->live->free = free; + pool->live->bound = bound; + pool->live->next = NULL; + + pool->e_created++; + } + + pool->n_allocated++; + pool->b_allocated += len; + + pool->live->free -= len; + + return pool->live->start + pool->live->free; + +bomb: + if (pool->bomb) + (*pool->bomb)(bomb); + return NULL; +} + +void +pool_free(alloc_pool_t 
p, size_t len, void *addr) +{ + struct alloc_pool *pool = (struct alloc_pool *) p; + struct pool_extent *cur; + struct pool_extent *prev; + + if (!pool) + return; + + if (!len) + len = pool->quantum; + else if (pool->quantum > 1 && len % pool->quantum) + len += pool->quantum - len % pool->quantum; + + if (!addr && pool->live) + { + pool->live->next = pool->free; + pool->free = pool->live; + pool->live = NULL; + return; + } + pool->n_freed++; + pool->b_freed += len; + + cur = pool->live; + if (cur + && addr >= cur->start + && addr < cur->start + pool->size) + { + if (addr == cur->start + cur->free) + { + if (pool->flags & POOL_CLEAR) + memset(addr, 0, len); + pool->b_freed += len; + } else { + cur->bound += len; + } + if (cur->free + cur->bound >= pool->size) + { + size_t sqew; + + cur->free = pool->size; + cur->bound = 0; + if (pool->flags & POOL_QALIGN && pool->quantum > 1 + && (sqew = (size_t)(cur->start + cur->free) % pool->quantum)) + { + cur->bound += sqew; + cur->free -= sqew; + } + } + return; + } + for (prev = NULL, cur = pool->free; cur; prev = cur, cur = cur->next) + { + if (addr >= cur->start + && addr < cur->start + pool->size) + break; + } + if (!cur) + return; + + if (prev) + { + prev->next = cur->next; + cur->next = pool->free; + pool->free = cur; + } + cur->bound += len; + + if (cur->free + cur->bound >= pool->size) + { + pool->free = cur->next; + + free(cur->start); + if (!(pool->flags & POOL_APPEND)) + free(cur); + pool->e_freed++; + } + return; +} + +#define FDPRINT(label, value) \ + snprintf(buf, BUFSIZ, label, value), \ + write(fd, buf, strlen(buf)); + +#define FDEXTSTAT(ext) \ + snprintf(buf, BUFSIZ, " %12ld %5ld\n", \ + (long) ext->free, \ + (long) ext->bound), \ + write(fd, buf, strlen(buf)) + +void +pool_stats(alloc_pool_t p, int fd, int summarize) +{ + struct alloc_pool *pool = (struct alloc_pool *) p; + struct pool_extent *cur; + char buf[BUFSIZ]; + + if (!pool) + return; + + FDPRINT(" Extent size: %12ld\n", (long) pool->size); + 
FDPRINT(" Alloc quantum: %12ld\n", (long) pool->quantum); + FDPRINT(" Extents created: %12ld\n", pool->e_created); + FDPRINT(" Extents freed: %12ld\n", pool->e_freed); + FDPRINT(" Alloc count: %12.0f\n", (double) pool->n_allocated); + FDPRINT(" Free Count: %12.0f\n", (double) pool->n_freed); + FDPRINT(" Alloc bytes: %12.0f\n", (double) pool->b_allocated); + FDPRINT(" Free bytes: %12.0f\n", (double) pool->b_freed); + + if (summarize) + return; + + if (!pool->live && !pool->free) + return; + + write(fd, "\n", 1); + + if (pool->live) + { + FDEXTSTAT(pool->live); + } + strcpy(buf, " FREE BOUND\n"); + write(fd, buf, strlen(buf)); + + cur = pool->free; + while (cur) + { + FDEXTSTAT(cur); + cur = cur->next; + } +} + diff --git a/lib/pool_alloc.h b/lib/pool_alloc.h new file mode 100644 index 00000000..a4ab761c --- /dev/null +++ b/lib/pool_alloc.h @@ -0,0 +1,20 @@ +#include + +#define POOL_CLEAR (1<<0) /* zero fill allocations */ +#define POOL_QALIGN (1<<1) /* align data to quanta */ +#define POOL_INTERN (1<<2) /* Allocate extent structures */ +#define POOL_APPEND (1<<3) /* or appended to extent data */ + +typedef void *alloc_pool_t; + +alloc_pool_t pool_create(size_t size, size_t quantum, void (*bomb)(char *), int flags); +void pool_destroy(alloc_pool_t pool); +void *pool_alloc(alloc_pool_t pool, size_t size, char *bomb); +void pool_free(alloc_pool_t pool, size_t size, void *addr); + +#define pool_talloc(pool, type, count, bomb) \ + ((type *)pool_alloc(pool, sizeof(type) * count, bomb)) + +#define pool_tfree(pool, type, count, addr) \ + (pool_free(pool, sizeof(type) * count, addr)) + -- 2.34.1