summaryrefslogtreecommitdiff
path: root/lib/mempool.c
diff options
context:
space:
mode:
authorMartin Mares <mj@ucw.cz>1999-03-29 19:35:47 +0000
committerMartin Mares <mj@ucw.cz>1999-03-29 19:35:47 +0000
commitf5c687f7911501ac1efd8163fade4862dc65456c (patch)
tree04fa54e6c2e09a1f3bee060b4694c8822fcbec31 /lib/mempool.c
parentf54801ffedf3d6342b37c1560502bfc24e7fe64a (diff)
Added lp_flush() which flushes contents of a linear pool, leaving all the
memory available for subsequent allocations from the same pool. Both flushing and re-using the memory cost just a few instructions.
Diffstat (limited to 'lib/mempool.c')
-rw-r--r--lib/mempool.c51
1 file changed, 40 insertions, 11 deletions
diff --git a/lib/mempool.c b/lib/mempool.c
index f3b84a57..49e6e900 100644
--- a/lib/mempool.c
+++ b/lib/mempool.c
@@ -1,7 +1,7 @@
/*
* BIRD Resource Manager -- Memory Pools
*
- * (c) 1998 Martin Mares <mj@ucw.cz>
+ * (c) 1998--1999 Martin Mares <mj@ucw.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@@ -20,8 +20,9 @@ struct lp_chunk {
struct linpool {
resource r;
byte *ptr, *end;
- struct lp_chunk *first, **plast;
- unsigned chunk_size, threshold, total;
+ struct lp_chunk *first, *current, **plast; /* Normal (reusable) chunks */
+ struct lp_chunk *first_large; /* Large chunks */
+ unsigned chunk_size, threshold, total, total_large;
};
void lp_free(resource *);
@@ -39,11 +40,12 @@ linpool
{
linpool *m = ralloc(p, &lp_class);
m->ptr = m->end = NULL;
- m->first = NULL;
+ m->first = m->current = NULL;
m->plast = &m->first;
+ m->first_large = NULL;
m->chunk_size = blk;
m->threshold = 3*blk/4;
- m->total = 0;
+ m->total = m->total_large = 0;
return m;
}
@@ -63,19 +65,30 @@ lp_alloc(linpool *m, unsigned size)
struct lp_chunk *c;
if (size >= m->threshold)
{
+ /* Too large => allocate large chunk */
c = xmalloc(sizeof(struct lp_chunk) + size);
- m->total += size;
+ m->total_large += size;
+ c->next = m->first_large;
+ m->first_large = c;
}
else
{
- c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
+ if (m->current && m->current->next)
+ /* Still have free chunks from previous incarnation (before lp_flush()) */
+ c = m->current->next;
+ else
+ {
+ /* Need to allocate a new chunk */
+ c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
+ m->total += m->chunk_size;
+ *m->plast = c;
+ m->plast = &c->next;
+ c->next = NULL;
+ }
+ m->current = c;
m->ptr = c->data + size;
m->end = c->data + m->chunk_size;
- m->total += m->chunk_size;
}
- *m->plast = c;
- m->plast = &c->next;
- c->next = NULL;
return c->data;
}
}
@@ -104,6 +117,22 @@ lp_allocz(linpool *m, unsigned size)
}
void
+lp_flush(linpool *m)
+{
+ struct lp_chunk *c;
+
+ /* Relink all normal chunks to free list and free all large chunks */
+ m->ptr = m->end = NULL;
+ m->current = m->first;
+ while (c = m->first_large)
+ {
+ m->first_large = c->next;
+ xfree(c);
+ }
+ m->total_large = 0;
+}
+
+void
lp_free(resource *r)
{
linpool *m = (linpool *) r;