summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bitmap_test.c3
-rw-r--r--lib/buffer_test.c1
-rw-r--r--lib/event.c2
-rw-r--r--lib/event_test.c1
-rw-r--r--lib/flowspec_test.c11
-rw-r--r--lib/hash.h6
-rw-r--r--lib/hash_test.c1
-rw-r--r--lib/lists.h1
-rw-r--r--lib/mempool.c74
-rw-r--r--lib/printf.c48
-rw-r--r--lib/resource.c32
-rw-r--r--lib/resource.h26
-rw-r--r--lib/slab.c128
-rw-r--r--lib/slab_test.c171
-rw-r--r--lib/string.h5
-rw-r--r--lib/timer.c1
-rw-r--r--lib/tlists.h172
18 files changed, 554 insertions, 131 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 4378a7bd..812f721c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,6 +2,6 @@ src := bitmap.c bitops.c blake2s.c blake2b.c checksum.c event.c flowspec.c idm.c
obj := $(src-o-files)
$(all-daemon)
-tests_src := bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c
+tests_src := bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c slab_test.c
tests_targets := $(tests_targets) $(tests-target-files)
tests_objs := $(tests_objs) $(src-o-files)
diff --git a/lib/bitmap_test.c b/lib/bitmap_test.c
index 0595a4d0..07860c94 100644
--- a/lib/bitmap_test.c
+++ b/lib/bitmap_test.c
@@ -24,7 +24,6 @@ t_bmap_set_clear_random(void)
{
struct bmap b;
- resource_init();
bmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};
@@ -60,7 +59,6 @@ t_hmap_set_clear_random(void)
{
struct hmap b;
- resource_init();
hmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};
@@ -119,7 +117,6 @@ t_hmap_set_clear_fill(void)
{
struct hmap b;
- resource_init();
hmap_init(&b, &root_pool, 1024);
char expected[MAX_NUM] = {};
diff --git a/lib/buffer_test.c b/lib/buffer_test.c
index 5b7de330..0629e901 100644
--- a/lib/buffer_test.c
+++ b/lib/buffer_test.c
@@ -41,7 +41,6 @@ fill_expected_array(void)
static void
init_buffer(void)
{
- resource_init();
buffer_pool = &root_pool;
BUFFER_INIT(buf, buffer_pool, MAX_NUM);
}
diff --git a/lib/event.c b/lib/event.c
index 273447e0..33dc00b0 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -157,6 +157,7 @@ ev_run_list(event_list *l)
io_log_event(e->hook, e->data);
ev_run(e);
+ tmp_flush();
}
return !EMPTY_LIST(*l);
@@ -184,6 +185,7 @@ ev_run_list_limited(event_list *l, uint limit)
io_log_event(e->hook, e->data);
ev_run(e);
+ tmp_flush();
limit--;
}
diff --git a/lib/event_test.c b/lib/event_test.c
index e1215bba..e1fbea8f 100644
--- a/lib/event_test.c
+++ b/lib/event_test.c
@@ -53,7 +53,6 @@ t_ev_run_list(void)
{
int i;
- resource_init();
olock_init();
timer_init();
io_init();
diff --git a/lib/flowspec_test.c b/lib/flowspec_test.c
index ed4afe51..03649b99 100644
--- a/lib/flowspec_test.c
+++ b/lib/flowspec_test.c
@@ -446,10 +446,7 @@ t_validation6(void)
static int
t_builder4(void)
{
- resource_init();
-
struct flow_builder *fb = flow_builder_init(&root_pool);
- linpool *lp = lp_new_default(&root_pool);
/* Expectation */
@@ -492,7 +489,7 @@ t_builder4(void)
flow_builder_set_type(fb, FLOW_TYPE_TCP_FLAGS);
flow_builder_add_op_val(fb, 0, 0x55);
- net_addr_flow4 *res = flow_builder4_finalize(fb, lp);
+ net_addr_flow4 *res = flow_builder4_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
@@ -529,8 +526,6 @@ t_builder6(void)
{
net_addr_ip6 ip;
- resource_init();
- linpool *lp = lp_new_default(&root_pool);
struct flow_builder *fb = flow_builder_init(&root_pool);
fb->ipv6 = 1;
@@ -574,7 +569,7 @@ t_builder6(void)
flow_builder_set_type(fb, FLOW_TYPE_LABEL);
flow_builder_add_op_val(fb, 0, 0x55);
- net_addr_flow6 *res = flow_builder6_finalize(fb, lp);
+ net_addr_flow6 *res = flow_builder6_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
/* Reverse order */
@@ -601,7 +596,7 @@ t_builder6(void)
flow_builder_set_type(fb, FLOW_TYPE_DST_PREFIX);
flow_builder6_add_pfx(fb, &ip, 61);
- res = flow_builder6_finalize(fb, lp);
+ res = flow_builder6_finalize(fb, tmp_linpool);
bt_assert(memcmp(res, expect, expect->length) == 0);
return 1;
diff --git a/lib/hash.h b/lib/hash.h
index ea4ca6dd..8febb33f 100644
--- a/lib/hash.h
+++ b/lib/hash.h
@@ -215,6 +215,12 @@ mem_hash_mix(u64 *h, const void *p, uint s)
*h = *h * multiplier + pp[i];
}
+static inline void
+mem_hash_mix_num(u64 *h, u64 val)
+{
+ mem_hash_mix(h, &val, sizeof(val));
+}
+
static inline uint
mem_hash_value(u64 *h)
{
diff --git a/lib/hash_test.c b/lib/hash_test.c
index 59beb7c0..4bce7017 100644
--- a/lib/hash_test.c
+++ b/lib/hash_test.c
@@ -61,7 +61,6 @@ dump_nodes(void)
static void
init_hash_(uint order)
{
- resource_init();
my_pool = rp_new(&root_pool, "Test pool");
HASH_INIT(hash, my_pool, order);
diff --git a/lib/lists.h b/lib/lists.h
index 479f4ed1..7e6d5467 100644
--- a/lib/lists.h
+++ b/lib/lists.h
@@ -42,6 +42,7 @@ typedef union list { /* In fact two overlayed nodes */
};
} list;
+#define STATIC_LIST_INIT(name) name = { .head = &name.tail_node, .tail = &name.head_node, .null = NULL }
#define NODE (node *)
#define HEAD(list) ((void *)((list).head))
diff --git a/lib/mempool.c b/lib/mempool.c
index 90d7c774..325b1ecf 100644
--- a/lib/mempool.c
+++ b/lib/mempool.c
@@ -27,21 +27,22 @@
struct lp_chunk {
struct lp_chunk *next;
- uint size;
uintptr_t data_align[0];
byte data[0];
};
-const int lp_chunk_size = sizeof(struct lp_chunk);
+#define LP_DATA_SIZE (page_size - OFFSETOF(struct lp_chunk, data))
struct linpool {
resource r;
byte *ptr, *end;
struct lp_chunk *first, *current; /* Normal (reusable) chunks */
struct lp_chunk *first_large; /* Large chunks */
- uint chunk_size, threshold, total, total_large;
+ uint total, total_large;
};
+_Thread_local linpool *tmp_linpool;
+
static void lp_free(resource *);
static void lp_dump(resource *);
static resource *lp_lookup(resource *, unsigned long);
@@ -59,19 +60,14 @@ static struct resclass lp_class = {
/**
* lp_new - create a new linear memory pool
* @p: pool
- * @blk: block size
*
* lp_new() creates a new linear memory pool resource inside the pool @p.
- * The linear pool consists of a list of memory chunks of size at least
- * @blk.
+ * The linear pool consists of a list of memory chunks of page size.
*/
linpool
-*lp_new(pool *p, uint blk)
+*lp_new(pool *p)
{
- linpool *m = ralloc(p, &lp_class);
- m->chunk_size = blk;
- m->threshold = 3*blk/4;
- return m;
+ return ralloc(p, &lp_class);
}
/**
@@ -102,14 +98,13 @@ lp_alloc(linpool *m, uint size)
else
{
struct lp_chunk *c;
- if (size >= m->threshold)
+ if (size > LP_DATA_SIZE)
{
/* Too large => allocate large chunk */
c = xmalloc(sizeof(struct lp_chunk) + size);
m->total_large += size;
c->next = m->first_large;
m->first_large = c;
- c->size = size;
}
else
{
@@ -121,10 +116,10 @@ lp_alloc(linpool *m, uint size)
else
{
/* Need to allocate a new chunk */
- c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
- m->total += m->chunk_size;
+ c = alloc_page();
+
+ m->total += LP_DATA_SIZE;
c->next = NULL;
- c->size = m->chunk_size;
if (m->current)
m->current->next = c;
@@ -133,7 +128,7 @@ lp_alloc(linpool *m, uint size)
}
m->current = c;
m->ptr = c->data + size;
- m->end = c->data + m->chunk_size;
+ m->end = c->data + LP_DATA_SIZE;
}
return c->data;
}
@@ -195,7 +190,7 @@ lp_flush(linpool *m)
/* Move ptr to the first chunk and free all large chunks */
m->current = c = m->first;
m->ptr = c ? c->data : NULL;
- m->end = c ? c->data + m->chunk_size : NULL;
+ m->end = c ? c->data + LP_DATA_SIZE : NULL;
while (c = m->first_large)
{
@@ -218,6 +213,7 @@ lp_save(linpool *m, lp_state *p)
{
p->current = m->current;
p->large = m->first_large;
+ p->total_large = m->total_large;
p->ptr = m->ptr;
}
@@ -239,12 +235,12 @@ lp_restore(linpool *m, lp_state *p)
/* Move ptr to the saved pos and free all newer large chunks */
m->current = c = p->current;
m->ptr = p->ptr;
- m->end = c ? c->data + m->chunk_size : NULL;
+ m->end = c ? c->data + LP_DATA_SIZE : NULL;
+ m->total_large = p->total_large;
while ((c = m->first_large) && (c != p->large))
{
m->first_large = c->next;
- m->total_large -= c->size;
xfree(c);
}
}
@@ -258,7 +254,7 @@ lp_free(resource *r)
for(d=m->first; d; d = c)
{
c = d->next;
- xfree(d);
+ free_page(d);
}
for(d=m->first_large; d; d = c)
{
@@ -278,9 +274,7 @@ lp_dump(resource *r)
;
for(cntl=0, c=m->first_large; c; c=c->next, cntl++)
;
- debug("(chunk=%d threshold=%d count=%d+%d total=%d+%d)\n",
- m->chunk_size,
- m->threshold,
+ debug("(count=%d+%d total=%d+%d)\n",
cnt,
cntl,
m->total,
@@ -291,19 +285,22 @@ static struct resmem
lp_memsize(resource *r)
{
linpool *m = (linpool *) r;
- struct lp_chunk *c;
- int cnt = 0;
-
- for(c=m->first; c; c=c->next)
- cnt++;
- for(c=m->first_large; c; c=c->next)
- cnt++;
-
- return (struct resmem) {
- .effective = m->total + m->total_large,
- .overhead = ALLOC_OVERHEAD + sizeof(struct linpool) +
- cnt * (ALLOC_OVERHEAD + sizeof(struct lp_chunk)),
+ struct resmem sz = {
+ .overhead = sizeof(struct linpool) + ALLOC_OVERHEAD,
+ .effective = m->total_large,
};
+
+ for (struct lp_chunk *c = m->first_large; c; c = c->next)
+ sz.overhead += sizeof(struct lp_chunk) + ALLOC_OVERHEAD;
+
+ uint regular = 0;
+ for (struct lp_chunk *c = m->first; c; c = c->next)
+ regular++;
+
+ sz.effective += LP_DATA_SIZE * regular;
+ sz.overhead += (sizeof(struct lp_chunk) + ALLOC_OVERHEAD) * regular;
+
+ return sz;
}
@@ -314,10 +311,7 @@ lp_lookup(resource *r, unsigned long a)
struct lp_chunk *c;
for(c=m->first; c; c=c->next)
- if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
- return r;
- for(c=m->first_large; c; c=c->next)
- if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
+ if ((unsigned long) c->data <= a && (unsigned long) c->data + LP_DATA_SIZE > a)
return r;
return NULL;
}
diff --git a/lib/printf.c b/lib/printf.c
index 236df427..424d545f 100644
--- a/lib/printf.c
+++ b/lib/printf.c
@@ -568,3 +568,51 @@ buffer_puts(buffer *buf, const char *str)
buf->pos = (bp < be) ? bp : buf->end;
}
+
+#define POOL_PRINTF_MAXBUF 1024
+
+char *mb_vsprintf(pool *p, const char *fmt, va_list args)
+{
+ char buf[POOL_PRINTF_MAXBUF];
+ int count = bvsnprintf(buf, POOL_PRINTF_MAXBUF, fmt, args);
+
+ if (count < 0)
+ bug("Attempted to mb_vsprintf() a too long string");
+
+ char *out = mb_alloc(p, count + 1);
+ memcpy(out, buf, count + 1);
+ return out;
+}
+
+char *mb_sprintf(pool *p, const char *fmt, ...)
+{
+ va_list args;
+ char *out;
+ va_start(args, fmt);
+ out = mb_vsprintf(p, fmt, args);
+ va_end(args);
+ return out;
+}
+
+char *lp_vsprintf(linpool *p, const char *fmt, va_list args)
+{
+ char buf[POOL_PRINTF_MAXBUF];
+ int count = bvsnprintf(buf, POOL_PRINTF_MAXBUF, fmt, args);
+
+ if (count < 0)
+ bug("Attempted to lp_vsprintf() a too long string");
+
+ char *out = lp_alloc(p, count + 1);
+ memcpy(out, buf, count + 1);
+ return out;
+}
+
+char *lp_sprintf(linpool *p, const char *fmt, ...)
+{
+ va_list args;
+ char *out;
+ va_start(args, fmt);
+ out = lp_vsprintf(p, fmt, args);
+ va_end(args);
+ return out;
+}
diff --git a/lib/resource.c b/lib/resource.c
index 5d4c7780..89e559b4 100644
--- a/lib/resource.c
+++ b/lib/resource.c
@@ -70,6 +70,20 @@ rp_new(pool *p, const char *name)
return z;
}
+pool *
+rp_newf(pool *p, const char *fmt, ...)
+{
+ pool *z = rp_new(p, NULL);
+
+ va_list args;
+ va_start(args, fmt);
+ z->name = mb_vsprintf(p, fmt, args);
+ va_end(args);
+
+ return z;
+}
+
+
static void
pool_free(resource *P)
{
@@ -270,9 +284,12 @@ rlookup(unsigned long a)
void
resource_init(void)
{
+ resource_sys_init();
+
root_pool.r.class = &pool_class;
root_pool.name = "Root";
init_list(&root_pool.inside);
+ tmp_init(&root_pool);
}
/**
@@ -407,21 +424,6 @@ mb_realloc(void *m, unsigned size)
return b->data;
}
-/**
- * mb_move - move a memory block
- * @m: memory block
- * @p: target pool
- *
- * mb_move() moves the given memory block to another pool in the same way
- * as rmove() moves a plain resource.
- */
-void
-mb_move(void *m, pool *p)
-{
- struct mblock *b = SKIP_BACK(struct mblock, data, m);
- rmove(b, p);
-}
-
/**
* mb_free - free a memory block
diff --git a/lib/resource.h b/lib/resource.h
index 9ec41ed8..a4e110a5 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -44,6 +44,7 @@ typedef struct pool pool;
void resource_init(void);
pool *rp_new(pool *, const char *); /* Create new pool */
+pool *rp_newf(pool *, const char *, ...); /* Create a new pool with a formatted string as its name */
void rfree(void *); /* Free single resource */
void rdump(void *); /* Dump to debug output */
struct resmem rmemsize(void *res); /* Return size of memory used by the resource */
@@ -59,7 +60,6 @@ extern pool root_pool;
void *mb_alloc(pool *, unsigned size);
void *mb_allocz(pool *, unsigned size);
void *mb_realloc(void *m, unsigned size);
-void mb_move(void *, pool *);
void mb_free(void *);
/* Memory pools with linear allocation */
@@ -69,9 +69,10 @@ typedef struct linpool linpool;
typedef struct lp_state {
void *current, *large;
byte *ptr;
+ uint total_large;
} lp_state;
-linpool *lp_new(pool *, unsigned blk);
+linpool *lp_new(pool *);
void *lp_alloc(linpool *, unsigned size); /* Aligned */
void *lp_allocu(linpool *, unsigned size); /* Unaligned */
void *lp_allocz(linpool *, unsigned size); /* With clear */
@@ -79,10 +80,16 @@ void lp_flush(linpool *); /* Free everything, but leave linpool */
void lp_save(linpool *m, lp_state *p); /* Save state */
void lp_restore(linpool *m, lp_state *p); /* Restore state */
-extern const int lp_chunk_size;
-#define LP_GAS 1024
-#define LP_GOOD_SIZE(x) (((x + LP_GAS - 1) & (~(LP_GAS - 1))) - lp_chunk_size)
-#define lp_new_default(p) lp_new(p, LP_GOOD_SIZE(LP_GAS*4))
+extern _Thread_local linpool *tmp_linpool; /* Temporary linpool autoflushed regularly */
+
+#define tmp_alloc(sz) lp_alloc(tmp_linpool, sz)
+#define tmp_allocu(sz) lp_allocu(tmp_linpool, sz)
+#define tmp_allocz(sz) lp_allocz(tmp_linpool, sz)
+
+#define tmp_init(p) tmp_linpool = lp_new_default(p)
+#define tmp_flush() lp_flush(tmp_linpool)
+
+#define lp_new_default lp_new
/* Slabs */
@@ -91,7 +98,7 @@ typedef struct slab slab;
slab *sl_new(pool *, unsigned size);
void *sl_alloc(slab *);
void *sl_allocz(slab *);
-void sl_free(slab *, void *);
+void sl_free(void *);
/*
* Low-level memory allocation functions, please don't use
@@ -101,10 +108,11 @@ void sl_free(slab *, void *);
void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_size);
/* Allocator of whole pages; for use in slabs and other high-level allocators. */
-u64 get_page_size(void);
+extern long page_size;
void *alloc_page(void);
void free_page(void *);
-extern uint pages_kept;
+
+void resource_sys_init(void);
#ifdef HAVE_LIBDMALLOC
/*
diff --git a/lib/slab.c b/lib/slab.c
index 1d844bab..38d10626 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -32,6 +32,7 @@
#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/string.h"
+#include "lib/tlists.h"
#undef FAKE_SLAB /* Turn on if you want to debug memory allocations */
@@ -98,7 +99,7 @@ sl_allocz(slab *s)
}
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
@@ -153,11 +154,38 @@ slab_memsize(resource *r)
#define MAX_EMPTY_HEADS 1
+enum sl_head_state {
+ slh_empty = 2,
+ slh_partial = 0,
+ slh_full = 1,
+} PACKED;
+
+struct sl_head {
+ struct slab *slab;
+ TLIST_NODE(sl_head, struct sl_head) n;
+ u16 num_full;
+ enum sl_head_state state;
+ u32 used_bits[0];
+};
+
+struct sl_alignment { /* Magic structure for testing of alignment */
+ byte data;
+ int x[0];
+};
+
+#define TLIST_PREFIX sl_head
+#define TLIST_TYPE struct sl_head
+#define TLIST_ITEM n
+#define TLIST_WANT_WALK
+#define TLIST_WANT_ADD_HEAD
+
+#include "lib/tlists.h"
+
struct slab {
resource r;
uint obj_size, head_size, head_bitfield_len;
uint objs_per_slab, num_empty_heads, data_size;
- list empty_heads, partial_heads, full_heads;
+ struct sl_head_list empty_heads, partial_heads, full_heads;
};
static struct resclass sl_class = {
@@ -169,18 +197,15 @@ static struct resclass sl_class = {
slab_memsize
};
-struct sl_head {
- node n;
- u32 num_full;
- u32 used_bits[0];
-};
+#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(page_size-1)))
-struct sl_alignment { /* Magic structure for testing of alignment */
- byte data;
- int x[0];
-};
+#define SL_HEAD_CHANGE_STATE(_s, _h, _from, _to) ({ \
+ ASSERT_DIE(_h->state == slh_##_from); \
+ sl_head_rem_node(&_s->_from##_heads, _h); \
+ sl_head_add_head(&_s->_to##_heads, _h); \
+ _h->state = slh_##_to; \
+ })
-#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(get_page_size()-1)))
/**
* sl_new - create a new Slab
@@ -202,7 +227,6 @@ sl_new(pool *p, uint size)
s->obj_size = size;
s->head_size = sizeof(struct sl_head);
- u64 page_size = get_page_size();
do {
s->objs_per_slab = (page_size - s->head_size) / size;
@@ -218,9 +242,6 @@ sl_new(pool *p, uint size)
bug("Slab: object too large");
s->num_empty_heads = 0;
- init_list(&s->empty_heads);
- init_list(&s->partial_heads);
- init_list(&s->full_heads);
return s;
}
@@ -237,8 +258,7 @@ sl_alloc(slab *s)
struct sl_head *h;
redo:
- h = HEAD(s->partial_heads);
- if (!h->n.next)
+ if (!(h = s->partial_heads.first))
goto no_partial;
okay:
for (uint i=0; i<s->head_bitfield_len; i++)
@@ -258,23 +278,27 @@ okay:
return out;
}
- rem_node(&h->n);
- add_tail(&s->full_heads, &h->n);
+ SL_HEAD_CHANGE_STATE(s, h, partial, full);
goto redo;
no_partial:
- h = HEAD(s->empty_heads);
- if (h->n.next)
+ if (h = s->empty_heads.first)
{
- rem_node(&h->n);
- add_head(&s->partial_heads, &h->n);
+ SL_HEAD_CHANGE_STATE(s, h, empty, partial);
s->num_empty_heads--;
goto okay;
}
+
h = alloc_page();
ASSERT_DIE(SL_GET_HEAD(h) == h);
+
+#ifdef POISON
+ memset(h, 0xba, page_size);
+#endif
+
memset(h, 0, s->head_size);
- add_head(&s->partial_heads, &h->n);
+ h->slab = s;
+ sl_head_add_head(&s->partial_heads, h);
goto okay;
}
@@ -303,9 +327,10 @@ sl_allocz(slab *s)
* and returns it back to the Slab @s.
*/
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_head *h = SL_GET_HEAD(oo);
+ struct slab *s = h->slab;
#ifdef POISON
memset(oo, 0xdb, s->data_size);
@@ -318,19 +343,22 @@ sl_free(slab *s, void *oo)
h->used_bits[pos / 32] &= ~(1 << (pos % 32));
- if (h->num_full-- == s->objs_per_slab)
- {
- rem_node(&h->n);
- add_head(&s->partial_heads, &h->n);
- }
+ if ((h->num_full-- == s->objs_per_slab) && (h->state == slh_full))
+ SL_HEAD_CHANGE_STATE(s, h, full, partial);
else if (!h->num_full)
{
- rem_node(&h->n);
+ sl_head_rem_node(&s->partial_heads, h);
if (s->num_empty_heads >= MAX_EMPTY_HEADS)
+ {
+#ifdef POISON
+ memset(h, 0xde, page_size);
+#endif
free_page(h);
+ }
else
{
- add_head(&s->empty_heads, &h->n);
+ sl_head_add_head(&s->empty_heads, h);
+ h->state = slh_empty;
s->num_empty_heads++;
}
}
@@ -340,13 +368,12 @@ static void
slab_free(resource *r)
{
slab *s = (slab *) r;
- struct sl_head *h, *g;
- WALK_LIST_DELSAFE(h, g, s->empty_heads)
+ WALK_TLIST_DELSAFE(sl_head, h, &s->empty_heads)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->partial_heads)
+ WALK_TLIST_DELSAFE(sl_head, h, &s->partial_heads)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->full_heads)
+ WALK_TLIST_DELSAFE(sl_head, h, &s->full_heads)
free_page(h);
}
@@ -355,13 +382,12 @@ slab_dump(resource *r)
{
slab *s = (slab *) r;
int ec=0, pc=0, fc=0;
- struct sl_head *h;
- WALK_LIST(h, s->empty_heads)
+ WALK_TLIST(sl_head, h, &s->empty_heads)
ec++;
- WALK_LIST(h, s->partial_heads)
+ WALK_TLIST(sl_head, h, &s->partial_heads)
pc++;
- WALK_LIST(h, s->full_heads)
+ WALK_TLIST(sl_head, h, &s->full_heads)
fc++;
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
@@ -371,27 +397,26 @@ slab_memsize(resource *r)
{
slab *s = (slab *) r;
size_t heads = 0;
- struct sl_head *h;
- WALK_LIST(h, s->full_heads)
+ WALK_TLIST(sl_head, h, &s->full_heads)
heads++;
size_t items = heads * s->objs_per_slab;
- WALK_LIST(h, s->partial_heads)
+ WALK_TLIST(sl_head, h, &s->partial_heads)
{
heads++;
items += h->num_full;
}
- WALK_LIST(h, s->empty_heads)
+ WALK_TLIST(sl_head, h, &s->empty_heads)
heads++;
- size_t eff = items * s->obj_size;
+ size_t eff = items * s->data_size;
return (struct resmem) {
.effective = eff,
- .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * get_page_size() - eff,
+ .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
};
}
@@ -399,13 +424,12 @@ static resource *
slab_lookup(resource *r, unsigned long a)
{
slab *s = (slab *) r;
- struct sl_head *h;
- WALK_LIST(h, s->partial_heads)
- if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+ WALK_TLIST(sl_head, h, &s->partial_heads)
+ if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
- WALK_LIST(h, s->full_heads)
- if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+ WALK_TLIST(sl_head, h, &s->full_heads)
+ if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;
}
diff --git a/lib/slab_test.c b/lib/slab_test.c
new file mode 100644
index 00000000..803d0215
--- /dev/null
+++ b/lib/slab_test.c
@@ -0,0 +1,171 @@
+/*
+ * BIRD Library -- Slab Alloc / Dealloc Tests
+ *
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#include "test/birdtest.h"
+#include "lib/resource.h"
+#include "lib/bitops.h"
+
+static const int sizes[] = {
+ 8, 12, 18, 27, 41, 75, 131, 269,
+};
+
+#define TEST_SIZE 1024 * 128
+#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
+
+struct test_request {
+ int size;
+ enum strategy {
+ TEST_NONE,
+ TEST_FORWARDS,
+ TEST_BACKWARDS,
+ TEST_RANDOM,
+ TEST_MIXED,
+ TEST__MAX,
+ } strategy;
+};
+
+const char * const strategy_name[TEST__MAX] = {
+ [TEST_FORWARDS] = "forwards",
+ [TEST_BACKWARDS] = "backwards",
+ [TEST_RANDOM] = "random",
+ [TEST_MIXED] = "mixed",
+};
+
+static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
+{
+ byte *out = sl_alloc(s);
+
+ for (int p=0; p < sz; p++)
+ out[p] = p & 0xff;
+
+ struct resmem ns = rmemsize((resource *) s);
+
+ bt_assert(sliz->effective + sz == ns.effective);
+ bt_assert((sliz->overhead - sz - ns.overhead) % page_size == 0);
+
+ *sliz = ns;
+
+ return out;
+}
+
+static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
+{
+ for (int p=0; p < sz; p++)
+ {
+ bt_assert(block[p] == (p & 0xff));
+ block[p]++;
+ }
+
+ sl_free(block);
+
+ struct resmem ns = rmemsize((resource *) s);
+
+ bt_assert(sliz->effective - sz == ns.effective);
+ bt_assert((sliz->overhead + sz - ns.overhead) % page_size == 0);
+
+ *sliz = ns;
+}
+
+static inline struct resmem get_memsize(slab *s)
+{
+ struct resmem sz = rmemsize((resource *) s);
+ bt_assert(sz.effective == 0);
+ return sz;
+}
+
+static int
+t_slab(const void *data)
+{
+ const struct test_request *tr = data;
+ int sz = tr->size;
+
+ slab *s = sl_new(&root_pool, sz);
+ struct resmem sliz = get_memsize(s);
+
+ int n = ITEMS(sz);
+ byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+
+ switch (tr->strategy) {
+ case TEST_FORWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = 0; i < n; i++)
+ test_free(s, block[i], sz, &sliz);
+
+ break;
+
+ case TEST_BACKWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = n - 1; i >= 0; i--)
+ test_free(s, block[i], sz, &sliz);
+
+ break;
+
+ case TEST_RANDOM:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = 0; i < n; i++)
+ {
+ int pos = bt_random() % (n - i);
+ test_free(s, block[pos], sz, &sliz);
+ if (pos != n - i - 1)
+ block[pos] = block[n - i - 1];
+ }
+
+ break;
+
+ case TEST_MIXED:
+ {
+ int cur = 0;
+ int pending = n;
+
+ while (cur + pending > 0) {
+ int action = bt_random() % (cur + pending);
+
+ if (action < cur) {
+ test_free(s, block[action], sz, &sliz);
+ if (action != --cur)
+ block[action] = block[cur];
+ } else {
+ block[cur++] = test_alloc(s, sz, &sliz);
+ pending--;
+ }
+ }
+
+ break;
+ }
+
+ default: bug("This shouldn't happen");
+ }
+
+ mb_free(block);
+ return 1;
+}
+int main(int argc, char *argv[])
+{
+ bt_init(argc, argv);
+
+ struct test_request tr;
+
+ for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
+ for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
+ {
+ tr = (struct test_request) {
+ .size = sizes[i],
+ .strategy = strategy,
+ };
+ bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, strategy=%s",
+ tr.size, strategy_name[strategy]);
+ }
+
+ return bt_exit_value();
+}
diff --git a/lib/string.h b/lib/string.h
index 976b1c24..2829943d 100644
--- a/lib/string.h
+++ b/lib/string.h
@@ -20,6 +20,11 @@ int bvsprintf(char *str, const char *fmt, va_list args);
int bsnprintf(char *str, int size, const char *fmt, ...);
int bvsnprintf(char *str, int size, const char *fmt, va_list args);
+char *mb_sprintf(pool *p, const char *fmt, ...);
+char *mb_vsprintf(pool *p, const char *fmt, va_list args);
+char *lp_sprintf(linpool *p, const char *fmt, ...);
+char *lp_vsprintf(linpool *p, const char *fmt, va_list args);
+
int buffer_vprint(buffer *buf, const char *fmt, va_list args);
int buffer_print(buffer *buf, const char *fmt, ...);
void buffer_puts(buffer *buf, const char *str);
diff --git a/lib/timer.c b/lib/timer.c
index 381163d0..c47e0bbc 100644
--- a/lib/timer.c
+++ b/lib/timer.c
@@ -233,6 +233,7 @@ timers_fire(struct timeloop *loop)
io_log_event(t->hook, t->data);
t->hook(t);
+ tmp_flush();
}
}
diff --git a/lib/tlists.h b/lib/tlists.h
new file mode 100644
index 00000000..e1ed79ea
--- /dev/null
+++ b/lib/tlists.h
@@ -0,0 +1,172 @@
+/*
+ * BIRD Library -- Typed Linked Lists
+ *
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ *
+ *
+ * This implementation of linked lists forces its members to be
+ * typed. On the other hand, it needs to be implemented as ugly macros to
+ * keep the needed genericity.
+ *
+ * Usage:
+ * 1. Include this file
+ * 2. Define the node structure
+ * 3. For every list type you need to define:
+ * A. #define TLIST_PREFIX and other macros
+ * B. Include this file once again
+ *
+ * Macros to define:
+ * TLIST_PREFIX: prefix to prepend to everything generated
+ * TLIST_TYPE: the actual node type
+ * TLIST_ITEM: where the tlist structure is
+ * TLIST_WANT_WALK: if defined, generates helper functions for the list walking macros
+ * TLIST_WANT_ADD_HEAD: if defined, TLIST_PREFIX_add_head() is generated to
+ * add an item to the beginning of the list
+ * TLIST_WANT_ADD_TAIL: if defined, TLIST_PREFIX_add_tail() is generated to
+ * add an item to the end of the list
+ *
+ * TLIST_PREFIX_rem_node() is always generated.
+ *
+ * All these macros are #undef-ed by including this file.
+ *
+ * Example:
+ *
+ * #include "lib/tlists.h"
+ *
+ * struct foo {
+ * ...
+ * TLIST_NODE(bar, struct foo) baz;
+ * ...
+ * };
+ *
+ * #define TLIST_PREFIX bar
+ * #define TLIST_TYPE struct foo
+ * #define TLIST_ITEM baz
+ *
+ * #define TLIST_WANT_WALK
+ * #define TLIST_WANT_ADD_HEAD
+ *
+ * #include "lib/tlists.h"
+ *
+ * ...
+ * (end of example)
+ *
+ */
+
+#ifdef _BIRD_LIB_TLISTS_H_
+# ifdef TLIST_PREFIX
+
+/* Check for mandatory arguments */
+#ifndef TLIST_TYPE
+#error "TLIST_TYPE must be defined"
+#endif
+#ifndef TLIST_ITEM
+#error "TLIST_ITEM must be defined"
+#endif
+#ifndef TLIST_PREFIX
+#error "TLIST_PREFIX must be defined"
+#endif
+
+#define TLIST_NAME(x) MACRO_CONCAT_AFTER(TLIST_PREFIX,_##x)
+#ifndef TLIST_LIST_STRUCT
+#define TLIST_LIST_STRUCT TLIST_NAME(list)
+#endif
+
+typedef struct TLIST_LIST_STRUCT {
+ TLIST_TYPE *first;
+ TLIST_TYPE *last;
+} TLIST_LIST_STRUCT;
+
+#ifdef TLIST_WANT_WALK
+static inline struct TLIST_NAME(node) * TLIST_NAME(node_get)(TLIST_TYPE *node)
+{ return &(node->TLIST_ITEM); }
+#endif
+
+#ifdef TLIST_WANT_ADD_HEAD
+static inline void TLIST_NAME(add_head)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
+{
+ ASSERT_DIE(!node->TLIST_ITEM.prev && !node->TLIST_ITEM.next);
+ if (node->TLIST_ITEM.next = list->first)
+ list->first->TLIST_ITEM.prev = node;
+ else
+ list->last = node;
+ list->first = node;
+}
+#endif
+
+#ifdef TLIST_WANT_ADD_TAIL
+static inline void TLIST_NAME(add_tail)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
+{
+ ASSERT_DIE(!node->TLIST_ITEM.prev && !node->TLIST_ITEM.next);
+ if (node->TLIST_ITEM.prev = list->last)
+ list->last->TLIST_ITEM.next = node;
+ else
+ list->first = node;
+ list->last = node;
+}
+#endif
+
+static inline void TLIST_NAME(rem_node)(TLIST_LIST_STRUCT *list, TLIST_TYPE *node)
+{
+ if (node->TLIST_ITEM.prev)
+ node->TLIST_ITEM.prev->TLIST_ITEM.next = node->TLIST_ITEM.next;
+ else
+ {
+ ASSERT_DIE(list->first == node);
+ list->first = node->TLIST_ITEM.next;
+ }
+
+ if (node->TLIST_ITEM.next)
+ node->TLIST_ITEM.next->TLIST_ITEM.prev = node->TLIST_ITEM.prev;
+ else
+ {
+ ASSERT_DIE(list->last == node);
+ list->last = node->TLIST_ITEM.prev;
+ }
+
+ node->TLIST_ITEM.next = node->TLIST_ITEM.prev = NULL;
+}
+
+#undef TLIST_PREFIX
+#undef TLIST_NAME
+#undef TLIST_LIST_STRUCT
+#undef TLIST_TYPE
+#undef TLIST_ITEM
+#undef TLIST_WANT_ADD_HEAD
+#undef TLIST_WANT_ADD_TAIL
+
+# endif
+#else
+#define _BIRD_LIB_TLISTS_H_
+
+#include "lib/macro.h"
+
+#if defined(TLIST_NAME) || defined(TLIST_PREFIX)
+#error "You should first include lib/tlists.h without requesting a TLIST"
+#endif
+
+#define TLIST_NODE(_name, _type) struct _name##_node { _type *next; _type *prev; }
+#define TLIST_LIST(_name) struct _name##_list
+
+/* Use ->first and ->last to access HEAD and TAIL */
+#define THEAD(_name, _list) (_list)->first
+#define TTAIL(_name, _list) (_list)->last
+
+/* Walkaround macros: simple and resilient to node removal */
+#define WALK_TLIST(_name, _node, _list) \
+ for (typeof((_list)->first) _node = (_list)->first; \
+ _node; _node = _name##_node_get((_node))->next)
+
+#define WALK_TLIST_DELSAFE(_name, _node, _list) \
+ for (typeof((_list)->first) _node = (_list)->first, \
+ _helper = _node ? _name##_node_get((_list)->first)->next : NULL; \
+ _node; \
+ (_node = _helper) ? (_helper = _name##_node_get(_helper)->next) : 0)
+
+/* Empty check */
+#define EMPTY_TLIST(_name, _list) (!(_list)->first)
+
+#endif
+