author		Maria Matejka <mq@ucw.cz>	2022-05-30 15:27:46 +0200
committer	Maria Matejka <mq@ucw.cz>	2022-05-30 15:27:46 +0200
commit		d024f471ea5239a8cb6ce2ccd83b686a1d438aa5 (patch)
tree		befe1cead84544d44aec88dbd3aee35aa5920f75 /lib
parent		921344c3ba5a0e30f04511d2039dff79b6f82dd9 (diff)
parent		ebd807c0b8eb0b7a3dc3371cd4c87ae886c00885 (diff)
Merge commit 'ebd807c0b8eb0b7a3dc3371cd4c87ae886c00885' into haugesund
Diffstat (limited to 'lib')
-rw-r--r--	lib/resource.h	  2
-rw-r--r--	lib/slab.c	 44
-rw-r--r--	lib/slab_test.c	158
3 files changed, 104 insertions(+), 100 deletions(-)
diff --git a/lib/resource.h b/lib/resource.h
index ed9e109d..8b180603 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -100,7 +100,7 @@ typedef struct slab slab;
slab *sl_new(pool *, unsigned size);
void *sl_alloc(slab *);
void *sl_allocz(slab *);
-void sl_free(slab *, void *);
+void sl_free(void *);
/*
* Low-level memory allocation functions, please don't use
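
The visible API change: sl_free() now takes only the object pointer and finds
its owning slab on its own. A minimal before/after sketch of a caller, with a
hypothetical slab *s and object obj:

	void *obj = sl_alloc(s);
	/* before this merge, the caller had to pass the slab back in: */
	/* sl_free(s, obj); */
	/* after it, the slab is recovered from the object's page header: */
	sl_free(obj);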
diff --git a/lib/slab.c b/lib/slab.c
index 9e0f7798..6da602f8 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -98,7 +98,7 @@ sl_allocz(slab *s)
}
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
@@ -170,6 +170,7 @@ static struct resclass sl_class = {
};
struct sl_head {
+ struct slab *slab;
node n;
u32 num_full;
u32 used_bits[0];
@@ -236,7 +237,7 @@ sl_alloc(slab *s)
struct sl_head *h;
redo:
- h = HEAD(s->partial_heads);
+ h = SKIP_BACK(struct sl_head, n, HEAD(s->partial_heads));
if (!h->n.next)
goto no_partial;
okay:
@@ -262,7 +263,7 @@ okay:
goto redo;
no_partial:
- h = HEAD(s->empty_heads);
+ h = SKIP_BACK(struct sl_head, n, HEAD(s->empty_heads));
if (h->n.next)
{
rem_node(&h->n);
@@ -270,12 +271,16 @@ no_partial:
s->num_empty_heads--;
goto okay;
}
+
h = alloc_page();
+ ASSERT_DIE(SL_GET_HEAD(h) == h);
+
#ifdef POISON
memset(h, 0xba, page_size);
#endif
- ASSERT_DIE(SL_GET_HEAD(h) == h);
+
memset(h, 0, s->head_size);
+ h->slab = s;
add_head(&s->partial_heads, &h->n);
goto okay;
}
@@ -305,9 +310,10 @@ sl_allocz(slab *s)
* and returns it back to the Slab @s.
*/
void
-sl_free(slab *s, void *oo)
+sl_free(void *oo)
{
struct sl_head *h = SL_GET_HEAD(oo);
+ struct slab *s = h->slab;
#ifdef POISON
memset(oo, 0xdb, s->data_size);
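
This lookup works because every object sits inside a page-aligned struct
sl_head, so masking the object's address down to a page boundary yields the
head, which now carries the slab backpointer. SL_GET_HEAD() is not part of
this diff; a sketch of what such a macro can look like, assuming page_size is
a power of two (the ASSERT_DIE(SL_GET_HEAD(h) == h) above checks exactly this
alignment assumption for freshly allocated pages):

	/* sketch, not the verbatim BIRD definition */
	#define SL_GET_HEAD(x) \
	  ((struct sl_head *) (((uintptr_t) (x)) & ~((uintptr_t) page_size - 1)))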
@@ -347,13 +353,14 @@ static void
slab_free(resource *r)
{
slab *s = (slab *) r;
- struct sl_head *h, *g;
+ struct sl_head *h;
+ node *nn, *nxt;
- WALK_LIST_DELSAFE(h, g, s->empty_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->empty_heads, n)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->partial_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->partial_heads, n)
free_page(h);
- WALK_LIST_DELSAFE(h, g, s->full_heads)
+ WALK_LIST2_DELSAFE(h, nn, nxt, s->full_heads, n)
free_page(h);
}
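
The switch from WALK_LIST to WALK_LIST2 is forced by the new layout: with
struct slab *slab placed first, node n is no longer at offset 0 of struct
sl_head, so macros that cast the node pointer directly (HEAD, WALK_LIST)
would hand back misaligned structure pointers. WALK_LIST2 instead iterates
over the embedded nodes and converts back to the container explicitly; per
lib/lists.h it is roughly:

	/* sketch: nn walks the raw nodes, SKIP_BACK() recovers the container */
	#define WALK_LIST2(n, nn, list, pos) \
	  for (nn = (list).head; \
	       nn->next && (n = SKIP_BACK(typeof(*n), pos, nn)); \
	       nn = nn->next)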
@@ -363,12 +370,13 @@ slab_dump(resource *r)
slab *s = (slab *) r;
int ec=0, pc=0, fc=0;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST2(h, nn, s->empty_heads, n)
ec++;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
pc++;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
fc++;
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
@@ -379,19 +387,20 @@ slab_memsize(resource *r)
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
heads++;
size_t items = heads * s->objs_per_slab;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
{
heads++;
items += h->num_full;
}
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST2(h, nn, s->empty_heads, n)
heads++;
size_t eff = items * s->data_size;
@@ -407,11 +416,12 @@ slab_lookup(resource *r, unsigned long a)
{
slab *s = (slab *) r;
struct sl_head *h;
+ node *nn;
- WALK_LIST(h, s->partial_heads)
+ WALK_LIST2(h, nn, s->partial_heads, n)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
- WALK_LIST(h, s->full_heads)
+ WALK_LIST2(h, nn, s->full_heads, n)
if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;
diff --git a/lib/slab_test.c b/lib/slab_test.c
index e3ed0d58..803d0215 100644
--- a/lib/slab_test.c
+++ b/lib/slab_test.c
@@ -17,6 +17,25 @@ static const int sizes[] = {
#define TEST_SIZE 1024 * 128
#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
+struct test_request {
+ int size;
+ enum strategy {
+ TEST_NONE,
+ TEST_FORWARDS,
+ TEST_BACKWARDS,
+ TEST_RANDOM,
+ TEST_MIXED,
+ TEST__MAX,
+ } strategy;
+};
+
+const char * const strategy_name[TEST__MAX] = {
+ [TEST_FORWARDS] = "forwards",
+ [TEST_BACKWARDS] = "backwards",
+ [TEST_RANDOM] = "random",
+ [TEST_MIXED] = "mixed",
+};
+
static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
{
byte *out = sl_alloc(s);
@@ -42,7 +61,7 @@ static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
block[p]++;
}
- sl_free(s, block);
+ sl_free(block);
struct resmem ns = rmemsize((resource *) s);
@@ -60,118 +79,93 @@ static inline struct resmem get_memsize(slab *s)
}
static int
-t_slab_forwards(const void *data)
+t_slab(const void *data)
{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
-
- struct resmem sliz = get_memsize(s);
-
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
-
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
-
- for (int i = 0; i < n; i++)
- test_free(s, block[i], sz, &sliz);
+ const struct test_request *tr = data;
+ int sz = tr->size;
- mb_free(block);
-
- return 1;
-}
-
-static int
-t_slab_backwards(const void *data)
-{
- int sz = (intptr_t) data;
slab *s = sl_new(&root_pool, sz);
-
struct resmem sliz = get_memsize(s);
int n = ITEMS(sz);
byte **block = mb_alloc(&root_pool, n * sizeof(*block));
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
+ switch (tr->strategy) {
+ case TEST_FORWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
- for (int i = n - 1; i >= 0; i--)
- test_free(s, block[i], sz, &sliz);
+ for (int i = 0; i < n; i++)
+ test_free(s, block[i], sz, &sliz);
- mb_free(block);
+ break;
- return 1;
-}
+ case TEST_BACKWARDS:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
-static int
-t_slab_random(const void *data)
-{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
+ for (int i = n - 1; i >= 0; i--)
+ test_free(s, block[i], sz, &sliz);
- struct resmem sliz = get_memsize(s);
+ break;
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+ case TEST_RANDOM:
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
- for (int i = 0; i < n; i++)
- block[i] = test_alloc(s, sz, &sliz);
+ for (int i = 0; i < n; i++)
+ {
+ int pos = bt_random() % (n - i);
+ test_free(s, block[pos], sz, &sliz);
+ if (pos != n - i - 1)
+ block[pos] = block[n - i - 1];
+ }
- for (int i = 0; i < n; i++)
- {
- int pos = bt_random() % (n - i);
- test_free(s, block[pos], sz, &sliz);
- if (pos != n - i - 1)
- block[pos] = block[n - i - 1];
- }
+ break;
- mb_free(block);
+ case TEST_MIXED:
+ {
+ int cur = 0;
+ int pending = n;
- return 1;
-}
+ while (cur + pending > 0) {
+ int action = bt_random() % (cur + pending);
-static int
-t_slab_mixed(const void *data)
-{
- int sz = (intptr_t) data;
- slab *s = sl_new(&root_pool, sz);
-
- struct resmem sliz = get_memsize(s);
-
- int n = ITEMS(sz);
- byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+ if (action < cur) {
+ test_free(s, block[action], sz, &sliz);
+ if (action != --cur)
+ block[action] = block[cur];
+ } else {
+ block[cur++] = test_alloc(s, sz, &sliz);
+ pending--;
+ }
+ }
- int cur = 0;
- int pending = n;
+ break;
+ }
- while (cur + pending > 0) {
- int action = bt_random() % (cur + pending);
-
- if (action < cur) {
- test_free(s, block[action], sz, &sliz);
- if (action != --cur)
- block[action] = block[cur];
- } else {
- block[cur++] = test_alloc(s, sz, &sliz);
- pending--;
- }
+ default: bug("This shouldn't happen");
}
mb_free(block);
-
return 1;
}
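
The TEST_MIXED branch keeps cur live blocks packed at the front of block[]
and pending allocations still to perform; each step picks uniformly among
cur + pending possible actions, so frees and allocations interleave at random
until both counters reach zero. It and TEST_RANDOM both lean on the same O(1)
swap-remove idiom for unordered arrays:

	/* sketch: drop a random live element without preserving order */
	int pos = bt_random() % cur;	/* pick a victim among cur live blocks */
	test_free(s, block[pos], sz, &sliz);
	block[pos] = block[--cur];	/* last live block fills the hole */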
int main(int argc, char *argv[])
{
bt_init(argc, argv);
+ struct test_request tr;
+
for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
- {
- bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]);
- bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed order, size=%d", sizes[i]);
- }
+ for (uint strategy = TEST_FORWARDS; strategy < TEST__MAX; strategy++)
+ {
+ tr = (struct test_request) {
+ .size = sizes[i],
+ .strategy = strategy,
+ };
+ bt_test_suite_arg(t_slab, &tr, "Slab allocator test, size=%d, strategy=%s",
+ tr.size, strategy_name[strategy]);
+ }
return bt_exit_value();
}
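
One usage note on the rewritten main(): tr is a single stack variable reused
for every registered test and passed by address. This is safe only if
bt_test_suite_arg() runs the test before the next loop iteration overwrites
tr, which matches how BIRD's birdtest framework behaves; under a framework
that merely queued the argument pointer, every test would see the last
size/strategy pair.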