author     Maria Matejka <mq@ucw.cz>  2022-03-14 17:29:39 +0100
committer  Maria Matejka <mq@ucw.cz>  2022-03-14 17:37:56 +0100
commit     3c42f7af6a23de3f135235521318301e5b34f2de (patch)
tree       74a2505c66e5bd590d61b64e7393cf016d9be284 /lib
parent     4e60b3ee7270a432357e203824a7d519a3134f01 (diff)
Slab memory allocator unit test
Diffstat (limited to 'lib')
-rw-r--r--  lib/Makefile       2
-rw-r--r--  lib/slab.c         2
-rw-r--r--  lib/slab_test.c  177
3 files changed, 179 insertions, 2 deletions
diff --git a/lib/Makefile b/lib/Makefile
index 4378a7bd..812f721c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,6 +2,6 @@ src := bitmap.c bitops.c blake2s.c blake2b.c checksum.c event.c flowspec.c idm.c
obj := $(src-o-files)
$(all-daemon)
-tests_src := bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c
+tests_src := bitmap_test.c heap_test.c buffer_test.c event_test.c flowspec_test.c bitops_test.c patmatch_test.c fletcher16_test.c slist_test.c checksum_test.c lists_test.c mac_test.c ip_test.c hash_test.c printf_test.c slab_test.c
tests_targets := $(tests_targets) $(tests-target-files)
tests_objs := $(tests_objs) $(src-o-files)
diff --git a/lib/slab.c b/lib/slab.c
index 9be9844d..9e0f7798 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -394,7 +394,7 @@ slab_memsize(resource *r)
WALK_LIST(h, s->empty_heads)
heads++;
- size_t eff = items * s->obj_size;
+ size_t eff = items * s->data_size;
return (struct resmem) {
.effective = eff,
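
Note on the one-line fix above: s->obj_size is the allocator's internal, aligned per-object size, while s->data_size is the size the caller passed to sl_new(), so reporting effective memory as items * data_size is what the new test below relies on. A minimal sketch of that invariant, written with the same API the test uses; the helper name and the 41-byte size are illustrative only and not part of the commit:

    /* Illustrative only, not part of the commit: the accounting invariant
     * the fix restores, expressed with the API used by slab_test.c below. */
    #include "test/birdtest.h"
    #include "lib/resource.h"

    static void
    check_effective_accounting(void)          /* hypothetical helper name */
    {
      slab *s = sl_new(&root_pool, 41);       /* 41 is an arbitrary example size */
      struct resmem before = rmemsize((resource *) s);

      void *obj = sl_alloc(s);
      struct resmem after = rmemsize((resource *) s);

      /* effective memory grows by the requested data_size (41 here),
       * not by the aligned obj_size the allocator uses internally */
      bt_assert(after.effective == before.effective + 41);

      sl_free(s, obj);
    }
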
diff --git a/lib/slab_test.c b/lib/slab_test.c
new file mode 100644
index 00000000..e3ed0d58
--- /dev/null
+++ b/lib/slab_test.c
@@ -0,0 +1,177 @@
+/*
+ * BIRD Library -- Slab Alloc / Dealloc Tests
+ *
+ * (c) 2022 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#include "test/birdtest.h"
+#include "lib/resource.h"
+#include "lib/bitops.h"
+
+static const int sizes[] = {
+ 8, 12, 18, 27, 41, 75, 131, 269,
+};
+
+#define TEST_SIZE 1024 * 128
+#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 )
+
+static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz)
+{
+ byte *out = sl_alloc(s);
+
+ for (int p=0; p < sz; p++)
+ out[p] = p & 0xff;
+
+ struct resmem ns = rmemsize((resource *) s);
+
+ bt_assert(sliz->effective + sz == ns.effective);
+ bt_assert((sliz->overhead - sz - ns.overhead) % page_size == 0);
+
+ *sliz = ns;
+
+ return out;
+}
+
+static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz)
+{
+ for (int p=0; p < sz; p++)
+ {
+ bt_assert(block[p] == (p & 0xff));
+ block[p]++;
+ }
+
+ sl_free(s, block);
+
+ struct resmem ns = rmemsize((resource *) s);
+
+ bt_assert(sliz->effective - sz == ns.effective);
+ bt_assert((sliz->overhead + sz - ns.overhead) % page_size == 0);
+
+ *sliz = ns;
+}
+
+static inline struct resmem get_memsize(slab *s)
+{
+ struct resmem sz = rmemsize((resource *) s);
+ bt_assert(sz.effective == 0);
+ return sz;
+}
+
+static int
+t_slab_forwards(const void *data)
+{
+ int sz = (intptr_t) data;
+ slab *s = sl_new(&root_pool, sz);
+
+ struct resmem sliz = get_memsize(s);
+
+ int n = ITEMS(sz);
+ byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = 0; i < n; i++)
+ test_free(s, block[i], sz, &sliz);
+
+ mb_free(block);
+
+ return 1;
+}
+
+static int
+t_slab_backwards(const void *data)
+{
+ int sz = (intptr_t) data;
+ slab *s = sl_new(&root_pool, sz);
+
+ struct resmem sliz = get_memsize(s);
+
+ int n = ITEMS(sz);
+ byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = n - 1; i >= 0; i--)
+ test_free(s, block[i], sz, &sliz);
+
+ mb_free(block);
+
+ return 1;
+}
+
+static int
+t_slab_random(const void *data)
+{
+ int sz = (intptr_t) data;
+ slab *s = sl_new(&root_pool, sz);
+
+ struct resmem sliz = get_memsize(s);
+
+ int n = ITEMS(sz);
+ byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+
+ for (int i = 0; i < n; i++)
+ block[i] = test_alloc(s, sz, &sliz);
+
+ for (int i = 0; i < n; i++)
+ {
+ int pos = bt_random() % (n - i);
+ test_free(s, block[pos], sz, &sliz);
+ if (pos != n - i - 1)
+ block[pos] = block[n - i - 1];
+ }
+
+ mb_free(block);
+
+ return 1;
+}
+
+static int
+t_slab_mixed(const void *data)
+{
+ int sz = (intptr_t) data;
+ slab *s = sl_new(&root_pool, sz);
+
+ struct resmem sliz = get_memsize(s);
+
+ int n = ITEMS(sz);
+ byte **block = mb_alloc(&root_pool, n * sizeof(*block));
+
+ int cur = 0;
+ int pending = n;
+
+ while (cur + pending > 0) {
+ int action = bt_random() % (cur + pending);
+
+ if (action < cur) {
+ test_free(s, block[action], sz, &sliz);
+ if (action != --cur)
+ block[action] = block[cur];
+ } else {
+ block[cur++] = test_alloc(s, sz, &sliz);
+ pending--;
+ }
+ }
+
+ mb_free(block);
+
+ return 1;
+}
+int main(int argc, char *argv[])
+{
+ bt_init(argc, argv);
+
+ for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++)
+ {
+ bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]);
+ bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]);
+ bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]);
+ bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed order, size=%d", sizes[i]);
+ }
+
+ return bt_exit_value();
+}
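
With slab_test.c added to tests_src in lib/Makefile, the new test is built alongside the other birdtest binaries. A usage sketch, assuming the tree's usual configure-and-make flow and its unit-test target; the exact binary path depends on the build directory layout and is not shown here:

    ./configure
    make
    make check    # builds and runs the unit tests, slab_test among them

The generated slab_test binary can also be run on its own to exercise only these four suites across all configured object sizes.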