diff options
author | Maria Matejka <mq@ucw.cz> | 2022-03-14 17:29:39 +0100 |
---|---|---|
committer | Maria Matejka <mq@ucw.cz> | 2022-03-14 17:37:56 +0100 |
commit | 3c42f7af6a23de3f135235521318301e5b34f2de (patch) | |
tree | 74a2505c66e5bd590d61b64e7393cf016d9be284 /lib/slab_test.c | |
parent | 4e60b3ee7270a432357e203824a7d519a3134f01 (diff) |
Slab memory allocator unit test
Diffstat (limited to 'lib/slab_test.c')
-rw-r--r-- | lib/slab_test.c | 177 |
1 file changed, 177 insertions, 0 deletions
diff --git a/lib/slab_test.c b/lib/slab_test.c new file mode 100644 index 00000000..e3ed0d58 --- /dev/null +++ b/lib/slab_test.c @@ -0,0 +1,177 @@ +/* + * BIRD Library -- Slab Alloc / Dealloc Tests + * + * (c) 2022 Maria Matejka <mq@jmq.cz> + * + * Can be freely distributed and used under the terms of the GNU GPL. + */ + +#include "test/birdtest.h" +#include "lib/resource.h" +#include "lib/bitops.h" + +static const int sizes[] = { + 8, 12, 18, 27, 41, 75, 131, 269, +}; + +#define TEST_SIZE 1024 * 128 +#define ITEMS(sz) TEST_SIZE / ( (sz) >> u32_log2((sz))/2 ) + +static inline byte *test_alloc(slab *s, int sz, struct resmem *sliz) +{ + byte *out = sl_alloc(s); + + for (int p=0; p < sz; p++) + out[p] = p & 0xff; + + struct resmem ns = rmemsize((resource *) s); + + bt_assert(sliz->effective + sz == ns.effective); + bt_assert((sliz->overhead - sz - ns.overhead) % page_size == 0); + + *sliz = ns; + + return out; +} + +static inline void test_free(slab *s, byte *block, int sz, struct resmem *sliz) +{ + for (int p=0; p < sz; p++) + { + bt_assert(block[p] == (p & 0xff)); + block[p]++; + } + + sl_free(s, block); + + struct resmem ns = rmemsize((resource *) s); + + bt_assert(sliz->effective - sz == ns.effective); + bt_assert((sliz->overhead + sz - ns.overhead) % page_size == 0); + + *sliz = ns; +} + +static inline struct resmem get_memsize(slab *s) +{ + struct resmem sz = rmemsize((resource *) s); + bt_assert(sz.effective == 0); + return sz; +} + +static int +t_slab_forwards(const void *data) +{ + int sz = (intptr_t) data; + slab *s = sl_new(&root_pool, sz); + + struct resmem sliz = get_memsize(s); + + int n = ITEMS(sz); + byte **block = mb_alloc(&root_pool, n * sizeof(*block)); + + for (int i = 0; i < n; i++) + block[i] = test_alloc(s, sz, &sliz); + + for (int i = 0; i < n; i++) + test_free(s, block[i], sz, &sliz); + + mb_free(block); + + return 1; +} + +static int +t_slab_backwards(const void *data) +{ + int sz = (intptr_t) data; + slab *s = sl_new(&root_pool, sz); + + 
struct resmem sliz = get_memsize(s); + + int n = ITEMS(sz); + byte **block = mb_alloc(&root_pool, n * sizeof(*block)); + + for (int i = 0; i < n; i++) + block[i] = test_alloc(s, sz, &sliz); + + for (int i = n - 1; i >= 0; i--) + test_free(s, block[i], sz, &sliz); + + mb_free(block); + + return 1; +} + +static int +t_slab_random(const void *data) +{ + int sz = (intptr_t) data; + slab *s = sl_new(&root_pool, sz); + + struct resmem sliz = get_memsize(s); + + int n = ITEMS(sz); + byte **block = mb_alloc(&root_pool, n * sizeof(*block)); + + for (int i = 0; i < n; i++) + block[i] = test_alloc(s, sz, &sliz); + + for (int i = 0; i < n; i++) + { + int pos = bt_random() % (n - i); + test_free(s, block[pos], sz, &sliz); + if (pos != n - i - 1) + block[pos] = block[n - i - 1]; + } + + mb_free(block); + + return 1; +} + +static int +t_slab_mixed(const void *data) +{ + int sz = (intptr_t) data; + slab *s = sl_new(&root_pool, sz); + + struct resmem sliz = get_memsize(s); + + int n = ITEMS(sz); + byte **block = mb_alloc(&root_pool, n * sizeof(*block)); + + int cur = 0; + int pending = n; + + while (cur + pending > 0) { + int action = bt_random() % (cur + pending); + + if (action < cur) { + test_free(s, block[action], sz, &sliz); + if (action != --cur) + block[action] = block[cur]; + } else { + block[cur++] = test_alloc(s, sz, &sliz); + pending--; + } + } + + mb_free(block); + + return 1; +} +int main(int argc, char *argv[]) +{ + bt_init(argc, argv); + + for (uint i = 0; i < sizeof(sizes) / sizeof(*sizes); i++) + { + bt_test_suite_arg(t_slab_forwards, (void *) (intptr_t) sizes[i], "Slab deallocation from beginning to end, size=%d", sizes[i]); + bt_test_suite_arg(t_slab_backwards, (void *) (intptr_t) sizes[i], "Slab deallocation from end to beginning, size=%d", sizes[i]); + bt_test_suite_arg(t_slab_random, (void *) (intptr_t) sizes[i], "Slab deallocation in random order, size=%d", sizes[i]); + bt_test_suite_arg(t_slab_mixed, (void *) (intptr_t) sizes[i], "Slab deallocation in mixed 
order, size=%d", sizes[i]); + } + + return bt_exit_value(); +} |