path: root/lib/slab.c
author    Maria Matejka <mq@ucw.cz>    2021-11-27 00:21:12 +0100
committer Maria Matejka <mq@ucw.cz>    2021-11-27 22:54:15 +0100
commit    f772afc525156498900770ffe5a98349df89a45c (patch)
tree      fef3e68e487fb35335bdb14eb60821165b7dc066 /lib/slab.c
parent    644e9ca94e2d10ba0c2de45f94523da2414328e3 (diff)
Memory statistics split into Effective and Overhead
This feature is intended mostly for checking that BIRD's allocation strategies don't consume too much memory. There are some cases where withdrawing routes in a specific order leads to memory fragmentation, and this output should give the user at least a notion of how much memory is actually used for data storage and how much is "just allocated" or spent on overhead.

Also raising the "system allocator overhead estimation" from 8 to 16 bytes; it is probably even more. I've found 16 as a local minimum in the best scenarios among reachable machines. I couldn't find any reasonable method to estimate this value when BIRD starts up.

This commit also fixes the inaccurate computation of memory overhead for slabs, where the "system allocator overhead estimation" was improperly added to the size of mmap-ed memory.
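For reference, a minimal standalone sketch of the effective/overhead accounting described above. The field names mirror the diff below, but example_slab_memsize, PAGE_SIZE and the simplified parameters are illustrative stand-ins, not BIRD's actual resource API:

#include <stddef.h>
#include <stdio.h>

#define ALLOC_OVERHEAD  16    /* per-allocation bookkeeping estimate, raised from 8 */
#define PAGE_SIZE       4096  /* stand-in for get_page_size() */

struct resmem {               /* mirrors the new return type in the diff */
  size_t effective;           /* memory actually holding user data */
  size_t overhead;            /* allocator and structural overhead */
};

/* Hypothetical slab with `heads` mmap-ed pages holding `items` objects
 * of `obj_size` bytes each; `slab_struct_size` models sizeof(struct slab). */
static struct resmem
example_slab_memsize(size_t heads, size_t items, size_t obj_size,
                     size_t slab_struct_size)
{
  size_t eff = items * obj_size;

  return (struct resmem) {
    .effective = eff,
    /* Pages come from mmap, so no per-page ALLOC_OVERHEAD is added;
     * only the malloc-ed slab structure carries the allocator estimate. */
    .overhead = ALLOC_OVERHEAD + slab_struct_size + heads * PAGE_SIZE - eff,
  };
}

int main(void)
{
  struct resmem m = example_slab_memsize(4, 100, 64, 128);
  printf("effective %zu B, overhead %zu B\n", m.effective, m.overhead);
  return 0;
}

The point of the subtraction is that any part of an mmap-ed page not covered by live objects counts as overhead, while the per-allocation estimate applies only to the malloc-ed slab structure.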
Diffstat (limited to 'lib/slab.c')
-rw-r--r--  lib/slab.c  29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/lib/slab.c b/lib/slab.c
index b0a01ae7..6cab6b7b 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -42,7 +42,7 @@
static void slab_free(resource *r);
static void slab_dump(resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
-static size_t slab_memsize(resource *r);
+static struct resmem slab_memsize(resource *r);
#ifdef FAKE_SLAB
@@ -128,7 +128,7 @@ slab_dump(resource *r)
debug("(%d objects per %d bytes)\n", cnt, s->size);
}
-static size_t
+static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
@@ -138,7 +138,10 @@ slab_memsize(resource *r)
WALK_LIST(o, s->objs)
cnt++;
- return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
+ return (struct resmem) {
+ .effective = cnt * s->size,
+ .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + cnt * ALLOC_OVERHEAD,
+ };
}
@@ -363,21 +366,33 @@ slab_dump(resource *r)
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
-static size_t
+static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST(h, s->full_heads)
heads++;
+
+ size_t items = heads * s->objs_per_slab;
+
WALK_LIST(h, s->partial_heads)
+ {
heads++;
- WALK_LIST(h, s->full_heads)
+ items += h->num_full;
+ }
+
+ WALK_LIST(h, s->empty_heads)
heads++;
- return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + get_page_size());
+ size_t eff = items * s->obj_size;
+
+ return (struct resmem) {
+ .effective = eff,
+ .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * get_page_size() - eff,
+ };
}
static resource *