Diffstat (limited to 'lib/slab.c')
-rw-r--r--  lib/slab.c  44
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/lib/slab.c b/lib/slab.c
index 8d16c433..9be9844d 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -42,7 +42,7 @@
static void slab_free(resource *r);
static void slab_dump(resource *r);
static resource *slab_lookup(resource *r, unsigned long addr);
-static size_t slab_memsize(resource *r);
+static struct resmem slab_memsize(resource *r);
#ifdef FAKE_SLAB
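The new return type splits what slab_memsize reports into payload and bookkeeping. A minimal sketch of the two-field structure implied by the diff; the actual definition lives elsewhere in BIRD (lib/resource.h) and may differ in detail:

```c
#include <stddef.h>

/* Sketch only: field names are taken from the diff, the layout is assumed. */
struct resmem {
  size_t effective;   /* bytes currently used by live objects */
  size_t overhead;    /* allocator bookkeeping and unused slab space */
};
```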
@@ -128,7 +128,7 @@ slab_dump(resource *r)
debug("(%d objects per %d bytes)\n", cnt, s->size);
}
-static size_t
+static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
@@ -138,7 +138,10 @@ slab_memsize(resource *r)
WALK_LIST(o, s->objs)
cnt++;
- return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
+ return (struct resmem) {
+ .effective = cnt * s->size,
+ .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + cnt * ALLOC_OVERHEAD,
+ };
}
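For the FAKE_SLAB build, the single total returned before is now split so that the object payload goes into .effective and everything else into .overhead; their sum still equals the old return value. A small standalone sketch of the same arithmetic, with made-up constants standing in for BIRD's ALLOC_OVERHEAD and struct slab size:

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
  size_t obj_size = 64, cnt = 10;               /* example values only */
  size_t alloc_overhead = 16, slab_struct = 96; /* stand-ins, not BIRD's */

  size_t effective = cnt * obj_size;                                      /* 640 */
  size_t overhead  = alloc_overhead + slab_struct + cnt * alloc_overhead; /* 272 */

  /* 640 + 272 matches what the pre-patch code returned as one number. */
  printf("effective=%zu overhead=%zu total=%zu\n",
         effective, overhead, effective + overhead);
  return 0;
}
```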
@@ -177,7 +180,7 @@ struct sl_alignment { /* Magic structure for testing of alignment */
int x[0];
};
-#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(get_page_size()-1)))
+#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(page_size-1)))
/**
* sl_new - create a new Slab
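SL_GET_HEAD relies on slab pages being page-aligned: clearing the low bits of any object pointer lands on the struct sl_head at the start of its page. The change only swaps the get_page_size() call for the cached page_size variable, which must still be a power of two for the mask to be valid. A minimal illustration, assuming a 4096-byte page:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumed here; BIRD determines this at startup */

static void *head_of(void *obj)
{
  /* Clear the low log2(PAGE_SIZE) bits to find the page start. */
  return (void *) (((uintptr_t) obj) & ~((uintptr_t) PAGE_SIZE - 1));
}

int main(void)
{
  char buf[2 * PAGE_SIZE];
  /* Simulate a page-aligned slab page inside a local buffer. */
  void *aligned = head_of(buf + PAGE_SIZE);
  void *obj = (char *) aligned + 200;
  printf("%d\n", head_of(obj) == aligned);   /* prints 1 */
  return 0;
}
```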
@@ -192,14 +195,13 @@ sl_new(pool *p, uint size)
{
slab *s = ralloc(p, &sl_class);
uint align = sizeof(struct sl_alignment);
- if (align < sizeof(int))
- align = sizeof(int);
+ if (align < sizeof(void *))
+ align = sizeof(void *);
s->data_size = size;
size = (size + align - 1) / align * align;
s->obj_size = size;
s->head_size = sizeof(struct sl_head);
- u64 page_size = get_page_size();
do {
s->objs_per_slab = (page_size - s->head_size) / size;
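sl_new now aligns objects to at least sizeof(void *) instead of sizeof(int), and the local page_size variable is dropped because the value is presumably available as a global set up by the page allocator. The size round-up and the objects-per-page computation follow the usual pattern; a small sketch with hypothetical names:

```c
/* Round size up to a multiple of align, e.g. (20, 8) -> 24. */
static unsigned round_up(unsigned size, unsigned align)
{
  return (size + align - 1) / align * align;
}

/* How many rounded objects fit in one page after the sl_head header. */
static unsigned objs_per_page(unsigned page_size, unsigned head_size,
                              unsigned obj_size)
{
  return (page_size - head_size) / obj_size;
}
```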
@@ -270,7 +272,7 @@ no_partial:
}
h = alloc_page();
#ifdef POISON
- memset(h, 0xba, get_page_size());
+ memset(h, 0xba, page_size);
#endif
ASSERT_DIE(SL_GET_HEAD(h) == h);
memset(h, 0, s->head_size);
@@ -329,7 +331,7 @@ sl_free(slab *s, void *oo)
if (s->num_empty_heads >= MAX_EMPTY_HEADS)
{
#ifdef POISON
- memset(h, 0xde, get_page_size());
+ memset(h, 0xde, page_size);
#endif
free_page(h);
}
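Both poison fills now use the same cached page_size. In POISON builds the slab fills a freshly obtained page with 0xba and a page about to be released with 0xde, so reads of uninitialised or freed memory show a recognisable pattern in a debugger. A sketch of the idea with hypothetical helper names:

```c
#include <string.h>
#include <stddef.h>

/* Debug-only helpers; the byte patterns follow the diff, the names are mine. */
static void poison_fresh_page(void *page, size_t page_size)
{
  memset(page, 0xba, page_size);   /* marks "allocated but uninitialised" */
}

static void poison_dead_page(void *page, size_t page_size)
{
  memset(page, 0xde, page_size);   /* marks "freed, do not touch" */
}
```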
@@ -371,21 +373,33 @@ slab_dump(resource *r)
debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
}
-static size_t
+static struct resmem
slab_memsize(resource *r)
{
slab *s = (slab *) r;
size_t heads = 0;
struct sl_head *h;
- WALK_LIST(h, s->empty_heads)
+ WALK_LIST(h, s->full_heads)
heads++;
+
+ size_t items = heads * s->objs_per_slab;
+
WALK_LIST(h, s->partial_heads)
+ {
heads++;
- WALK_LIST(h, s->full_heads)
+ items += h->num_full;
+ }
+
+ WALK_LIST(h, s->empty_heads)
heads++;
- return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + get_page_size());
+ size_t eff = items * s->obj_size;
+
+ return (struct resmem) {
+ .effective = eff,
+ .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
+ };
}
static resource *
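The real (non-FAKE_SLAB) slab_memsize now counts live objects per list: every full page contributes objs_per_slab objects, every partial page its num_full count, and empty pages contribute only their page, while overhead is whatever remains of the heads' pages plus the fixed per-slab cost. A worked example with made-up numbers, not taken from the diff:

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
  size_t page_size = 4096, obj_size = 64, objs_per_slab = 63; /* examples */
  size_t full_heads = 2, partial_items = 10, empty_heads = 1;
  size_t heads = full_heads + 1 /* partial */ + empty_heads;  /* 4 pages */

  size_t items = full_heads * objs_per_slab + partial_items;  /* 136 */
  size_t effective = items * obj_size;                        /* 8704 */
  size_t page_overhead = heads * page_size - effective;       /* 7680 */

  /* The committed code also adds ALLOC_OVERHEAD + sizeof(struct slab). */
  printf("effective=%zu page_overhead=%zu\n", effective, page_overhead);
  return 0;
}
```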
@@ -395,10 +409,10 @@ slab_lookup(resource *r, unsigned long a)
struct sl_head *h;
WALK_LIST(h, s->partial_heads)
- if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+ if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
WALK_LIST(h, s->full_heads)
- if ((unsigned long) h < a && (unsigned long) h + get_page_size() < a)
+ if ((unsigned long) h < a && (unsigned long) h + page_size < a)
return r;
return NULL;
}
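slab_lookup is meant to answer whether an address belongs to one of the slab's pages; empty pages hold no live objects and are not walked. The committed test keeps the original comparison operators and only swaps in page_size. For reference, a conventional address-in-page check (hypothetical names, assuming page-sized, page-aligned heads) would read:

```c
#include <stdint.h>

/* Hypothetical helper: nonzero if addr falls inside the page at head. */
static int addr_in_page(uintptr_t head, uintptr_t addr, uintptr_t page_size)
{
  return addr >= head && addr < head + page_size;
}
```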