Diffstat (limited to 'lib/slab.c')
-rw-r--r-- | lib/slab.c | 48
1 file changed, 30 insertions, 18 deletions
diff --git a/lib/slab.c b/lib/slab.c
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -42,7 +42,7 @@
 static void slab_free(resource *r);
 static void slab_dump(resource *r);
 static resource *slab_lookup(resource *r, unsigned long addr);
-static size_t slab_memsize(resource *r);
+static struct resmem slab_memsize(resource *r);
 
 #ifdef FAKE_SLAB
 
@@ -128,7 +128,7 @@ slab_dump(resource *r)
   debug("(%d objects per %d bytes)\n", cnt, s->size);
 }
 
-static size_t
+static struct resmem
 slab_memsize(resource *r)
 {
   slab *s = (slab *) r;
@@ -138,7 +138,10 @@ slab_memsize(resource *r)
   WALK_LIST(o, s->objs)
     cnt++;
 
-  return ALLOC_OVERHEAD + sizeof(struct slab) + cnt * (ALLOC_OVERHEAD + s->size);
+  return (struct resmem) {
+    .effective = cnt * s->size,
+    .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + cnt * ALLOC_OVERHEAD,
+  };
 }
 
 
@@ -152,7 +155,6 @@ slab_memsize(resource *r)
 
 struct slab {
   resource r;
-  pool *p;
   uint obj_size, head_size, head_bitfield_len;
   uint objs_per_slab, num_empty_heads, data_size;
   list empty_heads, partial_heads, full_heads;
@@ -178,7 +180,7 @@ struct sl_alignment {		/* Magic structure for testing of alignment */
   int x[0];
 };
 
-#define SL_GET_HEAD(x) ((struct sl_head *) PAGE_HEAD(x))
+#define SL_GET_HEAD(x) ((struct sl_head *) (((uintptr_t) (x)) & ~(page_size-1)))
 
 /**
  * sl_new - create a new Slab
@@ -192,10 +194,9 @@ slab *
 sl_new(pool *p, uint size)
 {
   slab *s = ralloc(p, &sl_class);
-  s->p = p;
   uint align = sizeof(struct sl_alignment);
-  if (align < sizeof(int))
-    align = sizeof(int);
+  if (align < sizeof(void *))
+    align = sizeof(void *);
   s->data_size = size;
   size = (size + align - 1) / align * align;
   s->obj_size = size;
@@ -269,7 +270,7 @@ no_partial:
       s->num_empty_heads--;
       goto okay;
     }
-  h = alloc_page(s->p);
+  h = alloc_page();
 #ifdef POISON
   memset(h, 0xba, page_size);
 #endif
@@ -332,7 +333,7 @@ sl_free(slab *s, void *oo)
 #ifdef POISON
 	  memset(h, 0xde, page_size);
 #endif
-	  free_page(s->p, h);
+	  free_page(h);
 	}
       else
 	{
@@ -349,11 +350,11 @@ slab_free(resource *r)
   struct sl_head *h, *g;
 
   WALK_LIST_DELSAFE(h, g, s->empty_heads)
-    free_page(s->p, h);
+    free_page(h);
   WALK_LIST_DELSAFE(h, g, s->partial_heads)
-    free_page(s->p, h);
+    free_page(h);
   WALK_LIST_DELSAFE(h, g, s->full_heads)
-    free_page(s->p, h);
+    free_page(h);
 }
 
 static void
@@ -372,22 +373,33 @@ slab_dump(resource *r)
   debug("(%de+%dp+%df blocks per %d objs per %d bytes)\n", ec, pc, fc, s->objs_per_slab, s->obj_size);
 }
 
-static size_t
+static struct resmem
 slab_memsize(resource *r)
 {
   slab *s = (slab *) r;
   size_t heads = 0;
   struct sl_head *h;
 
-  WALK_LIST(h, s->empty_heads)
+  WALK_LIST(h, s->full_heads)
     heads++;
+
+  size_t items = heads * s->objs_per_slab;
+
   WALK_LIST(h, s->partial_heads)
+  {
     heads++;
-  WALK_LIST(h, s->full_heads)
+    items += h->num_full;
+  }
+
+  WALK_LIST(h, s->empty_heads)
     heads++;
 
-//  return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size);
-  return ALLOC_OVERHEAD + sizeof(struct slab);	/* The page sizes are accounted for in the pool */
+  size_t eff = items * s->obj_size;
+
+  return (struct resmem) {
+    .effective = eff,
+    .overhead = ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size - eff,
+  };
 }
 
 static resource *
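
For readers unfamiliar with the trick, below is a minimal standalone sketch (not part of this patch and not BIRD code) of the pointer-masking technique the new SL_GET_HEAD() definition relies on: each slab head sits at the start of a page-aligned, page-sized block, so clearing the low bits of any object pointer inside that block yields the address of its head. DEMO_PAGE_SIZE, struct demo_head and DEMO_GET_HEAD are invented stand-ins for page_size, struct sl_head and SL_GET_HEAD; aligned_alloc() stands in for alloc_page().

/*
 * Sketch of the page-mask trick used by the new SL_GET_HEAD().
 * All DEMO_* names are made up for this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096u            /* assumed page size */

struct demo_head {                      /* stand-in for struct sl_head */
  unsigned num_full;                    /* objects allocated in this page */
};

/* Same masking idea as the new SL_GET_HEAD() definition:
 * clear the low log2(page_size) bits of the object pointer. */
#define DEMO_GET_HEAD(x) \
  ((struct demo_head *) (((uintptr_t) (x)) & ~((uintptr_t) DEMO_PAGE_SIZE - 1)))

int main(void)
{
  /* a page-aligned, page-sized block whose first bytes hold the head */
  void *page = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
  if (!page)
    return 1;

  struct demo_head *head = page;
  head->num_full = 3;

  /* any object carved out of the same page maps back to the same head */
  void *obj = (char *) page + 512;
  printf("head=%p recovered=%p num_full=%u\n",
         (void *) head, (void *) DEMO_GET_HEAD(obj), DEMO_GET_HEAD(obj)->num_full);

  free(page);
  return 0;
}

Compiled with a C11 compiler (for aligned_alloc()), the printed head and recovered addresses are identical, which is exactly the property the macro depends on.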
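
As a quick illustration of the new accounting in slab_memsize(), here is a hedged standalone sketch (again not BIRD code) that reproduces the effective/overhead split for made-up numbers. struct demo_resmem only mirrors the .effective/.overhead initializers seen in the patch; the real layout of BIRD's struct resmem is an assumption, and every slab parameter below is invented.

#include <stddef.h>
#include <stdio.h>

/* Assumed shape of the result, matching the designated initializers
 * in the patch; not copied from BIRD headers. */
struct demo_resmem {
  size_t effective;   /* bytes usable by allocated objects */
  size_t overhead;    /* everything else charged to the resource */
};

/* Mirrors the arithmetic of the new slab_memsize() for invented inputs:
 * full pages contribute objs_per_slab items each, partial pages contribute
 * their num_full count, and whole pages are charged as overhead minus the
 * part already counted as effective. */
static struct demo_resmem
demo_slab_memsize(size_t obj_size, size_t objs_per_slab,
                  size_t full_heads, size_t partial_heads, size_t partial_items,
                  size_t empty_heads, size_t page_size, size_t fixed_overhead)
{
  size_t heads = full_heads + partial_heads + empty_heads;
  size_t items = full_heads * objs_per_slab + partial_items;
  size_t eff = items * obj_size;

  return (struct demo_resmem) {
    .effective = eff,
    .overhead = fixed_overhead + heads * page_size - eff,
  };
}

int main(void)
{
  /* e.g. 64-byte objects, 63 per 4096-byte page,
   * 2 full pages + 1 partial page with 10 used objects + 1 empty page */
  struct demo_resmem m = demo_slab_memsize(64, 63, 2, 1, 10, 1, 4096, 64);
  printf("effective=%zu overhead=%zu\n", m.effective, m.overhead);
  return 0;
}

Here fixed_overhead stands in for ALLOC_OVERHEAD + sizeof(struct slab); the point is that the old single size_t return collapsed both figures, while the new struct resmem keeps payload and bookkeeping separate.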