summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorMaria Matejka <mq@ucw.cz>2021-11-30 23:57:14 +0100
committerMaria Matejka <mq@ucw.cz>2021-12-01 13:00:54 +0100
commitbb63e99d7877023667edaf26495dd657ec2fd57b (patch)
tree3ae919a00541c27c8f661addb56c6d4ef681d361 /lib
parent385b3ea3956aefc2868cdd838fc0a90f1d8a7857 (diff)
Page allocator moved from pools to IO loops.
The resource pool system is highly hierarchical and keeping spare pages in pools leads to unnecessarily complex memory management. Loops have a flat hierarchy, at least for now, and it is therefore much easier to take care of pages, especially in cases of excessive virtual memory fragmentation.
Diffstat (limited to 'lib')
-rw-r--r--lib/mempool.c4
-rw-r--r--lib/resource.c67
-rw-r--r--lib/resource.h4
-rw-r--r--lib/slab.c13
4 files changed, 10 insertions, 78 deletions
diff --git a/lib/mempool.c b/lib/mempool.c
index 8f300b81..ed3ae8de 100644
--- a/lib/mempool.c
+++ b/lib/mempool.c
@@ -130,7 +130,7 @@ lp_alloc(linpool *m, uint size)
{
/* Need to allocate a new chunk */
if (m->use_pages)
- c = alloc_page(m->p);
+ c = alloc_page();
else
c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
@@ -271,7 +271,7 @@ lp_free(resource *r)
{
c = d->next;
if (m->use_pages)
- free_page(m->p, d);
+ free_page(d);
else
xfree(d);
}
diff --git a/lib/resource.c b/lib/resource.c
index c847d41a..d98cd4ff 100644
--- a/lib/resource.c
+++ b/lib/resource.c
@@ -30,14 +30,6 @@
* is freed upon shutdown of the module.
*/
-struct pool_pages {
- uint free;
- uint used;
- void *ptr[0];
-};
-
-#define POOL_PAGES_MAX ((page_size - sizeof(struct pool_pages)) / sizeof (void *))
-
static void pool_dump(resource *);
static void pool_free(resource *);
static resource *pool_lookup(resource *, unsigned long);
@@ -54,9 +46,6 @@ static struct resclass pool_class = {
pool root_pool;
-void *alloc_sys_page(void);
-int free_sys_page(void *);
-
static int indent;
/**
@@ -103,16 +92,6 @@ pool_free(resource *P)
r = rr;
}
- if (p->pages)
- {
- ASSERT_DIE(!p->pages->used);
-
- for (uint i = 0; i < p->pages->free; i++)
- free_sys_page(p->pages->ptr[i]);
-
- free_sys_page(p->pages);
- }
-
pool_parent = parent;
}
@@ -185,9 +164,6 @@ pool_memsize_locked(pool *p)
WALK_LIST(r, p->inside)
sum += rmemsize(r);
- if (p->pages)
- sum += page_size * (p->pages->used + p->pages->free + 1);
-
return sum;
}
@@ -551,49 +527,6 @@ mb_free(void *m)
rfree(b);
}
-void *
-alloc_page(pool *p)
-{
- if (!p->pages)
- {
- p->pages = alloc_sys_page();
- p->pages->free = 0;
- p->pages->used = 1;
- }
- else
- p->pages->used++;
-
- if (p->pages->free)
- {
- void *ptr = p->pages->ptr[--p->pages->free];
- bzero(ptr, page_size);
- return ptr;
- }
- else
- return alloc_sys_page();
-}
-
-void
-free_page(pool *p, void *ptr)
-{
- ASSERT_DIE(p->pages);
- p->pages->used--;
-
- ASSERT_DIE(p->pages->free <= POOL_PAGES_MAX);
-
- if (p->pages->free == POOL_PAGES_MAX)
- {
- const unsigned long keep = POOL_PAGES_MAX / 4;
-
- for (uint i = keep; i < p->pages->free; i++)
- free_sys_page(p->pages->ptr[i]);
-
- p->pages->free = keep;
- }
-
- p->pages->ptr[p->pages->free++] = ptr;
-}
-
#define STEP_UP(x) ((x) + (x)/2 + 4)
diff --git a/lib/resource.h b/lib/resource.h
index 7adde493..9d7dae69 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -108,8 +108,8 @@ void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_siz
extern long page_size;
/* Allocator of whole pages; for use in slabs and other high-level allocators. */
-void *alloc_page(pool *);
-void free_page(pool *, void *);
+void *alloc_page(void);
+void free_page(void *);
#define PAGE_HEAD(x) ((void *) (((intptr_t) (x)) & ~(page_size-1)))
#ifdef HAVE_LIBDMALLOC
diff --git a/lib/slab.c b/lib/slab.c
index 6348e29b..32e241e4 100644
--- a/lib/slab.c
+++ b/lib/slab.c
@@ -269,7 +269,7 @@ no_partial:
s->num_empty_heads--;
goto okay;
}
- h = alloc_page(s->p);
+ h = alloc_page();
#ifdef POISON
memset(h, 0xba, page_size);
#endif
@@ -332,7 +332,7 @@ sl_free(slab *s, void *oo)
#ifdef POISON
memset(h, 0xde, page_size);
#endif
- free_page(s->p, h);
+ free_page(h);
}
else
{
@@ -349,11 +349,11 @@ slab_free(resource *r)
struct sl_head *h, *g;
WALK_LIST_DELSAFE(h, g, s->empty_heads)
- free_page(s->p, h);
+ free_page(h);
WALK_LIST_DELSAFE(h, g, s->partial_heads)
- free_page(s->p, h);
+ free_page(h);
WALK_LIST_DELSAFE(h, g, s->full_heads)
- free_page(s->p, h);
+ free_page(h);
}
static void
@@ -386,8 +386,7 @@ slab_memsize(resource *r)
WALK_LIST(h, s->full_heads)
heads++;
-// return ALLOC_OVERHEAD + sizeof(struct slab) + heads * (ALLOC_OVERHEAD + page_size);
- return ALLOC_OVERHEAD + sizeof(struct slab); /* The page sizes are accounted for in the pool */
+ return ALLOC_OVERHEAD + sizeof(struct slab) + heads * page_size;
}
static resource *