summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaria Matejka <mq@ucw.cz>2022-09-09 20:57:59 +0200
committerMaria Matejka <mq@ucw.cz>2022-09-18 16:33:51 +0200
commitb80823fe828a0687d4baae3c34c737d46fbf439c (patch)
tree89833602bbe291778e3de42a939efe297e9ce084
parent3d627d09d4da3bdc1712bde67ba62c9cfbfcedc9 (diff)
Memory pages allocator is now a global simple lockless structure
-rw-r--r--lib/resource.c2
-rw-r--r--lib/resource.h1
-rw-r--r--nest/cmds.c6
-rw-r--r--sysdep/unix/alloc.c118
4 files changed, 64 insertions, 63 deletions
diff --git a/lib/resource.c b/lib/resource.c
index 898fb533..2e367132 100644
--- a/lib/resource.c
+++ b/lib/resource.c
@@ -279,8 +279,8 @@ rlookup(unsigned long a)
void
resource_init(void)
{
- resource_sys_init();
rcu_init();
+ resource_sys_init();
root_pool.r.class = &pool_class;
root_pool.name = "Root";
diff --git a/lib/resource.h b/lib/resource.h
index 5ad011ec..56a746bb 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -122,6 +122,7 @@ void buffer_realloc(void **buf, unsigned *size, unsigned need, unsigned item_siz
/* Allocator of whole pages; for use in slabs and other high-level allocators. */
#define PAGE_HEAD(x) ((void *) (((uintptr_t) (x)) & ~(page_size-1)))
extern long page_size;
+extern _Atomic int pages_kept;
void *alloc_page(void);
void free_page(void *);
diff --git a/nest/cmds.c b/nest/cmds.c
index 092be48a..96a3ec91 100644
--- a/nest/cmds.c
+++ b/nest/cmds.c
@@ -109,7 +109,6 @@ print_size(char *dsc, struct resmem vals)
extern pool *rt_table_pool;
extern pool *rta_pool;
-extern uint *pages_kept;
void
cmd_show_memory(void)
@@ -121,8 +120,9 @@ cmd_show_memory(void)
print_size("Protocols:", rmemsize(proto_pool));
struct resmem total = rmemsize(&root_pool);
#ifdef HAVE_MMAP
- print_size("Standby memory:", (struct resmem) { .overhead = page_size * *pages_kept });
- total.overhead += page_size * *pages_kept;
+  /* Note: the local must not be named pages_kept — it would shadow the global
+     _Atomic counter and &pages_kept would point at the uninitialized local. */
+  int pk = atomic_load_explicit(&pages_kept, memory_order_relaxed);
+  print_size("Standby memory:", (struct resmem) { .overhead = page_size * pk });
+  total.overhead += page_size * pk;
#endif
print_size("Total:", total);
cli_msg(0, "");
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index a2384ca8..47cd4624 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -10,6 +10,7 @@
#include "lib/resource.h"
#include "lib/lists.h"
#include "lib/event.h"
+#include "lib/rcu.h"
#include <errno.h>
#include <stdlib.h>
@@ -22,41 +23,32 @@
long page_size = 0;
#ifdef HAVE_MMAP
-#define KEEP_PAGES_MAIN_MAX 256
-#define KEEP_PAGES_MAIN_MIN 8
-#define CLEANUP_PAGES_BULK 256
+#define KEEP_PAGES_MAX 256
+#define KEEP_PAGES_MIN 8
-STATIC_ASSERT(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX);
+STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);
static _Bool use_fake = 0;
+static _Bool initialized = 0;
#if DEBUGGING
struct free_page {
node unused[42];
- node n;
+ struct free_page * _Atomic next;
};
#else
struct free_page {
- node n;
+ struct free_page * _Atomic next;
};
#endif
-struct free_pages {
- list pages;
- u16 min, max; /* Minimal and maximal number of free pages kept */
- uint cnt; /* Number of empty pages */
- event cleanup;
-};
-
-static void global_free_pages_cleanup_event(void *);
+static struct free_page * _Atomic page_stack = NULL;
-static struct free_pages global_free_pages = {
- .min = KEEP_PAGES_MAIN_MIN,
- .max = KEEP_PAGES_MAIN_MAX,
- .cleanup = { .hook = global_free_pages_cleanup_event },
-};
+static void page_cleanup(void *);
+static event page_cleanup_event = { .hook = page_cleanup, };
+#define SCHEDULE_CLEANUP do if (initialized && !shutting_down) ev_send(&global_event_list, &page_cleanup_event); while (0)
-uint *pages_kept = &global_free_pages.cnt;
+_Atomic int pages_kept = 0;
static void *
alloc_sys_page(void)
@@ -90,20 +82,21 @@ alloc_page(void)
}
#ifdef HAVE_MMAP
- struct free_pages *fps = &global_free_pages;
+ rcu_read_lock();
+ struct free_page *fp = atomic_load_explicit(&page_stack, memory_order_acquire);
+ while (fp && !atomic_compare_exchange_strong_explicit(
+ &page_stack, &fp, atomic_load_explicit(&fp->next, memory_order_acquire),
+ memory_order_acq_rel, memory_order_acquire))
+ ;
+ rcu_read_unlock();
- if (fps->cnt)
- {
- struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages));
- rem_node(&fp->n);
- if ((--fps->cnt < fps->min) && !shutting_down)
- ev_send(&global_work_list, &fps->cleanup);
+ if (!fp)
+ return alloc_sys_page();
- bzero(fp, page_size);
- return fp;
- }
+ if (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) <= KEEP_PAGES_MIN)
+ SCHEDULE_CLEANUP;
- return alloc_sys_page();
+ return fp;
#endif
}
@@ -117,45 +110,51 @@ free_page(void *ptr)
}
#ifdef HAVE_MMAP
- struct free_pages *fps = &global_free_pages;
+ rcu_read_lock();
struct free_page *fp = ptr;
+ struct free_page *next = atomic_load_explicit(&page_stack, memory_order_acquire);
- fp->n = (node) {};
- add_tail(&fps->pages, &fp->n);
+ do atomic_store_explicit(&fp->next, next, memory_order_release);
+ while (!atomic_compare_exchange_strong_explicit(
+ &page_stack, &next, fp,
+ memory_order_acq_rel, memory_order_acquire));
+ rcu_read_unlock();
- if ((++fps->cnt > fps->max) && !shutting_down)
- ev_send(&global_work_list, &fps->cleanup);
+ if (atomic_fetch_add_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX)
+ SCHEDULE_CLEANUP;
#endif
}
#ifdef HAVE_MMAP
static void
-global_free_pages_cleanup_event(void *data UNUSED)
+page_cleanup(void *_ UNUSED)
{
- if (shutting_down)
+ struct free_page *stack = atomic_exchange_explicit(&page_stack, NULL, memory_order_acq_rel);
+ if (!stack)
return;
- struct free_pages *fps = &global_free_pages;
+ synchronize_rcu();
- while (fps->cnt / 2 < fps->min)
- {
- struct free_page *fp = alloc_sys_page();
- fp->n = (node) {};
- add_tail(&fps->pages, &fp->n);
- fps->cnt++;
+  do {
+    struct free_page *f = stack;
+    stack = atomic_load_explicit(&f->next, memory_order_acquire);
+
+    /* Account for the page leaving the stack unconditionally here; keeping the
+       decrement inside the loop condition would skip it for the last page when
+       `stack &&` short-circuits, leaking the counter by one per full drain.
+       On the ENOMEM re-push below, free_page() re-increments, so the net is zero. */
+    atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+
+    if (munmap(f, page_size) == 0)
+      continue;
+    else if (errno != ENOMEM)
+      bug("munmap(%p) failed: %m", f);
+    else
+      free_page(f);
+  }
+  while (stack && (atomic_load_explicit(&pages_kept, memory_order_relaxed) >= KEEP_PAGES_MAX / 2));
- for (uint seen = 0; (seen < CLEANUP_PAGES_BULK) && (fps->cnt > fps->max / 2); seen++)
+ while (stack)
{
- struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages));
- rem_node(&fp->n);
+ atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
- if (munmap(fp, page_size) == 0)
- fps->cnt--;
- else if (errno == ENOMEM)
- add_head(&fps->pages, &fp->n);
- else
- bug("munmap(%p) failed: %m", fp);
+ struct free_page *f = stack;
+ stack = atomic_load_explicit(&f->next, memory_order_acquire);
+ free_page(f);
}
}
#endif
@@ -164,17 +163,17 @@ void
resource_sys_init(void)
{
#ifdef HAVE_MMAP
- ASSERT_DIE(global_free_pages.cnt == 0);
-
if (!(page_size = sysconf(_SC_PAGESIZE)))
die("System page size must be non-zero");
if (u64_popcount(page_size) == 1)
{
- struct free_pages *fps = &global_free_pages;
- init_list(&fps->pages);
- global_free_pages_cleanup_event(NULL);
+    /* Preallocate directly from the system: free_page(alloc_page()) would just
+       pop and re-push the same single page after the first iteration. */
+    for (int i = 0; i < (KEEP_PAGES_MIN * 2); i++)
+      free_page(alloc_sys_page());
+
+ page_cleanup(NULL);
+ initialized = 1;
return;
}
@@ -184,4 +183,5 @@ resource_sys_init(void)
#endif
page_size = 4096;
+ initialized = 1;
}