-rw-r--r--  lib/locking.h        |  1
-rw-r--r--  lib/resource.c       | 23
-rw-r--r--  sysdep/unix/alloc.c  | 43
3 files changed, 59 insertions(+), 8 deletions(-)
diff --git a/lib/locking.h b/lib/locking.h
index 0a69f50f..1a8bdcd4 100644
--- a/lib/locking.h
+++ b/lib/locking.h
@@ -19,6 +19,7 @@ struct lock_order {
struct domain_generic *attrs;
struct domain_generic *cork;
struct domain_generic *event;
+ struct domain_generic *resource;
};
extern _Thread_local struct lock_order locking_stack;
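The new `resource` slot extends the global lock order: each member of `struct lock_order` is one lock domain, and domains are meant to be acquired in the order they are declared, tracked per thread through `locking_stack`. A minimal standalone sketch of that idea, assuming nothing about BIRD's real lock-order checks (all names below are illustrative only, not BIRD API):

#include <assert.h>
#include <stddef.h>

/* One slot per lock domain, in the order the domains may be taken. */
struct toy_lock_order {
  void *attrs, *cork, *event, *resource;
};

static _Thread_local struct toy_lock_order toy_locking_stack;

/* Before locking the domain in slot `slot`, check that no later slot is
   already held by this thread -- that would invert the global order. */
static void toy_order_check(size_t slot)
{
  void **held = (void **) &toy_locking_stack;
  size_t n = sizeof toy_locking_stack / sizeof(void *);

  for (size_t i = slot + 1; i < n; i++)
    assert(held[i] == NULL);
}
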
diff --git a/lib/resource.c b/lib/resource.c
index e80b315b..2d041ad5 100644
--- a/lib/resource.c
+++ b/lib/resource.c
@@ -60,7 +60,7 @@ static struct resclass pool_class = {
pool root_pool;
void *alloc_sys_page(void);
-void free_sys_page(void *);
+int free_sys_page(void *);
static int indent;
@@ -98,8 +98,10 @@ pool_free(resource *P)
if (p->pages)
{
ASSERT_DIE(!p->pages->used);
- for (uint i=0; i<p->pages->free; i++)
+
+ for (uint i = 0; i < p->pages->free; i++)
free_sys_page(p->pages->ptr[i]);
+
free_sys_page(p->pages);
}
}
@@ -476,10 +478,19 @@ free_page(pool *p, void *ptr)
ASSERT_DIE(p->pages);
p->pages->used--;
- if (p->pages->free >= POOL_PAGES_MAX)
- return free_sys_page(ptr);
- else
- p->pages->ptr[p->pages->free++] = ptr;
+ ASSERT_DIE(p->pages->free <= POOL_PAGES_MAX);
+
+ if (p->pages->free == POOL_PAGES_MAX)
+ {
+ const unsigned long keep = POOL_PAGES_MAX / 4;
+
+ for (uint i = keep; i < p->pages->free; i++)
+ free_sys_page(p->pages->ptr[i]);
+
+ p->pages->free = keep;
+ }
+
+ p->pages->ptr[p->pages->free++] = ptr;
}
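Taken together, the resource.c changes make free_page() always cache the returned page: once the per-pool cache reaches POOL_PAGES_MAX, it is trimmed back to a quarter of that limit instead of bypassing the cache for the page being freed. A condensed standalone sketch of the same policy (hypothetical `page_cache` struct and `sys_free` callback; the real code works on `p->pages` and `free_sys_page()`):

#include <assert.h>

#define CACHE_MAX 256                   /* stand-in for POOL_PAGES_MAX */

struct page_cache {
  unsigned free;                        /* number of cached free pages */
  void *ptr[CACHE_MAX];
};

/* Return a page to the cache; if the cache is full, hand the upper three
   quarters back to the system and keep only the first quarter. */
static void cache_free_page(struct page_cache *c, void *page,
                            void (*sys_free)(void *))
{
  assert(c->free <= CACHE_MAX);

  if (c->free == CACHE_MAX)
  {
    const unsigned keep = CACHE_MAX / 4;

    for (unsigned i = keep; i < c->free; i++)
      sys_free(c->ptr[i]);

    c->free = keep;
  }

  c->ptr[c->free++] = page;
}
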
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index 4c9d5eb5..4ae1a9db 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -11,6 +11,8 @@
#include <stdlib.h>
#include <unistd.h>
+#include <stdatomic.h>
+#include <errno.h>
#ifdef HAVE_MMAP
#include <sys/mman.h>
@@ -19,6 +21,13 @@
long page_size = 0;
_Bool alloc_multipage = 0;
+static _Atomic int global_page_list_not_empty;
+static list global_page_list;
+static _Atomic int global_page_spinlock;
+
+#define GLOBAL_PAGE_SPIN_LOCK for (int v = 0; !atomic_compare_exchange_weak_explicit(&global_page_spinlock, &v, 1, memory_order_acq_rel, memory_order_acquire); v = 0)
+#define GLOBAL_PAGE_SPIN_UNLOCK do { int v = 1; ASSERT_DIE(atomic_compare_exchange_strong_explicit(&global_page_spinlock, &v, 0, memory_order_acq_rel, memory_order_acquire)); } while (0)
+
#ifdef HAVE_MMAP
static _Bool use_fake = 0;
#else
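GLOBAL_PAGE_SPIN_LOCK and GLOBAL_PAGE_SPIN_UNLOCK implement a tiny test-and-set style spinlock over a C11 atomic int: lock spins on a weak compare-and-swap of 0 -> 1, unlock asserts a strong 1 -> 0 swap. The same pattern written out as functions, as an illustrative sketch only:

#include <assert.h>
#include <stdatomic.h>

static _Atomic int spinlock;            /* 0 = unlocked, 1 = locked */

static void spin_lock(void)
{
  int expected = 0;

  /* Spin until we atomically flip 0 -> 1; a failed CAS writes the current
     value into `expected`, so reset it before retrying. */
  while (!atomic_compare_exchange_weak_explicit(&spinlock, &expected, 1,
                                                memory_order_acq_rel,
                                                memory_order_acquire))
    expected = 0;
}

static void spin_unlock(void)
{
  int expected = 1;

  /* Flipping 1 -> 0 must succeed; anything else is a locking bug. */
  int ok = atomic_compare_exchange_strong_explicit(&spinlock, &expected, 0,
                                                   memory_order_acq_rel,
                                                   memory_order_acquire);
  assert(ok);
}
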
@@ -28,12 +37,14 @@ static _Bool use_fake = 1;
void resource_sys_init(void)
{
#ifdef HAVE_MMAP
+ init_list(&global_page_list);
+
if (!(page_size = sysconf(_SC_PAGESIZE)))
die("System page size must be non-zero");
if ((u64_popcount(page_size) > 1) || (page_size > 16384))
- {
#endif
+ {
/* Too big or strange page, use the aligned allocator instead */
page_size = 4096;
use_fake = 1;
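The `u64_popcount(page_size) > 1` test rejects any page size that is not a power of two (a power of two has exactly one set bit); page sizes above 16 KiB are rejected as well, and both cases fall back to the fake 4096-byte page allocator. A sketch of the same check using a common compiler builtin in place of BIRD's `u64_popcount` (assumption: GCC/Clang `__builtin_popcountll` is available):

#include <unistd.h>

/* Returns 1 when the system page size is usable for the mmap allocator:
   exactly one set bit (power of two) and not larger than 16 KiB. */
static int page_size_usable(void)
{
  long ps = sysconf(_SC_PAGESIZE);

  return ps > 0
      && __builtin_popcountll((unsigned long long) ps) == 1
      && ps <= 16384;
}
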
@@ -46,6 +57,22 @@ alloc_sys_page(void)
#ifdef HAVE_MMAP
if (!use_fake)
{
+ if (atomic_load_explicit(&global_page_list_not_empty, memory_order_relaxed))
+ {
+ GLOBAL_PAGE_SPIN_LOCK;
+ if (!EMPTY_LIST(global_page_list))
+ {
+ node *ret = HEAD(global_page_list);
+ rem_node(ret);
+ if (EMPTY_LIST(global_page_list))
+ atomic_store_explicit(&global_page_list_not_empty, 0, memory_order_relaxed);
+ GLOBAL_PAGE_SPIN_UNLOCK;
+ memset(ret, 0, sizeof(node));
+ return (void *) ret;
+ }
+ GLOBAL_PAGE_SPIN_UNLOCK;
+ }
+
if (alloc_multipage)
{
void *big = mmap(NULL, page_size * 2, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
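On allocation, the relaxed atomic flag lets the common case (no kept pages) skip the spinlock entirely; only when the flag is set does the thread lock, re-check the list under the lock, and pop a page. A self-contained sketch of that double-checked reuse pattern over a simple intrusive free list (hypothetical names; the real code uses BIRD's `list`/`node` and the CAS-based spinlock macros above, while this sketch uses a standard `atomic_flag`):

#include <stdatomic.h>
#include <stddef.h>

struct free_page { struct free_page *next; };

static struct free_page *kept_pages;            /* guarded by kept_lock */
static atomic_flag kept_lock = ATOMIC_FLAG_INIT;
static _Atomic int kept_pages_not_empty;        /* cheap hint for the fast path */

static void *try_reuse_page(void)
{
  /* Fast path: if the hint says there is nothing to reuse, skip the lock. */
  if (!atomic_load_explicit(&kept_pages_not_empty, memory_order_relaxed))
    return NULL;

  while (atomic_flag_test_and_set_explicit(&kept_lock, memory_order_acquire))
    ;                                           /* spin */

  struct free_page *p = kept_pages;
  if (p)
  {
    kept_pages = p->next;
    if (!kept_pages)
      atomic_store_explicit(&kept_pages_not_empty, 0, memory_order_relaxed);
  }

  atomic_flag_clear_explicit(&kept_lock, memory_order_release);
  return p;
}
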
@@ -90,7 +117,19 @@ free_sys_page(void *ptr)
if (!use_fake)
{
if (munmap(ptr, page_size) < 0)
- bug("munmap(%p) failed: %m", ptr);
+#ifdef ENOMEM
+ if (errno == ENOMEM)
+ {
+ memset(ptr, 0, page_size);
+
+ GLOBAL_PAGE_SPIN_LOCK;
+ add_tail(&global_page_list, (node *) ptr);
+ atomic_store_explicit(&global_page_list_not_empty, 1, memory_order_relaxed);
+ GLOBAL_PAGE_SPIN_UNLOCK;
+ }
+ else
+#endif
+ bug("munmap(%p) failed: %m", ptr);
}
else
#endif
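
The free path no longer treats every munmap() failure as fatal: on Linux, unmapping a single page out of a larger anonymous mapping splits the mapping, and munmap() can then fail with ENOMEM once the process hits its mapping-count limit (vm.max_map_count). In that case the page is wiped and parked on the global keep-list so a later alloc_sys_page() can hand it out again, which is presumably also why the free_sys_page() prototype changes from void to int. A self-contained sketch of that fallback, using the same hypothetical keep-list as the previous sketch rather than BIRD's `list` API:

#include <errno.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/mman.h>

struct kept_page { struct kept_page *next; };

static struct kept_page *kept_pages;            /* shared keep-list */
static atomic_flag kept_lock = ATOMIC_FLAG_INIT;
static _Atomic int kept_pages_not_empty;

/* Returns 0 when the page was unmapped, 1 when it was kept for reuse,
   -1 on a genuine munmap() error the caller should treat as a bug. */
static int return_page(void *ptr, long page_size)
{
  if (munmap(ptr, page_size) == 0)
    return 0;

  if (errno != ENOMEM)
    return -1;

  memset(ptr, 0, page_size);                    /* hand out zeroed pages later */

  while (atomic_flag_test_and_set_explicit(&kept_lock, memory_order_acquire))
    ;                                           /* spin */

  ((struct kept_page *) ptr)->next = kept_pages;
  kept_pages = ptr;
  atomic_store_explicit(&kept_pages_not_empty, 1, memory_order_relaxed);

  atomic_flag_clear_explicit(&kept_lock, memory_order_release);
  return 1;
}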