author     Maria Matejka <mq@ucw.cz>   2020-07-22 00:09:15 +0200
committer  Maria Matejka <mq@ucw.cz>   2021-03-25 16:47:48 +0100
commit     886dd92eeefa070d8db6aaf0245a67f7a9e9b983 (patch)
tree       67911e19951d083c003e212578fae76d93deb01c /sysdep/unix/alloc.c
parent     82f19ba95e421f00a8e99a866a2b8d9bbdba6cdc (diff)
Slab: head now uses bitmask for used/free nodes info instead of lists
From now on, no auxiliary pointers are stored in the free slab nodes. The old free-list scheme led to strange debugging problems when a use-after-free happened in a slab-allocated structure, especially if the structure's first member is a next pointer. The change also reduces the memory needed by one pointer per allocated object. On the other hand, we now rely on pages being aligned to a multiple of their size, which is quite common anyway.
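
(The page-alignment assumption is what makes the bitmask approach work: with every slab page aligned to its size, the slab head can be recovered from any object pointer by masking, so free objects no longer need to carry list pointers. Below is a minimal sketch of that idea; the names sl_head, used_bitmap, sl_head_of and sl_mark_free, the single 64-bit bitmap word and the fixed PAGE_SIZE are illustrative assumptions, not the actual lib/slab.c layout.)

    #include <stdint.h>

    #define PAGE_SIZE 4096u              /* assumed here; BIRD queries it at runtime */

    struct sl_head {                     /* hypothetical slab-page header */
      uint64_t used_bitmap;              /* bit i set => object slot i is in use
                                            (one word limits this sketch to 64 slots) */
      uint32_t obj_size;                 /* size of one object slot */
      /* object slots follow the header within the same page */
    };

    /* Because every slab page is aligned to PAGE_SIZE, masking the low bits
     * of any object pointer yields that page's head; no back-pointer in the
     * object and no next-pointer in free slots is needed. */
    static inline struct sl_head *
    sl_head_of(void *obj)
    {
      return (struct sl_head *)((uintptr_t)obj & ~((uintptr_t)PAGE_SIZE - 1));
    }

    /* Freeing an object just clears its bit instead of linking it into a list. */
    static inline void
    sl_mark_free(void *obj)
    {
      struct sl_head *h = sl_head_of(obj);
      uint64_t idx = ((uintptr_t)obj - (uintptr_t)(h + 1)) / h->obj_size;
      h->used_bitmap &= ~(1ULL << idx);
    }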
Diffstat (limited to 'sysdep/unix/alloc.c')
-rw-r--r--  sysdep/unix/alloc.c  |  80
1 file changed, 80 insertions, 0 deletions
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
new file mode 100644
index 00000000..c525f713
--- /dev/null
+++ b/sysdep/unix/alloc.c
@@ -0,0 +1,80 @@
+/*
+ * BIRD Internet Routing Daemon -- Raw allocation
+ *
+ * (c) 2020 Maria Matejka <mq@ucw.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#include "nest/bird.h"
+#include "lib/resource.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#ifdef HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#ifdef HAVE_MMAP
+static u64 page_size = 0;
+static _Bool use_fake = 0;
+#else
+static const u64 page_size = 4096; /* Fake page size */
+#endif
+
+u64 get_page_size(void)
+{
+ if (page_size)
+ return page_size;
+
+#ifdef HAVE_MMAP
+ if (page_size = sysconf(_SC_PAGESIZE))
+ {
+ if ((u64_popcount(page_size) > 1) || (page_size > 16384))
+ {
+ /* Too big or strange page, use the aligned allocator instead */
+ page_size = 4096;
+ use_fake = 1;
+ }
+ return page_size;
+ }
+
+ bug("Page size must be non-zero");
+#endif
+}
+
+void *
+alloc_page(void)
+{
+#ifdef HAVE_MMAP
+ if (!use_fake)
+ {
+ void *ret = mmap(NULL, get_page_size(), PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ret == MAP_FAILED)
+ bug("mmap(%lu) failed: %m", page_size);
+ return ret;
+ }
+ else
+#endif
+ {
+ void *ret = aligned_alloc(page_size, page_size);
+ if (!ret)
+ bug("aligned_alloc(%lu) failed", page_size);
+ return ret;
+ }
+}
+
+void
+free_page(void *ptr)
+{
+#ifdef HAVE_MMAP
+ if (!use_fake)
+ {
+ if (munmap(ptr, get_page_size()) < 0)
+ bug("munmap(%p) failed: %m", ptr);
+ }
+ else
+#endif
+ free(ptr);
+}