author    Maria Matejka <mq@ucw.cz>  2023-01-18 09:39:45 +0100
committer Maria Matejka <mq@ucw.cz>  2023-01-18 09:39:45 +0100
commit    973aa37e1e28a9c508fe09c008196f64cd3966fd (patch)
tree      f9732862af619bac52caf0d4e1bd6ecc60363f62 /sysdep/unix
parent    64a2b7aaa303be0b407508747bfc96c1c656f1e2 (diff)
Fix memory pre-allocation
When BIRD has no free memory mapped, it allocates several pages in advance just to be sure that some memory is available if needed. This hysteresis tactic works quite well to reduce memory ping-pong with the kernel. Yet it had a subtle bug: the pre-allocation didn't take the memory coldlist into account, and therefore requested new pages from the kernel even when other pages were available. This led to slow memory bloating.

To demonstrate this behavior fast enough to be seen well, you may:

* temporarily set the values in sysdep/unix/alloc.c as follows to exacerbate the issue:

    #define KEEP_PAGES_MAIN_MAX 4096
    #define KEEP_PAGES_MAIN_MIN 1000
    #define CLEANUP_PAGES_BULK 4096

* create a config file with several million static routes
* periodically disable all static protocols and then reload the config
* log memory consumption

This should give you a steady growth rate of about 16 kB per cycle. If you don't set the values this high, the issue develops much more slowly, yet after 14 days of running you are going to see an OOM kill.

After this fix, pre-allocation uses the memory coldlist to get some hot pages, and the same test yields perfectly stable, constant memory consumption (after some initial wobbling).

Thanks to NIX-CZ for reporting and helping to investigate this issue. Thanks to Santiago for finding the cause in the code.
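To picture the hot/cold scheme, here is a minimal standalone C sketch of the idea. It is an illustration under assumed names (hot_list, cold_list, sys_page(), get_page(), replenish() are all made up for the sketch), not BIRD's actual allocator, which uses its own node/list macros and an mmap()-backed alloc_sys_page(). The point is the fixed ordering: any page request, including pre-allocation, drains the cold list before asking the kernel for a fresh page.

    /* Simplified model of a hot/cold free page cache; all names
       are illustrative, not the real BIRD symbols. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    struct page { struct page *next; };

    static struct page *hot_list, *cold_list;
    static int hot_cnt, cold_cnt;

    /* Stand-in for BIRD's alloc_sys_page(); the real one mmap()s. */
    static struct page *sys_page(void)
    {
      struct page *p = malloc(PAGE_SIZE);
      if (!p)
        abort();
      return p;
    }

    static struct page *pop(struct page **list, int *cnt)
    {
      struct page *p = *list;
      if (p)
      {
        *list = p->next;
        (*cnt)--;
      }
      return p;
    }

    static void push(struct page **list, int *cnt, struct page *p)
    {
      p->next = *list;
      *list = p;
      (*cnt)++;
    }

    /* The fixed ordering: hot page, then cold page, only then the kernel. */
    static struct page *get_page(void)
    {
      struct page *p = pop(&hot_list, &hot_cnt);
      if (p)
        return p;
      if ((p = pop(&cold_list, &cold_cnt)))  /* this step was missing */
        return p;
      return sys_page();
    }

    /* Pre-allocation: keep at least min hot pages ready. Routing each
       page through get_page() recycles cold pages instead of always
       requesting fresh ones, which is what caused the slow bloat. */
    static void replenish(int min)
    {
      while (hot_cnt < min)
        push(&hot_list, &hot_cnt, get_page());
    }

    int main(void)
    {
      /* Simulate: free three pages to the cold list, then replenish. */
      for (int i = 0; i < 3; i++)
        push(&cold_list, &cold_cnt, sys_page());
      replenish(8);
      printf("hot=%d cold=%d\n", hot_cnt, cold_cnt);  /* hot=8 cold=0 */
      return 0;
    }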
Diffstat (limited to 'sysdep/unix')
-rw-r--r--  sysdep/unix/alloc.c  16
1 file changed, 10 insertions, 6 deletions
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index e7c4e6b0..0ca12ec3 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -58,6 +58,7 @@ struct free_pages {
 };
 
 static void global_free_pages_cleanup_event(void *);
+static void *alloc_cold_page(void);
 
 static struct free_pages global_free_pages = {
   .min = KEEP_PAGES_MAIN_MIN,
@@ -114,6 +115,14 @@ alloc_page(void)
     return fp;
   }
+  else
+    return alloc_cold_page();
+}
+
+static void *
+alloc_cold_page(void)
+{
+  struct free_pages *fps = &global_free_pages;
 
   /* If there is any free page kept cold, we use that. */
   if (!EMPTY_LIST(fps->empty))
@@ -170,12 +179,7 @@ global_free_pages_cleanup_event(void *data UNUSED)
   /* Cleanup may get called when hot free page cache is short of pages. Replenishing. */
   while (fps->cnt / 2 < fps->min)
-  {
-    struct free_page *fp = alloc_sys_page();
-    fp->n = (node) {};
-    add_tail(&fps->pages, &fp->n);
-    fps->cnt++;
-  }
+    free_page(alloc_cold_page());
 
   /* Or the hot free page cache is too big. Moving some pages to the cold free page cache. */
   for (int limit = CLEANUP_PAGES_BULK; limit && (fps->cnt > fps->max / 2); fps->cnt--, limit--)
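The last hunk is the cleanup hysteresis; it can be sketched by continuing the illustrative model above (same made-up helpers; the real logic lives in global_free_pages_cleanup_event()). Below the minimum, hot pages are replenished through the same path that now prefers cold pages; above the maximum, a bounded batch of pages, CLEANUP_PAGES_BULK in the real code, is demoted to the cold cache so that a single cleanup run stays cheap.

    /* Cleanup hysteresis, reusing push/pop/get_page from the sketch above. */
    static void cleanup(int min, int max, int bulk)
    {
      /* Hot free page cache short of pages: replenish via get_page(),
         so cold pages are recycled before new ones are requested. */
      while (hot_cnt / 2 < min)
        push(&hot_list, &hot_cnt, get_page());

      /* Hot free page cache too big: demote a bounded batch of pages
         to the cold cache so one cleanup run stays cheap. */
      for (int limit = bulk; limit && (hot_cnt > max / 2); limit--)
        push(&cold_list, &cold_cnt, pop(&hot_list, &hot_cnt));
    }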