summaryrefslogtreecommitdiff
path: root/sysdep/unix/alloc.c
blob: 47cd46249b246c3ec48f172fabfc509c98762b38 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/*
 *	BIRD Internet Routing Daemon -- Raw allocation
 *
 *	(c) 2020  Maria Matejka <mq@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

#include "nest/bird.h"
#include "lib/resource.h"
#include "lib/lists.h"
#include "lib/event.h"
#include "lib/rcu.h"

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif

/* System memory page size; probed in resource_sys_init(), 4096 in the fallback path. */
long page_size = 0;

#ifdef HAVE_MMAP
/* Watermarks for the cache of unmapped-but-kept pages:
 * cleanup is scheduled when the count leaves the [MIN, MAX] band. */
#define KEEP_PAGES_MAX	256
#define KEEP_PAGES_MIN	8

/* The cleanup hysteresis below relies on MIN being well under MAX/2. */
STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);

static _Bool use_fake = 0;	/* Use posix_memalign() instead of mmap() */
static _Bool initialized = 0;	/* resource_sys_init() has completed */

#if DEBUGGING
/* Node header stored inside each cached free page.
 * The unused[] padding enlarges the struct in debug builds —
 * presumably to catch stray writes into freed pages; TODO confirm. */
struct free_page {
  node unused[42];
  struct free_page * _Atomic next;
};
#else
struct free_page {
  struct free_page * _Atomic next;
};
#endif

/* Lock-free LIFO stack of cached free pages; nodes live inside the pages themselves. */
static struct free_page * _Atomic page_stack = NULL;

static void page_cleanup(void *);
static event page_cleanup_event = { .hook = page_cleanup, };
/* Ask the main event loop to rebalance the page cache — only once the
 * allocator is initialized and not while shutting down. */
#define SCHEDULE_CLEANUP  do if (initialized && !shutting_down) ev_send(&global_event_list, &page_cleanup_event); while (0)

/* Number of pages currently held on page_stack (approximate; updated relaxed). */
_Atomic int pages_kept = 0;

/*
 * Map one fresh anonymous page directly from the kernel.
 * Never returns NULL: a failed mmap() aborts the daemon via bug().
 */
static void *
alloc_sys_page(void)
{
  void *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

  if (page == MAP_FAILED)
    bug("mmap(%lu) failed: %m", page_size);

  return page;
}

extern int shutting_down; /* Shutdown requested. */

#else // ! HAVE_MMAP
#define use_fake  1
#endif

/*
 * Allocate one page-sized, page-aligned block of memory,
 * preferring a page cached on the global free-page stack.
 */
void *
alloc_page(void)
{
  /* Fallback allocator: plain aligned heap allocation when mmap()
   * is unavailable or the page size is unsuitable. */
  if (use_fake)
  {
    void *ptr = NULL;
    int err = posix_memalign(&ptr, page_size, page_size);

    if (err || !ptr)
      bug("posix_memalign(%lu) failed", (long unsigned int) page_size);

    return ptr;
  }

#ifdef HAVE_MMAP
  /* Lock-free pop from the global page stack.  The RCU read-side
   * section keeps page_cleanup() from munmap()ing the node while we
   * dereference fp->next; the CAS retries if another thread pops or
   * pushes concurrently. */
  rcu_read_lock();
  struct free_page *fp = atomic_load_explicit(&page_stack, memory_order_acquire);
  while (fp && !atomic_compare_exchange_strong_explicit(
	&page_stack, &fp, atomic_load_explicit(&fp->next, memory_order_acquire),
	memory_order_acq_rel, memory_order_acquire))
    ;
  rcu_read_unlock();

  /* Cache empty — get a brand new page from the kernel. */
  if (!fp)
    return alloc_sys_page();

  /* fetch_sub returns the previous count; if the cache has dropped to
   * the low watermark, schedule the cleanup event to rebalance.
   * NOTE(review): scheduling cleanup when running *low* looks
   * counter-intuitive — presumably the hook also refills or the event
   * keeps the counter honest; confirm against page_cleanup(). */
  if (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) <= KEEP_PAGES_MIN)
    SCHEDULE_CLEANUP;

  /* The node header is at the start of the page, so fp is the page itself. */
  return fp;
#endif
}

/*
 * Return a page obtained from alloc_page().  The page is not unmapped
 * immediately; it is pushed onto the global free-page stack for reuse,
 * and a cleanup event is scheduled once the cache grows past the
 * high watermark.
 */
void
free_page(void *ptr)
{
  /* Fallback allocator pages came from posix_memalign(). */
  if (use_fake)
  {
    free(ptr);
    return;
  }

#ifdef HAVE_MMAP
  /* Lock-free push: reuse the page itself as the stack node.  RCU
   * read-side section pairs with synchronize_rcu() in page_cleanup()
   * so the cleanup cannot unmap pages while we are linking them in. */
  rcu_read_lock();
  struct free_page *fp = ptr;
  struct free_page *next = atomic_load_explicit(&page_stack, memory_order_acquire);

  /* On CAS failure `next` is reloaded with the current head, so the
   * node's link must be rewritten before each retry. */
  do atomic_store_explicit(&fp->next, next, memory_order_release);
  while (!atomic_compare_exchange_strong_explicit(
	&page_stack, &next, fp,
	memory_order_acq_rel, memory_order_acquire));
  rcu_read_unlock();

  /* fetch_add returns the previous count; past the high watermark,
   * ask the main loop to drop surplus pages back to the kernel. */
  if (atomic_fetch_add_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX)
    SCHEDULE_CLEANUP;
#endif
}

#ifdef HAVE_MMAP
/*
 * Event hook: shrink the free-page cache.  Detaches the whole stack,
 * waits out an RCU grace period so no reader still holds a node, then
 * unmaps pages until the cache count falls to KEEP_PAGES_MAX / 2 and
 * pushes the remainder back via free_page().
 */
static void
page_cleanup(void *_ UNUSED)
{
  /* Atomically steal the entire stack; concurrent alloc/free now see
   * an empty cache and operate on a fresh one. */
  struct free_page *stack = atomic_exchange_explicit(&page_stack, NULL, memory_order_acq_rel);
  if (!stack)
    return;

  /* Wait until every rcu_read_lock() section that may still reference
   * the detached nodes has finished — only then is munmap() safe. */
  synchronize_rcu();

  /* Unmap pages one by one.  pages_kept is decremented in the loop
   * condition (do-while), so the count stays in step with each page
   * actually taken off the detached list; stop once it reaches half
   * the high watermark. */
  do {
    struct free_page *f = stack;
    stack = atomic_load_explicit(&f->next, memory_order_acquire);

    if (munmap(f, page_size) == 0)
      continue;
    else if (errno != ENOMEM)
      bug("munmap(%p) failed: %m", f);
    else
      /* Kernel is out of bookkeeping memory for the unmapping itself —
       * keep the page cached instead of losing it. */
      free_page(f);
  }
  while (stack && (atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX / 2));

  /* Return the surplus we decided to keep back to the live stack.
   * The explicit decrement balances the increment done inside
   * free_page(), so pages_kept stays accurate. */
  while (stack)
  {
    atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);

    struct free_page *f = stack;
    stack = atomic_load_explicit(&f->next, memory_order_acquire);
    free_page(f);
  }
}
#endif

/*
 * Initialize the raw page allocator: probe the system page size, and
 * either pre-fill the mmap-backed free-page cache (power-of-two page
 * size) or fall back to the aligned heap allocator.
 */
void
resource_sys_init(void)
{
#ifdef HAVE_MMAP
  /* sysconf() returns -1 on error, which the old `!page_size` test let
   * through; reject anything below 1 so we never proceed with a bogus
   * (negative or zero) page size. */
  page_size = sysconf(_SC_PAGESIZE);
  if (page_size < 1)
    die("System page size must be non-zero");

  if (u64_popcount(page_size) == 1)
  {
    /* Pre-fill the cache so early allocations avoid mmap() round trips,
     * then let page_cleanup() trim it to its steady-state size. */
    for (int i = 0; i < (KEEP_PAGES_MIN * 2); i++)
      free_page(alloc_page());

    page_cleanup(NULL);
    initialized = 1;
    return;
  }

  /* Too big or strange page, use the aligned allocator instead */
  log(L_WARN "Got strange memory page size (%lu), using the aligned allocator instead", page_size);
  use_fake = 1;
#endif

  /* Fallback: fixed 4 KiB granule for posix_memalign() allocations. */
  page_size = 4096;
  initialized = 1;
}