path: root/sysdep/unix
Diffstat (limited to 'sysdep/unix')
-rw-r--r--  sysdep/unix/Makefile  |    2
-rw-r--r--  sysdep/unix/alloc.c   |  241
-rw-r--r--  sysdep/unix/config.Y  |   11
-rw-r--r--  sysdep/unix/domain.c  |  126
-rw-r--r--  sysdep/unix/io-loop.c | 1296
-rw-r--r--  sysdep/unix/io-loop.h |   93
-rw-r--r--  sysdep/unix/io.c      |  333
-rw-r--r--  sysdep/unix/krt.Y     |    5
-rw-r--r--  sysdep/unix/krt.c     |  486
-rw-r--r--  sysdep/unix/krt.h     |   10
-rw-r--r--  sysdep/unix/log.c     |   46
-rw-r--r--  sysdep/unix/main.c    |   34
-rw-r--r--  sysdep/unix/unix.h    |    8
13 files changed, 2049 insertions(+), 642 deletions(-)
diff --git a/sysdep/unix/Makefile b/sysdep/unix/Makefile
index d0d36b5f..6f6b0d26 100644
--- a/sysdep/unix/Makefile
+++ b/sysdep/unix/Makefile
@@ -1,4 +1,4 @@
-src := alloc.c io.c krt.c log.c main.c random.c
+src := alloc.c io.c io-loop.c krt.c log.c main.c random.c domain.c
obj := $(src-o-files)
$(all-daemon)
$(cf-local)
diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c
index c8f1c83f..cafcc8dd 100644
--- a/sysdep/unix/alloc.c
+++ b/sysdep/unix/alloc.c
@@ -10,6 +10,7 @@
#include "lib/resource.h"
#include "lib/lists.h"
#include "lib/event.h"
+#include "lib/rcu.h"
#include <errno.h>
#include <stdlib.h>
@@ -29,56 +30,55 @@
long page_size = 0;
#ifdef HAVE_MMAP
-#define KEEP_PAGES_MAIN_MAX 256
-#define KEEP_PAGES_MAIN_MIN 8
-#define CLEANUP_PAGES_BULK 256
+#define KEEP_PAGES_MAX 512
+#define KEEP_PAGES_MIN 32
+#define KEEP_PAGES_MAX_LOCAL 16
+#define ALLOC_PAGES_AT_ONCE 8
-STATIC_ASSERT(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX);
+STATIC_ASSERT(KEEP_PAGES_MIN * 4 < KEEP_PAGES_MAX);
+STATIC_ASSERT(ALLOC_PAGES_AT_ONCE < KEEP_PAGES_MAX_LOCAL);
static _Bool use_fake = 0;
+static _Bool initialized = 0;
#if DEBUGGING
struct free_page {
node unused[42];
- node n;
+ struct free_page * _Atomic next;
};
#else
struct free_page {
- node n;
+ struct free_page * _Atomic next;
};
#endif
#define EP_POS_MAX ((page_size - OFFSETOF(struct empty_pages, pages)) / sizeof (void *))
struct empty_pages {
- node n;
+ struct empty_pages *next;
uint pos;
void *pages[0];
};
-struct free_pages {
- list pages; /* List of (struct free_page) keeping free pages without releasing them (hot) */
- list empty; /* List of (struct empty_pages) keeping invalidated pages mapped for us (cold) */
- u16 min, max; /* Minimal and maximal number of free pages kept */
- uint cnt; /* Number of free pages in list */
- event cleanup;
-};
+DEFINE_DOMAIN(resource);
+static DOMAIN(resource) empty_pages_domain;
+static struct empty_pages *empty_pages = NULL;
-static void global_free_pages_cleanup_event(void *);
-static void *alloc_cold_page(void);
+static struct free_page * _Atomic page_stack = NULL;
+static _Thread_local struct free_page * local_page_stack = NULL;
-static struct free_pages global_free_pages = {
- .min = KEEP_PAGES_MAIN_MIN,
- .max = KEEP_PAGES_MAIN_MAX,
- .cleanup = { .hook = global_free_pages_cleanup_event },
-};
+static void page_cleanup(void *);
+static event page_cleanup_event = { .hook = page_cleanup, };
+#define SCHEDULE_CLEANUP do if (initialized && !shutting_down) ev_send(&global_event_list, &page_cleanup_event); while (0)
-uint *pages_kept = &global_free_pages.cnt;
+_Atomic int pages_kept = 0;
+_Atomic int pages_kept_locally = 0;
+static _Thread_local int pages_kept_here = 0;
static void *
alloc_sys_page(void)
{
- void *ptr = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ void *ptr = mmap(NULL, page_size * ALLOC_PAGES_AT_ONCE, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED)
die("mmap(%ld) failed: %m", (s64) page_size);
@@ -108,45 +108,57 @@ alloc_page(void)
}
#ifdef HAVE_MMAP
- struct free_pages *fps = &global_free_pages;
-
- /* If there is any free page kept hot, we use it. */
- if (fps->cnt)
+ /* If there is any free page kept hot in this thread, we use it. */
+ struct free_page *fp = local_page_stack;
+ if (fp)
{
- struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages));
- rem_node(&fp->n);
+ local_page_stack = atomic_load_explicit(&fp->next, memory_order_acquire);
+ atomic_fetch_sub_explicit(&pages_kept_locally, 1, memory_order_relaxed);
+ pages_kept_here--;
+ return fp;
+ }
- /* If the hot-free-page cache is getting short, request the cleanup routine to replenish the cache */
- if ((--fps->cnt < fps->min) && !shutting_down)
- ev_schedule(&fps->cleanup);
+ ASSERT_DIE(pages_kept_here == 0);
+ /* If there is any free page kept hot in global storage, we use it. */
+ rcu_read_lock();
+ fp = atomic_load_explicit(&page_stack, memory_order_acquire);
+ while (fp && !atomic_compare_exchange_strong_explicit(
+ &page_stack, &fp, atomic_load_explicit(&fp->next, memory_order_acquire),
+ memory_order_acq_rel, memory_order_acquire))
+ ;
+ rcu_read_unlock();
+
+ if (fp)
+ {
+ atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
return fp;
}
- else
- return alloc_cold_page();
-}
-
-static void *
-alloc_cold_page(void)
-{
- struct free_pages *fps = &global_free_pages;
/* If there is any free page kept cold, we use that. */
- if (!EMPTY_LIST(fps->empty))
- {
- struct empty_pages *ep = HEAD(fps->empty);
+ LOCK_DOMAIN(resource, empty_pages_domain);
+ if (empty_pages) {
+ if (empty_pages->pos)
+ /* Either the keeper page contains at least one cold page pointer, return that */
+ fp = empty_pages->pages[--empty_pages->pos];
+ else
+ {
+ /* Or the keeper page has no more cold page pointer, return the keeper page */
+ fp = (struct free_page *) empty_pages;
+ empty_pages = empty_pages->next;
+ }
+ }
+ UNLOCK_DOMAIN(resource, empty_pages_domain);
- /* Either the keeper page contains at least one cold page pointer, return that */
- if (ep->pos)
- return ep->pages[--ep->pos];
+ if (fp)
+ return fp;
- /* Or the keeper page has no more cold page pointer, return the keeper page */
- rem_node(&ep->n);
- return ep;
- }
+ /* And in the worst case, allocate some new pages by mmap() */
+ void *ptr = alloc_sys_page();
+ for (int i=1; i<ALLOC_PAGES_AT_ONCE; i++)
+ free_page(ptr + page_size * i);
- /* And in the worst case, allocate a new page by mmap() */
- return alloc_sys_page();
+ return ptr;
#endif
}
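
The pop above is a classic Treiber-stack pop: a CAS loop that swings the global head pointer over to the next node. The RCU read lock around it keeps a concurrently popped page from being returned to the OS while another thread still dereferences its next pointer. A minimal sketch of the same pattern in plain C11 atomics (the node type is hypothetical):

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node * _Atomic next; };
    static struct node * _Atomic stack_head = NULL;

    /* Pop one node, or return NULL if the stack is empty. */
    static struct node *
    stack_pop(void)
    {
      /* BIRD wraps this in rcu_read_lock() so the node cannot be
       * unmapped between the load and the CAS below. */
      struct node *top = atomic_load_explicit(&stack_head, memory_order_acquire);
      while (top && !atomic_compare_exchange_strong_explicit(
            &stack_head, &top,
            atomic_load_explicit(&top->next, memory_order_acquire),
            memory_order_acq_rel, memory_order_acquire))
        ;   /* a failed CAS reloads top; just retry */
      return top;
    }
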
@@ -161,53 +173,108 @@ free_page(void *ptr)
}
#ifdef HAVE_MMAP
- struct free_pages *fps = &global_free_pages;
+ /* We primarily try to keep the pages locally. */
struct free_page *fp = ptr;
+ if (shutting_down || (pages_kept_here < KEEP_PAGES_MAX_LOCAL))
+ {
+ atomic_store_explicit(&fp->next, local_page_stack, memory_order_relaxed);
+ local_page_stack = fp;
- /* Otherwise, we add the free page to the hot-free-page list */
- fp->n = (node) {};
- add_tail(&fps->pages, &fp->n);
+ atomic_fetch_add_explicit(&pages_kept_locally, 1, memory_order_relaxed);
+ pages_kept_here++;
+ return;
+ }
+
+ /* If there are too many local pages, we add the free page to the global hot-free-page list */
+ rcu_read_lock();
+ struct free_page *next = atomic_load_explicit(&page_stack, memory_order_acquire);
- /* And if there are too many hot free pages, we ask for page cleanup */
- if ((++fps->cnt > fps->max) && !shutting_down)
- ev_schedule(&fps->cleanup);
+ do atomic_store_explicit(&fp->next, next, memory_order_release);
+ while (!atomic_compare_exchange_strong_explicit(
+ &page_stack, &next, fp,
+ memory_order_acq_rel, memory_order_acquire));
+ rcu_read_unlock();
+
+ /* And if there are too many global hot free pages, we ask for page cleanup */
+ if (atomic_fetch_add_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX)
+ SCHEDULE_CLEANUP;
#endif
}
+/* When the routine is going to sleep for a long time, we flush the local
+ * hot page cache to not keep dirty pages for nothing. */
+void
+flush_local_pages(void)
+{
+ if (use_fake || !local_page_stack || shutting_down)
+ return;
+
+ /* We first count the pages to enable consistency checking.
+ * Also, we need to know the last page. */
+ struct free_page *last = local_page_stack, *next;
+ int check_count = 1;
+ while (next = atomic_load_explicit(&last->next, memory_order_acquire))
+ {
+ check_count++;
+ last = next;
+ }
+
+ /* The actual number of pages must be equal to the counter value. */
+ ASSERT_DIE(check_count == pages_kept_here);
+
+ /* Repeatedly try to insert the whole page list into the global page stack at once. */
+ rcu_read_lock();
+ next = atomic_load_explicit(&page_stack, memory_order_acquire);
+
+ /* First we set the outwards pointer (from our last),
+ * then we try to set the inwards pointer to our first page. */
+ do atomic_store_explicit(&last->next, next, memory_order_release);
+ while (!atomic_compare_exchange_strong_explicit(
+ &page_stack, &next, local_page_stack,
+ memory_order_acq_rel, memory_order_acquire));
+ rcu_read_unlock();
+
+ /* Finished. Now the local stack is empty. */
+ local_page_stack = NULL;
+ pages_kept_here = 0;
+
+ /* Check the state of global page cache and maybe schedule its cleanup. */
+ atomic_fetch_sub_explicit(&pages_kept_locally, check_count, memory_order_relaxed);
+ if (atomic_fetch_add_explicit(&pages_kept, check_count, memory_order_relaxed) >= KEEP_PAGES_MAX)
+ SCHEDULE_CLEANUP;
+}
+
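
flush_local_pages() reuses the push pattern from free_page() but splices the whole local chain in one step: only the last node's next pointer has to be rewritten on a retry, so the chain is walked exactly once. The same idea in isolation, continuing the hypothetical node type from the sketch above:

    /* Push the whole chain first..last onto the stack with a single CAS. */
    static void
    stack_splice(struct node *first, struct node *last)
    {
      struct node *top = atomic_load_explicit(&stack_head, memory_order_acquire);
      do
        atomic_store_explicit(&last->next, top, memory_order_release);
      while (!atomic_compare_exchange_strong_explicit(
            &stack_head, &top, first,
            memory_order_acq_rel, memory_order_acquire));
    }
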
#ifdef HAVE_MMAP
static void
-global_free_pages_cleanup_event(void *data UNUSED)
+page_cleanup(void *_ UNUSED)
{
/* Cleanup on shutdown is ignored. All pages may be kept hot, OS will take care. */
if (shutting_down)
return;
- struct free_pages *fps = &global_free_pages;
+ struct free_page *stack = atomic_exchange_explicit(&page_stack, NULL, memory_order_acq_rel);
+ if (!stack)
+ return;
- /* Cleanup may get called when hot free page cache is short of pages. Replenishing. */
- while (fps->cnt / 2 < fps->min)
- free_page(alloc_cold_page());
- /* Or the hot free page cache is too big. Moving some pages to the cold free page cache. */
- for (int limit = CLEANUP_PAGES_BULK; limit && (fps->cnt > fps->max / 2); fps->cnt--, limit--)
- {
- struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages));
- rem_node(&fp->n);
+ do {
+ synchronize_rcu();
+ struct free_page *fp = stack;
+ stack = atomic_load_explicit(&fp->next, memory_order_acquire);
+ LOCK_DOMAIN(resource, empty_pages_domain);
/* Empty pages are stored as pointers. To store them, we need a pointer block. */
- struct empty_pages *ep;
- if (EMPTY_LIST(fps->empty) || ((ep = HEAD(fps->empty))->pos == EP_POS_MAX))
+ if (!empty_pages || (empty_pages->pos == EP_POS_MAX))
{
/* There is either no pointer block or the last block is full. We use this block as a pointer block. */
- ep = (struct empty_pages *) fp;
- *ep = (struct empty_pages) {};
- add_head(&fps->empty, &ep->n);
+ empty_pages = (struct empty_pages *) fp;
+ *empty_pages = (struct empty_pages) {};
}
else
{
/* We store this block as a pointer into the first free place
* and tell the OS that the underlying memory is trash. */
- ep->pages[ep->pos++] = fp;
+ empty_pages->pages[empty_pages->pos++] = fp;
if (madvise(fp, page_size,
#ifdef CONFIG_MADV_DONTNEED_TO_FREE
MADV_DONTNEED
@@ -217,12 +284,18 @@ global_free_pages_cleanup_event(void *data UNUSED)
) < 0)
bug("madvise(%p) failed: %m", fp);
}
+ UNLOCK_DOMAIN(resource, empty_pages_domain);
}
+ while ((atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed) >= KEEP_PAGES_MAX / 2) && stack);
- /* If the hot free page cleanup hit the limit, re-schedule this routine
- * to allow for other routines to run. */
- if (fps->cnt > fps->max)
- ev_schedule(&fps->cleanup);
+ while (stack)
+ {
+ struct free_page *f = stack;
+ stack = atomic_load_explicit(&f->next, memory_order_acquire);
+ free_page(f);
+
+ atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
+ }
}
#endif
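
The cleanup path demotes hot pages to cold ones: the page's address is retained in a keeper page while its contents are handed to the kernel via madvise(), so the mapping stays valid but the physical frame can be reclaimed. A standalone illustration of that system-call pattern (not BIRD code; error handling reduced to early returns):

    #include <sys/mman.h>

    /* Map one page, mark its contents disposable, then reuse the mapping. */
    static void *
    demote_and_reuse(long page_size)
    {
      void *pg = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (pg == MAP_FAILED)
        return NULL;

      /* The address range stays reserved; the kernel may drop the backing
       * memory and supplies a zero-filled page on the next write. */
      if (madvise(pg, page_size, MADV_DONTNEED) < 0)
        return NULL;

      return pg;    /* still a valid mapping, refaulted on first touch */
    }
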
@@ -236,8 +309,6 @@ resource_sys_init(void)
#endif
#ifdef HAVE_MMAP
- ASSERT_DIE(global_free_pages.cnt == 0);
-
/* Check what page size the system supports */
if (!(page_size = sysconf(_SC_PAGESIZE)))
die("System page size must be non-zero");
@@ -247,11 +318,8 @@ resource_sys_init(void)
/* We assume that page size has only one bit and is between 1K and 256K (incl.).
* Otherwise, the assumptions in lib/slab.c (sl_head's num_full range) aren't met. */
- struct free_pages *fps = &global_free_pages;
-
- init_list(&fps->pages);
- init_list(&fps->empty);
- global_free_pages_cleanup_event(NULL);
+ empty_pages_domain = DOMAIN_NEW(resource, "Empty Pages");
+ initialized = 1;
return;
}
@@ -261,4 +329,5 @@ resource_sys_init(void)
#endif
page_size = 4096;
+ initialized = 1;
}
diff --git a/sysdep/unix/config.Y b/sysdep/unix/config.Y
index 5c4b5bef..a50ec757 100644
--- a/sysdep/unix/config.Y
+++ b/sysdep/unix/config.Y
@@ -101,6 +101,12 @@ mrtdump_base:
;
+conf: THREADS expr {
+ if ($2 < 1) cf_error("Number of threads must be at least one.");
+ new_config->thread_count = $2;
+}
+
+
conf: debug_unix ;
debug_unix:
@@ -145,6 +151,11 @@ CF_CLI_HELP(GRACEFUL, restart, [[Shut the daemon down for graceful restart]])
CF_CLI(GRACEFUL RESTART,,, [[Shut the daemon down for graceful restart]])
{ cmd_graceful_restart(); } ;
+CF_CLI(SHOW THREADS,,, [[Write out thread information]])
+{ cmd_show_threads(0); } ;
+
+CF_CLI(SHOW THREADS ALL,,, [[Write out thread and IO loop information]])
+{ cmd_show_threads(1); } ;
cfg_name:
/* empty */ { $$ = NULL; }
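
With the grammar above, the thread count becomes an ordinary top-level option and the thread introspection commands follow the usual CLI pattern. Assuming the conventional lowercase spelling of config keywords, usage might look like:

    # bird.conf
    threads 4;          # must be at least 1, enforced by the parser

    # birdc
    bird> show threads
    bird> show threads all
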
diff --git a/sysdep/unix/domain.c b/sysdep/unix/domain.c
new file mode 100644
index 00000000..f4ee595d
--- /dev/null
+++ b/sysdep/unix/domain.c
@@ -0,0 +1,126 @@
+/*
+ * BIRD Locking
+ *
+ * (c) 2020 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#undef LOCAL_DEBUG
+
+#undef DEBUG_LOCKING
+
+#include "lib/birdlib.h"
+#include "lib/locking.h"
+#include "lib/resource.h"
+#include "lib/timer.h"
+
+#include "conf/conf.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdatomic.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Locking subsystem
+ */
+
+_Thread_local struct lock_order locking_stack = {};
+_Thread_local struct domain_generic **last_locked = NULL;
+
+#define ASSERT_NO_LOCK ASSERT_DIE(last_locked == NULL)
+
+struct domain_generic {
+ pthread_mutex_t mutex;
+ uint order;
+ struct domain_generic **prev;
+ struct lock_order *locked_by;
+ const char *name;
+};
+
+#define DOMAIN_INIT(_name, _order) { .mutex = PTHREAD_MUTEX_INITIALIZER, .name = _name, .order = _order }
+
+static struct domain_generic the_bird_domain_gen = DOMAIN_INIT("The BIRD", OFFSETOF(struct lock_order, the_bird));
+
+DOMAIN(the_bird) the_bird_domain = { .the_bird = &the_bird_domain_gen };
+
+struct domain_generic *
+domain_new(const char *name, uint order)
+{
+ ASSERT_DIE(order < sizeof(struct lock_order));
+ struct domain_generic *dg = xmalloc(sizeof(struct domain_generic));
+ *dg = (struct domain_generic) DOMAIN_INIT(name, order);
+ return dg;
+}
+
+void
+domain_free(struct domain_generic *dg)
+{
+ pthread_mutex_destroy(&dg->mutex);
+ xfree(dg);
+}
+
+const char *
+domain_name(struct domain_generic *dg)
+{
+ return dg->name;
+}
+
+uint dg_order(struct domain_generic *dg)
+{
+ return dg->order;
+}
+
+void do_lock(struct domain_generic *dg, struct domain_generic **lsp)
+{
+ struct lock_order stack_copy;
+ memcpy(&stack_copy, &locking_stack, sizeof(stack_copy));
+ struct domain_generic **lll = last_locked;
+
+ if ((char *) lsp - (char *) &locking_stack != dg->order)
+ bug("Trying to lock on bad position: order=%u, lsp=%p, base=%p", dg->order, lsp, &locking_stack);
+
+ if (lsp <= last_locked)
+ bug("Trying to lock in a bad order: %p %p", &stack_copy, lll);
+ if (*lsp)
+ bug("Inconsistent locking stack state on lock");
+
+ btime lock_begin = current_time();
+ pthread_mutex_lock(&dg->mutex);
+ btime duration = current_time() - lock_begin;
+ if (config && (duration > config->watchdog_warning))
+ log(L_WARN "Locking of %s took %d ms", dg->name, (int) (duration TO_MS));
+
+ if (dg->prev || dg->locked_by)
+ bug("Previous unlock not finished correctly");
+ dg->prev = last_locked;
+ *lsp = dg;
+ last_locked = lsp;
+ dg->locked_by = &locking_stack;
+}
+
+void do_unlock(struct domain_generic *dg, struct domain_generic **lsp)
+{
+ if ((char *) lsp - (char *) &locking_stack != dg->order)
+ bug("Trying to unlock on bad position: order=%u, lsp=%p, base=%p", dg->order, lsp, &locking_stack);
+
+ if (dg->locked_by != &locking_stack)
+ bug("Inconsistent domain state on unlock");
+ if ((last_locked != lsp) || (*lsp != dg))
+ bug("Inconsistent locking stack state on unlock");
+ dg->locked_by = NULL;
+ last_locked = dg->prev;
+ *lsp = NULL;
+ dg->prev = NULL;
+ pthread_mutex_unlock(&dg->mutex);
+}
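
do_lock() enforces a total lock order: a domain's order is its field offset inside struct lock_order, and a lock may only be acquired when its slot lies strictly after the most recently locked one; any violation is a bug(), not a silent deadlock. Callers go through the DOMAIN macros, as elsewhere in this series; a sketch of the intended pattern (names hypothetical, macro definitions assumed from lib/locking.h):

    DEFINE_DOMAIN(resource);            /* declare a lockable domain type */
    static DOMAIN(resource) my_domain;

    static void
    my_init(void)
    {
      my_domain = DOMAIN_NEW(resource, "My Domain");
    }

    static void
    my_critical(void)
    {
      LOCK_DOMAIN(resource, my_domain);
      /* ... shared state of this domain may be touched here; taking
       * another lock of the same or lower order would bug() out ... */
      UNLOCK_DOMAIN(resource, my_domain);
    }
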
diff --git a/sysdep/unix/io-loop.c b/sysdep/unix/io-loop.c
new file mode 100644
index 00000000..2532fe33
--- /dev/null
+++ b/sysdep/unix/io-loop.c
@@ -0,0 +1,1296 @@
+/*
+ * BIRD -- I/O and event loop
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <pthread.h>
+#include <time.h>
+#include <sys/time.h>
+
+#include "nest/bird.h"
+
+#include "lib/buffer.h"
+#include "lib/lists.h"
+#include "lib/locking.h"
+#include "lib/resource.h"
+#include "lib/event.h"
+#include "lib/timer.h"
+#include "lib/socket.h"
+
+#include "lib/io-loop.h"
+#include "sysdep/unix/io-loop.h"
+#include "conf/conf.h"
+#include "nest/cli.h"
+
+#define THREAD_STACK_SIZE 65536 /* To be lowered in near future */
+
+static struct birdloop *birdloop_new_internal(pool *pp, uint order, const char *name, int request_pickup);
+
+/*
+ * Nanosecond time for accounting purposes
+ *
+ * A fixed point on startup is set as zero, all other values are relative to that.
+ * Caution: this overflows after like 500 years or so. If you plan to run
+ * BIRD for such a long time, please implement some means of overflow prevention.
+ */
+
+static struct timespec ns_begin;
+
+static void ns_init(void)
+{
+ if (clock_gettime(CLOCK_MONOTONIC, &ns_begin))
+ bug("clock_gettime: %m");
+}
+
+static u64 ns_now(void)
+{
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts))
+ bug("clock_gettime: %m");
+
+ return (u64) (ts.tv_sec - ns_begin.tv_sec) * 1000000000 + ts.tv_nsec - ns_begin.tv_nsec;
+}
+
+
+/*
+ * Current thread context
+ */
+
+_Thread_local struct birdloop *birdloop_current;
+static _Thread_local struct birdloop *birdloop_wakeup_masked;
+static _Thread_local uint birdloop_wakeup_masked_count;
+
+#define LOOP_NAME(loop) domain_name((loop)->time.domain)
+
+#define LOOP_TRACE(loop, fmt, args...) do { if (config && config->latency_debug) log(L_TRACE "%s (%p): " fmt, LOOP_NAME(loop), (loop), ##args); } while (0)
+#define THREAD_TRACE(...) do { if (config && config->latency_debug) log(L_TRACE "Thread: " __VA_ARGS__); } while (0)
+
+#define LOOP_WARN(loop, fmt, args...) log(L_TRACE "%s (%p): " fmt, LOOP_NAME(loop), (loop), ##args)
+
+
+event_list *
+birdloop_event_list(struct birdloop *loop)
+{
+ return &loop->event_list;
+}
+
+struct timeloop *
+birdloop_time_loop(struct birdloop *loop)
+{
+ return &loop->time;
+}
+
+_Bool
+birdloop_inside(struct birdloop *loop)
+{
+ for (struct birdloop *c = birdloop_current; c; c = c->prev_loop)
+ if (loop == c)
+ return 1;
+
+ return 0;
+}
+
+_Bool
+birdloop_in_this_thread(struct birdloop *loop)
+{
+ return pthread_equal(pthread_self(), loop->thread->thread_id);
+}
+
+void
+birdloop_flag(struct birdloop *loop, u32 flag)
+{
+ atomic_fetch_or_explicit(&loop->flags, flag, memory_order_acq_rel);
+ birdloop_ping(loop);
+}
+
+void
+birdloop_flag_set_handler(struct birdloop *loop, struct birdloop_flag_handler *fh)
+{
+ ASSERT_DIE(birdloop_inside(loop));
+ loop->flag_handler = fh;
+}
+
+static int
+birdloop_process_flags(struct birdloop *loop)
+{
+ if (!loop->flag_handler)
+ return 0;
+
+ u32 flags = atomic_exchange_explicit(&loop->flags, 0, memory_order_acq_rel);
+ if (!flags)
+ return 0;
+
+ loop->flag_handler->hook(loop->flag_handler, flags);
+ return 1;
+}
+
+/*
+ * Wakeup code for birdloop
+ */
+
+void
+pipe_new(struct pipe *p)
+{
+ int rv = pipe(p->fd);
+ if (rv < 0)
+ die("pipe: %m");
+
+ if (fcntl(p->fd[0], F_SETFL, O_NONBLOCK) < 0)
+ die("fcntl(O_NONBLOCK): %m");
+
+ if (fcntl(p->fd[1], F_SETFL, O_NONBLOCK) < 0)
+ die("fcntl(O_NONBLOCK): %m");
+}
+
+void
+pipe_drain(struct pipe *p)
+{
+ while (1) {
+ char buf[64];
+ int rv = read(p->fd[0], buf, sizeof(buf));
+ if ((rv < 0) && (errno == EAGAIN))
+ return;
+
+ if (rv == 0)
+ bug("wakeup read eof");
+ if ((rv < 0) && (errno != EINTR))
+ bug("wakeup read: %m");
+ }
+}
+
+int
+pipe_read_one(struct pipe *p)
+{
+ while (1) {
+ char v;
+ int rv = read(p->fd[0], &v, sizeof(v));
+ if (rv == 1)
+ return 1;
+ if ((rv < 0) && (errno == EAGAIN))
+ return 0;
+ if (rv > 1)
+ bug("wakeup read more bytes than expected: %d", rv);
+ if (rv == 0)
+ bug("wakeup read eof");
+ if (errno != EINTR)
+ bug("wakeup read: %m");
+ }
+}
+
+void
+pipe_kick(struct pipe *p)
+{
+ char v = 1;
+ int rv;
+
+ while (1) {
+ rv = write(p->fd[1], &v, sizeof(v));
+ if ((rv >= 0) || (errno == EAGAIN))
+ return;
+ if (errno != EINTR)
+ bug("wakeup write: %m");
+ }
+}
+
+void
+pipe_pollin(struct pipe *p, struct pfd *pfd)
+{
+ BUFFER_PUSH(pfd->pfd) = (struct pollfd) {
+ .fd = p->fd[0],
+ .events = POLLIN,
+ };
+ BUFFER_PUSH(pfd->loop) = NULL;
+}
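
These four helpers implement the classic self-pipe wakeup: the read end of the pipe sits in every thread's poll set, so any other thread can interrupt a sleeping poll() by writing a single byte. A minimal sleeper built from just these helpers (a sketch; the real thread loop below adds loop and timer bookkeeping):

    /* Sleep until some other thread kicks us. */
    static void
    wait_for_kick(struct pipe *p)
    {
      struct pollfd pfd = { .fd = p->fd[0], .events = POLLIN, };

      while (poll(&pfd, 1, -1) < 0)
        if (errno != EINTR && errno != EAGAIN)
          bug("poll: %m");

      pipe_drain(p);    /* consume all queued kicks at once */
    }

    /* From any other thread: pipe_kick(p). The write end is O_NONBLOCK;
     * a full pipe already guarantees a pending wakeup, so EAGAIN is fine. */
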
+
+static inline void
+wakeup_init(struct bird_thread *loop)
+{
+ pipe_new(&loop->wakeup);
+}
+
+static inline void
+wakeup_drain(struct bird_thread *loop)
+{
+ pipe_drain(&loop->wakeup);
+}
+
+static inline void
+wakeup_do_kick(struct bird_thread *loop)
+{
+ pipe_kick(&loop->wakeup);
+}
+
+static inline _Bool
+birdloop_try_ping(struct birdloop *loop, u32 ltt)
+{
+ /* Somebody else is already pinging, be idempotent */
+ if (ltt & LTT_PING)
+ {
+ LOOP_TRACE(loop, "already being pinged");
+ return 0;
+ }
+
+ /* Thread moving is an implicit ping */
+ if (ltt & LTT_MOVE)
+ {
+ LOOP_TRACE(loop, "ping while moving");
+ return 1;
+ }
+
+ /* No more flags allowed */
+ ASSERT_DIE(!ltt);
+
+ /* No ping when not picked up */
+ if (!loop->thread)
+ {
+ LOOP_TRACE(loop, "not picked up yet, can't ping");
+ return 1;
+ }
+
+ /* No ping when masked */
+ if (loop == birdloop_wakeup_masked)
+ {
+ LOOP_TRACE(loop, "wakeup masked, can't ping");
+ birdloop_wakeup_masked_count++;
+ return 1;
+ }
+
+ /* Send meta event to ping */
+ if ((loop != loop->thread->meta) && (loop != &main_birdloop))
+ {
+ LOOP_TRACE(loop, "Ping by meta event to %p", loop->thread->meta);
+ ev_send_loop(loop->thread->meta, &loop->event);
+ return 1;
+ }
+
+ /* Do the real ping */
+ LOOP_TRACE(loop, "sending pipe ping");
+ wakeup_do_kick(loop->thread);
+ return 0;
+}
+
+static inline void
+birdloop_do_ping(struct birdloop *loop)
+{
+ /* Register our ping effort */
+ u32 ltt = atomic_fetch_or_explicit(&loop->thread_transition, LTT_PING, memory_order_acq_rel);
+
+ /* Try to ping in multiple ways */
+ if (birdloop_try_ping(loop, ltt))
+ atomic_fetch_and_explicit(&loop->thread_transition, ~LTT_PING, memory_order_acq_rel);
+}
+
+void
+birdloop_ping(struct birdloop *loop)
+{
+ if (!birdloop_inside(loop))
+ {
+ LOOP_TRACE(loop, "ping from outside");
+ birdloop_do_ping(loop);
+ }
+ else
+ {
+ LOOP_TRACE(loop, "ping from inside, pending=%d", loop->ping_pending);
+ if (!loop->ping_pending)
+ loop->ping_pending++;
+ }
+}
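
The ping protocol hinges on atomic_fetch_or returning the previous flags: of any number of concurrent pingers, exactly one observes LTT_PING clear and becomes responsible for delivering the wakeup; the others return immediately. Reduced to its core:

    u32 old = atomic_fetch_or_explicit(&loop->thread_transition,
                                       LTT_PING, memory_order_acq_rel);
    if (!(old & LTT_PING))
      wakeup_do_kick(loop->thread);   /* the first pinger delivers the kick */
    /* The woken thread clears LTT_PING again after draining its wakeup pipe. */
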
+
+
+/*
+ * Sockets
+ */
+
+static void
+sockets_init(struct birdloop *loop)
+{
+ init_list(&loop->sock_list);
+ loop->sock_num = 0;
+}
+
+void
+socket_changed(sock *s)
+{
+ struct birdloop *loop = s->loop;
+ ASSERT_DIE(birdloop_inside(loop));
+
+ loop->sock_changed++;
+ birdloop_ping(loop);
+}
+
+void
+birdloop_add_socket(struct birdloop *loop, sock *s)
+{
+ ASSERT_DIE(birdloop_inside(loop));
+ ASSERT_DIE(!s->loop);
+
+ LOOP_TRACE(loop, "adding socket %p (total=%d)", s, loop->sock_num);
+ add_tail(&loop->sock_list, &s->n);
+ loop->sock_num++;
+
+ s->loop = loop;
+ s->index = -1;
+
+ socket_changed(s);
+}
+
+extern sock *stored_sock; /* mainloop hack */
+
+void
+birdloop_remove_socket(struct birdloop *loop, sock *s)
+{
+ ASSERT_DIE(!enlisted(&s->n) == !s->loop);
+
+ if (!s->loop)
+ return;
+
+ ASSERT_DIE(birdloop_inside(loop));
+ ASSERT_DIE(s->loop == loop);
+
+ /* Decouple the socket from the loop entirely. */
+ LOOP_TRACE(loop, "removing socket %p (total=%d)", s, loop->sock_num);
+
+ if (loop->sock_active == s)
+ loop->sock_active = sk_next(s);
+
+ if ((loop == &main_birdloop) && (s == stored_sock))
+ stored_sock = sk_next(s);
+
+ rem_node(&s->n);
+ loop->sock_num--;
+
+ socket_changed(s);
+
+ s->loop = NULL;
+ s->index = -1;
+}
+
+void
+sk_reloop(sock *s, struct birdloop *loop)
+{
+ ASSERT_DIE(birdloop_inside(loop));
+ ASSERT_DIE(birdloop_inside(s->loop));
+
+ if (loop == s->loop)
+ return;
+
+ birdloop_remove_socket(s->loop, s);
+ birdloop_add_socket(loop, s);
+}
+
+void
+sk_pause_rx(struct birdloop *loop, sock *s)
+{
+ ASSERT_DIE(birdloop_inside(loop));
+ s->rx_hook = NULL;
+ socket_changed(s);
+}
+
+void
+sk_resume_rx(struct birdloop *loop, sock *s, int (*hook)(sock *, uint))
+{
+ ASSERT_DIE(birdloop_inside(loop));
+ ASSERT_DIE(hook);
+ s->rx_hook = hook;
+ socket_changed(s);
+}
+
+static inline uint sk_want_events(sock *s)
+{ return (s->rx_hook ? POLLIN : 0) | (sk_tx_pending(s) ? POLLOUT : 0); }
+
+void
+sockets_prepare(struct birdloop *loop, struct pfd *pfd)
+{
+ node *n;
+ WALK_LIST(n, loop->sock_list)
+ {
+ sock *s = SKIP_BACK(sock, n, n);
+ uint w = sk_want_events(s);
+
+ if (!w)
+ {
+ s->index = -1;
+ continue;
+ }
+
+ s->index = pfd->pfd.used;
+ LOOP_TRACE(loop, "socket %p poll index is %d", s, s->index);
+
+ BUFFER_PUSH(pfd->pfd) = (struct pollfd) {
+ .fd = s->fd,
+ .events = sk_want_events(s),
+ };
+ BUFFER_PUSH(pfd->loop) = loop;
+ }
+}
+
+int sk_read(sock *s, int revents);
+int sk_write(sock *s);
+void sk_err(sock *s, int revents);
+
+static int
+sockets_fire(struct birdloop *loop)
+{
+ if (EMPTY_LIST(loop->sock_list))
+ return 0;
+
+ int repeat = 0;
+
+ times_update();
+
+ struct pollfd *pfd = loop->thread->pfd->pfd.data;
+ loop->sock_active = SKIP_BACK(sock, n, HEAD(loop->sock_list));
+
+ while (loop->sock_active)
+ {
+ sock *s = loop->sock_active;
+
+ int rev;
+ if ((s->index >= 0) && (rev = pfd[s->index].revents) && !(rev & POLLNVAL))
+ {
+ int e = 1;
+
+ if (rev & POLLOUT)
+ {
+ /* Write everything. */
+ while ((s == loop->sock_active) && (e = sk_write(s)))
+ ;
+
+ if (s != loop->sock_active)
+ continue;
+
+ if (!sk_tx_pending(s))
+ loop->thread->sock_changed++;
+ }
+
+ if (rev & POLLIN)
+ /* Read just one packet and request repeat. */
+ if ((s == loop->sock_active) && s->rx_hook)
+ if (sk_read(s, rev))
+ repeat++;
+
+ if (s != loop->sock_active)
+ continue;
+
+ if (!(rev & (POLLOUT | POLLIN)) && (rev & POLLERR))
+ sk_err(s, rev);
+
+ if (s != loop->sock_active)
+ continue;
+ }
+
+ loop->sock_active = sk_next(s);
+ }
+
+ return repeat;
+}
+
+/*
+ * Threads
+ */
+
+DEFINE_DOMAIN(resource);
+static DOMAIN(resource) birdloop_domain;
+static list birdloop_pickup;
+static list bird_thread_pickup;
+
+static _Thread_local struct bird_thread *this_thread;
+
+static void
+birdloop_set_thread(struct birdloop *loop, struct bird_thread *thr)
+{
+ struct bird_thread *old = loop->thread;
+ ASSERT_DIE(!thr != !old);
+
+ /* Signal our moving effort */
+ u32 ltt = atomic_fetch_or_explicit(&loop->thread_transition, LTT_MOVE, memory_order_acq_rel);
+ ASSERT_DIE((ltt & LTT_MOVE) == 0);
+
+ while (ltt & LTT_PING)
+ {
+ birdloop_yield();
+ ltt = atomic_load_explicit(&loop->thread_transition, memory_order_acquire);
+ ASSERT_DIE(ltt & LTT_MOVE);
+ }
+ /* Now we are free of running pings */
+
+ if (loop->thread = thr)
+ {
+ add_tail(&thr->loops, &loop->n);
+ thr->loop_count++;
+ }
+ else
+ {
+ old->loop_count--;
+
+ LOCK_DOMAIN(resource, birdloop_domain);
+ add_tail(&birdloop_pickup, &loop->n);
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+ }
+
+ /* Finished */
+ atomic_fetch_and_explicit(&loop->thread_transition, ~LTT_MOVE, memory_order_acq_rel);
+
+ /* Request to run by force */
+ ev_send_loop(loop->thread->meta, &loop->event);
+}
+
+static struct birdloop *
+birdloop_take(void)
+{
+ struct birdloop *loop = NULL;
+
+ LOCK_DOMAIN(resource, birdloop_domain);
+ if (!EMPTY_LIST(birdloop_pickup))
+ {
+ /* Take the first loop from the pickup list and unlock */
+ loop = SKIP_BACK(struct birdloop, n, HEAD(birdloop_pickup));
+ rem_node(&loop->n);
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ birdloop_set_thread(loop, this_thread);
+
+ /* This thread goes to the end of the pickup list */
+ LOCK_DOMAIN(resource, birdloop_domain);
+ rem_node(&this_thread->n);
+ add_tail(&bird_thread_pickup, &this_thread->n);
+
+ /* If there are more loops to be picked up, wakeup the next thread in order */
+ if (!EMPTY_LIST(birdloop_pickup))
+ wakeup_do_kick(SKIP_BACK(struct bird_thread, n, HEAD(bird_thread_pickup)));
+ }
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ return loop;
+}
+
+static void
+birdloop_drop(struct birdloop *loop)
+{
+ /* Remove loop from this thread's list */
+ rem_node(&loop->n);
+
+ /* Unset loop's thread */
+ if (birdloop_inside(loop))
+ birdloop_set_thread(loop, NULL);
+ else
+ {
+ birdloop_enter(loop);
+ birdloop_set_thread(loop, NULL);
+ birdloop_leave(loop);
+ }
+
+ /* Put loop into pickup list */
+ LOCK_DOMAIN(resource, birdloop_domain);
+ add_tail(&birdloop_pickup, &loop->n);
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+}
+
+static int
+poll_timeout(struct birdloop *loop)
+{
+ timer *t = timers_first(&loop->time);
+ if (!t)
+ return -1;
+
+ btime remains = tm_remains(t);
+ return remains TO_MS + ((remains TO_MS) MS < remains);
+}
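
poll() takes whole milliseconds while btime counts microseconds, so the remaining time is rounded up: TO_MS truncates, and the comparison adds the lost millisecond back. Worked through:

    /* remains = 2300 us:  remains TO_MS = 2,  2 MS = 2000 <  2300  ->  3 ms
     * remains = 2000 us:  remains TO_MS = 2,  2 MS = 2000 == 2000  ->  2 ms
     * The boolean supplies the carry of a ceiling division, so poll()
     * never wakes a fraction of a millisecond before the timer is due
     * and spins on a not-yet-expired timer. */
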
+
+static void *
+bird_thread_main(void *arg)
+{
+ struct bird_thread *thr = this_thread = arg;
+
+ rcu_thread_start(&thr->rcu);
+ synchronize_rcu();
+
+ tmp_init(thr->pool);
+ init_list(&thr->loops);
+
+ thr->meta = birdloop_new_internal(thr->pool, DOMAIN_ORDER(meta), "Thread Meta", 0);
+ thr->meta->thread = thr;
+ birdloop_enter(thr->meta);
+
+ thr->sock_changed = 1;
+
+ struct pfd pfd;
+ BUFFER_INIT(pfd.pfd, thr->pool, 16);
+ BUFFER_INIT(pfd.loop, thr->pool, 16);
+ thr->pfd = &pfd;
+
+ while (1)
+ {
+ u64 thr_loop_start = ns_now();
+ int timeout;
+
+ /* Pickup new loops */
+ struct birdloop *loop = birdloop_take();
+ if (loop)
+ {
+ birdloop_enter(loop);
+ if (!EMPTY_LIST(loop->sock_list))
+ thr->sock_changed = 1;
+ birdloop_leave(loop);
+ }
+
+ /* Schedule all loops with timed out timers */
+ timers_fire(&thr->meta->time, 0);
+
+ /* Compute maximal time per loop */
+ u64 thr_before_run = ns_now();
+ if (thr->loop_count > 0)
+ thr->max_loop_time_ns = (thr->max_latency_ns / 2 - (thr_before_run - thr_loop_start)) / (u64) thr->loop_count;
+
+ /* Run all scheduled loops */
+ int more_events = ev_run_list(&thr->meta->event_list);
+ if (more_events)
+ {
+ THREAD_TRACE("More events to run");
+ timeout = 0;
+ }
+ else
+ {
+ timeout = poll_timeout(thr->meta);
+ if (timeout == -1)
+ THREAD_TRACE("No timers, no events");
+ else
+ THREAD_TRACE("Next timer in %d ms", timeout);
+ }
+
+ /* Run priority events before sleeping */
+ ev_run_list(&thr->priority_events);
+
+ /* Do we have to refresh sockets? */
+ if (thr->sock_changed)
+ {
+ thr->sock_changed = 0;
+
+ BUFFER_FLUSH(pfd.pfd);
+ BUFFER_FLUSH(pfd.loop);
+
+ pipe_pollin(&thr->wakeup, &pfd);
+
+ node *nn;
+ WALK_LIST2(loop, nn, thr->loops, n)
+ {
+ birdloop_enter(loop);
+ sockets_prepare(loop, &pfd);
+ birdloop_leave(loop);
+ }
+
+ ASSERT_DIE(pfd.loop.used == pfd.pfd.used);
+ }
+ /* Nothing to do for at least 5 seconds; flush the local hot page cache */
+ else if (timeout > 5000)
+ flush_local_pages();
+
+poll_retry:;
+ int rv = poll(pfd.pfd.data, pfd.pfd.used, timeout);
+ if (rv < 0)
+ {
+ if (errno == EINTR || errno == EAGAIN)
+ goto poll_retry;
+ bug("poll in %p: %m", thr);
+ }
+
+ /* Drain wakeup fd */
+ if (pfd.pfd.data[0].revents & POLLIN)
+ {
+ ASSERT_DIE(rv > 0);
+ rv--;
+ wakeup_drain(thr);
+ }
+
+ atomic_fetch_and_explicit(&thr->meta->thread_transition, ~LTT_PING, memory_order_acq_rel);
+
+ /* Schedule loops with active sockets */
+ if (rv)
+ for (uint i = 1; i < pfd.pfd.used; i++)
+ if (pfd.pfd.data[i].revents)
+ {
+ LOOP_TRACE(pfd.loop.data[i], "socket id %d got revents=%d", i, pfd.pfd.data[i].revents);
+ ev_send_loop(thr->meta, &pfd.loop.data[i]->event);
+ }
+ }
+
+ bug("An infinite loop has ended.");
+}
+
+static void
+bird_thread_cleanup(void *_thr)
+{
+ struct bird_thread *thr = _thr;
+ ASSERT_DIE(birdloop_inside(&main_birdloop));
+
+ /* Thread attributes no longer needed */
+ pthread_attr_destroy(&thr->thread_attr);
+
+ /* Free all remaining memory */
+ rfree(thr->pool);
+}
+
+static struct bird_thread *
+bird_thread_start(btime max_latency)
+{
+ ASSERT_DIE(birdloop_inside(&main_birdloop));
+
+ pool *p = rp_new(&root_pool, "Thread");
+
+ struct bird_thread *thr = mb_allocz(p, sizeof(*thr));
+ thr->pool = p;
+ thr->cleanup_event = (event) { .hook = bird_thread_cleanup, .data = thr, };
+ thr->max_latency_ns = max_latency TO_NS;
+
+ wakeup_init(thr);
+ ev_init_list(&thr->priority_events, NULL, "Thread direct event list");
+
+ LOCK_DOMAIN(resource, birdloop_domain);
+ add_tail(&bird_thread_pickup, &thr->n);
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ int e = 0;
+
+ if (e = pthread_attr_init(&thr->thread_attr))
+ die("pthread_attr_init() failed: %M", e);
+
+ /* We don't have to worry about thread stack size so much.
+ if (e = pthread_attr_setstacksize(&thr->thread_attr, THREAD_STACK_SIZE))
+ die("pthread_attr_setstacksize(%u) failed: %M", THREAD_STACK_SIZE, e);
+ */
+
+ if (e = pthread_attr_setdetachstate(&thr->thread_attr, PTHREAD_CREATE_DETACHED))
+ die("pthread_attr_setdetachstate(PTHREAD_CREATE_DETACHED) failed: %M", e);
+
+ if (e = pthread_create(&thr->thread_id, &thr->thread_attr, bird_thread_main, thr))
+ die("pthread_create() failed: %M", e);
+
+ return thr;
+}
+
+static struct birdloop *thread_dropper;
+static event *thread_dropper_event;
+static uint thread_dropper_goal;
+
+static void
+bird_thread_shutdown(void * _ UNUSED)
+{
+ LOCK_DOMAIN(resource, birdloop_domain);
+ int dif = list_length(&bird_thread_pickup) - thread_dropper_goal;
+ struct birdloop *tdl_stop = NULL;
+
+ if (dif > 0)
+ ev_send_loop(thread_dropper, thread_dropper_event);
+ else
+ {
+ tdl_stop = thread_dropper;
+ thread_dropper = NULL;
+ }
+
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ DBG("Thread pickup size differs from dropper goal by %d%s\n", dif, tdl_stop ? ", stopping" : "");
+
+ if (tdl_stop)
+ {
+ birdloop_stop_self(tdl_stop, NULL, NULL);
+ return;
+ }
+
+ struct bird_thread *thr = this_thread;
+
+ /* Leave the thread-picker list to get no more loops */
+ LOCK_DOMAIN(resource, birdloop_domain);
+ rem_node(&thr->n);
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ /* Drop loops including the thread dropper itself */
+ while (!EMPTY_LIST(thr->loops))
+ birdloop_drop(HEAD(thr->loops));
+
+ /* Let others know about new loops */
+ if (!EMPTY_LIST(birdloop_pickup))
+ wakeup_do_kick(SKIP_BACK(struct bird_thread, n, HEAD(bird_thread_pickup)));
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ /* Leave the thread-dropper loop as we aren't going to return. */
+ birdloop_leave(thread_dropper);
+
+ /* Stop the meta loop */
+ birdloop_leave(thr->meta);
+ domain_free(thr->meta->time.domain);
+ rfree(thr->meta->pool);
+
+ /* Local pages not needed anymore */
+ flush_local_pages();
+
+ /* Unregister from RCU */
+ rcu_thread_stop(&thr->rcu);
+
+ /* Request thread cleanup from main loop */
+ ev_send_loop(&main_birdloop, &thr->cleanup_event);
+
+ /* Exit! */
+ pthread_exit(NULL);
+}
+
+
+void
+bird_thread_commit(struct config *new, struct config *old UNUSED)
+{
+ ASSERT_DIE(birdloop_inside(&main_birdloop));
+
+ if (new->shutdown)
+ return;
+
+ if (!new->thread_count)
+ new->thread_count = 1;
+
+ while (1)
+ {
+ LOCK_DOMAIN(resource, birdloop_domain);
+ int dif = list_length(&bird_thread_pickup) - (thread_dropper_goal = new->thread_count);
+ _Bool thread_dropper_running = !!thread_dropper;
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ if (dif < 0)
+ {
+ bird_thread_start(5 S);
+ continue;
+ }
+
+ if ((dif > 0) && !thread_dropper_running)
+ {
+ struct birdloop *tdl = birdloop_new(&root_pool, DOMAIN_ORDER(control), "Thread dropper");
+ event *tde = ev_new_init(tdl->pool, bird_thread_shutdown, NULL);
+
+ LOCK_DOMAIN(resource, birdloop_domain);
+ thread_dropper = tdl;
+ thread_dropper_event = tde;
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ ev_send_loop(thread_dropper, thread_dropper_event);
+ }
+
+ return;
+ }
+}
+
+
+DEFINE_DOMAIN(control);
+
+struct bird_thread_show_data {
+ cli *cli;
+ pool *pool;
+ DOMAIN(control) lock;
+ uint total;
+ uint done;
+ u8 show_loops;
+};
+
+static void
+bird_thread_show_cli_cont(struct cli *c UNUSED)
+{
+ /* Explicitly do nothing to prevent CLI from trying to parse another command. */
+}
+
+static int
+bird_thread_show_cli_cleanup(struct cli *c UNUSED)
+{
+ return 1; /* Defer the cleanup until the writeout is finished. */
+}
+
+static void
+bird_thread_show(void *data)
+{
+ struct bird_thread_show_data *tsd = data;
+
+ LOCK_DOMAIN(control, tsd->lock);
+ if (tsd->show_loops)
+ cli_printf(tsd->cli, -1026, "Thread %p", this_thread);
+
+ u64 total_time_ns = 0;
+ struct birdloop *loop;
+ WALK_LIST(loop, this_thread->loops)
+ {
+ if (tsd->show_loops)
+ cli_printf(tsd->cli, -1026, " Loop %s time: %t", domain_name(loop->time.domain), loop->total_time_spent_ns NS);
+ total_time_ns += loop->total_time_spent_ns;
+ }
+
+ tsd->done++;
+ int last = (tsd->done == tsd->total);
+
+ if (last)
+ {
+ tsd->cli->cont = NULL;
+ tsd->cli->cleanup = NULL;
+ }
+
+ if (tsd->show_loops)
+ cli_printf(tsd->cli, (last ? 1 : -1) * 1026, " Total time: %t", total_time_ns NS);
+ else
+ cli_printf(tsd->cli, (last ? 1 : -1) * 1026, "Thread %p time %t", this_thread, total_time_ns NS);
+
+ UNLOCK_DOMAIN(control, tsd->lock);
+
+ if (last)
+ {
+ the_bird_lock();
+
+ LOCK_DOMAIN(resource, birdloop_domain);
+ if (!EMPTY_LIST(birdloop_pickup))
+ if (tsd->show_loops)
+ {
+ cli_printf(tsd->cli, -1026, "Unassigned loops");
+ WALK_LIST(loop, birdloop_pickup)
+ cli_printf(tsd->cli, -1026, " Loop %s time: %t", domain_name(loop->time.domain), loop->total_time_spent_ns NS);
+ }
+ else
+ {
+ uint count = 0;
+ u64 total_time_ns = 0;
+ WALK_LIST(loop, birdloop_pickup)
+ {
+ count++;
+ total_time_ns += loop->total_time_spent_ns;
+ }
+ cli_printf(tsd->cli, -1026, "Unassigned loops: %d, total time %t", count, total_time_ns NS);
+ }
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+
+ cli_write_trigger(tsd->cli);
+ DOMAIN_FREE(control, tsd->lock);
+ rfree(tsd->pool);
+
+ the_bird_unlock();
+ }
+}
+
+
+void
+cmd_show_threads(int show_loops)
+{
+ pool *p = rp_new(&root_pool, "Show Threads");
+
+ struct bird_thread_show_data *tsd = mb_allocz(p, sizeof(struct bird_thread_show_data));
+ tsd->lock = DOMAIN_NEW(control, "Show Threads");
+ tsd->cli = this_cli;
+ tsd->pool = p;
+ tsd->show_loops = show_loops;
+
+ this_cli->cont = bird_thread_show_cli_cont;
+ this_cli->cleanup = bird_thread_show_cli_cleanup;
+
+ LOCK_DOMAIN(control, tsd->lock);
+ LOCK_DOMAIN(resource, birdloop_domain);
+
+ struct bird_thread *thr;
+ WALK_LIST(thr, bird_thread_pickup)
+ {
+ tsd->total++;
+ ev_send(&thr->priority_events, ev_new_init(p, bird_thread_show, tsd));
+ wakeup_do_kick(thr);
+ }
+
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+ UNLOCK_DOMAIN(control, tsd->lock);
+}
+
+/*
+ * Birdloop
+ */
+
+static struct bird_thread main_thread;
+struct birdloop main_birdloop = { .thread = &main_thread, };
+
+static void birdloop_enter_locked(struct birdloop *loop);
+
+void
+birdloop_init(void)
+{
+ ns_init();
+
+ birdloop_domain = DOMAIN_NEW(resource, "Loop Pickup");
+ init_list(&birdloop_pickup);
+ init_list(&bird_thread_pickup);
+
+ wakeup_init(main_birdloop.thread);
+
+ main_birdloop.time.domain = the_bird_domain.the_bird;
+ main_birdloop.time.loop = &main_birdloop;
+
+ times_update();
+ timers_init(&main_birdloop.time, &root_pool);
+
+ birdloop_enter_locked(&main_birdloop);
+}
+
+static void
+birdloop_stop_internal(struct birdloop *loop)
+{
+ LOOP_TRACE(loop, "Stopping");
+
+ /* Block incoming pings */
+ u32 ltt = atomic_load_explicit(&loop->thread_transition, memory_order_acquire);
+ while (!atomic_compare_exchange_strong_explicit(
+ &loop->thread_transition, &ltt, LTT_PING,
+ memory_order_acq_rel, memory_order_acquire))
+ ;
+
+ /* Flush remaining events */
+ ASSERT_DIE(!ev_run_list(&loop->event_list));
+
+ /* Drop timers */
+ timer *t;
+ while (t = timers_first(&loop->time))
+ tm_stop(t);
+
+ /* Drop sockets */
+ sock *s;
+ WALK_LIST_FIRST2(s, n, loop->sock_list)
+ birdloop_remove_socket(loop, s);
+
+ /* Unschedule from Meta */
+ ev_postpone(&loop->event);
+ tm_stop(&loop->timer);
+
+ /* Remove from thread loop list */
+ rem_node(&loop->n);
+ loop->thread = NULL;
+
+ /* Leave the loop context without causing any other fuss */
+ ASSERT_DIE(!ev_active(&loop->event));
+ loop->ping_pending = 0;
+ birdloop_leave(loop);
+
+ /* Request local socket reload */
+ this_thread->sock_changed++;
+
+ /* Tail-call the stopped hook */
+ loop->stopped(loop->stop_data);
+}
+
+static void
+birdloop_run(void *_loop)
+{
+ /* Run priority events before the loop is executed */
+ ev_run_list(&this_thread->priority_events);
+
+ u64 start_time = ns_now();
+ u64 end_time = start_time + this_thread->max_loop_time_ns;
+
+ struct birdloop *loop = _loop;
+ birdloop_enter(loop);
+
+ u64 locked_time = ns_now(), task_done_time;
+ if (locked_time > end_time)
+ LOOP_WARN(loop, "locked %luns after its scheduled end time", locked_time - end_time);
+
+ uint repeat, loop_runs = 0;
+ do {
+ repeat = 0;
+ LOOP_TRACE(loop, "Regular run");
+ loop_runs++;
+
+ if (loop->stopped)
+ /* Birdloop left inside the helper function */
+ return birdloop_stop_internal(loop);
+
+ /* Process sockets */
+ repeat += sockets_fire(loop);
+
+ /* Run timers */
+ timers_fire(&loop->time, 0);
+
+ /* Run flag handlers */
+ repeat += birdloop_process_flags(loop);
+
+ /* Run events */
+ repeat += ev_run_list(&loop->event_list);
+
+ /* Check end time */
+ } while (((task_done_time = ns_now()) < end_time) && repeat);
+
+ /* Request meta timer */
+ timer *t = timers_first(&loop->time);
+ if (t)
+ tm_start_in(&loop->timer, tm_remains(t), this_thread->meta);
+ else
+ tm_stop(&loop->timer);
+
+ /* Request re-run if needed */
+ if (repeat)
+ ev_send_loop(this_thread->meta, &loop->event);
+
+ /* Collect socket change requests */
+ this_thread->sock_changed += loop->sock_changed;
+ loop->sock_changed = 0;
+
+ birdloop_leave(loop);
+}
+
+static void
+birdloop_run_timer(timer *tm)
+{
+ struct birdloop *loop = tm->data;
+ LOOP_TRACE(loop, "Timer ready, requesting run");
+ ev_send_loop(loop->thread->meta, &loop->event);
+}
+
+static struct birdloop *
+birdloop_new_internal(pool *pp, uint order, const char *name, int request_pickup)
+{
+ struct domain_generic *dg = domain_new(name, order);
+
+ pool *p = rp_new(pp, name);
+ struct birdloop *loop = mb_allocz(p, sizeof(struct birdloop));
+ loop->pool = p;
+
+ loop->time.domain = dg;
+ loop->time.loop = loop;
+
+ atomic_store_explicit(&loop->thread_transition, 0, memory_order_relaxed);
+
+ birdloop_enter(loop);
+
+ ev_init_list(&loop->event_list, loop, name);
+ timers_init(&loop->time, p);
+ sockets_init(loop);
+
+ loop->event = (event) { .hook = birdloop_run, .data = loop, };
+ loop->timer = (timer) { .hook = birdloop_run_timer, .data = loop, };
+
+ if (request_pickup)
+ {
+ LOCK_DOMAIN(resource, birdloop_domain);
+ add_tail(&birdloop_pickup, &loop->n);
+ wakeup_do_kick(SKIP_BACK(struct bird_thread, n, HEAD(bird_thread_pickup)));
+ UNLOCK_DOMAIN(resource, birdloop_domain);
+ }
+ else
+ loop->n.next = loop->n.prev = &loop->n;
+
+ birdloop_leave(loop);
+
+ return loop;
+}
+
+struct birdloop *
+birdloop_new(pool *pp, uint order, const char *name)
+{
+ return birdloop_new_internal(pp, order, name, 1);
+}
+
+static void
+birdloop_do_stop(struct birdloop *loop, void (*stopped)(void *data), void *data)
+{
+ LOOP_TRACE(loop, "Stop requested");
+
+ loop->stopped = stopped;
+ loop->stop_data = data;
+
+ birdloop_do_ping(loop);
+}
+
+void
+birdloop_stop(struct birdloop *loop, void (*stopped)(void *data), void *data)
+{
+ DG_LOCK(loop->time.domain);
+ birdloop_do_stop(loop, stopped, data);
+ DG_UNLOCK(loop->time.domain);
+}
+
+void
+birdloop_stop_self(struct birdloop *loop, void (*stopped)(void *data), void *data)
+{
+ ASSERT_DIE(loop == birdloop_current);
+ ASSERT_DIE(DG_IS_LOCKED(loop->time.domain));
+
+ birdloop_do_stop(loop, stopped, data);
+}
+
+void
+birdloop_free(struct birdloop *loop)
+{
+ ASSERT_DIE(loop->thread == NULL);
+
+ domain_free(loop->time.domain);
+ rfree(loop->pool);
+}
+
+static void
+birdloop_enter_locked(struct birdloop *loop)
+{
+ ASSERT_DIE(DG_IS_LOCKED(loop->time.domain));
+ ASSERT_DIE(!birdloop_inside(loop));
+
+ /* Store the old context */
+ loop->prev_loop = birdloop_current;
+
+ /* Put the new context */
+ birdloop_current = loop;
+}
+
+void
+birdloop_enter(struct birdloop *loop)
+{
+ DG_LOCK(loop->time.domain);
+ return birdloop_enter_locked(loop);
+}
+
+static void
+birdloop_leave_locked(struct birdloop *loop)
+{
+ /* Check the current context */
+ ASSERT_DIE(birdloop_current == loop);
+
+ /* Send pending pings */
+ if (loop->ping_pending)
+ {
+ LOOP_TRACE(loop, "sending pings on leave");
+ loop->ping_pending = 0;
+ birdloop_do_ping(loop);
+ }
+
+ /* Restore the old context */
+ birdloop_current = loop->prev_loop;
+}
+
+void
+birdloop_leave(struct birdloop *loop)
+{
+ birdloop_leave_locked(loop);
+ DG_UNLOCK(loop->time.domain);
+}
+
+void
+birdloop_mask_wakeups(struct birdloop *loop)
+{
+ ASSERT_DIE(birdloop_wakeup_masked == NULL);
+ birdloop_wakeup_masked = loop;
+}
+
+void
+birdloop_unmask_wakeups(struct birdloop *loop)
+{
+ ASSERT_DIE(birdloop_wakeup_masked == loop);
+ birdloop_wakeup_masked = NULL;
+ if (birdloop_wakeup_masked_count)
+ wakeup_do_kick(loop->thread);
+
+ birdloop_wakeup_masked_count = 0;
+}
+
+void
+birdloop_yield(void)
+{
+ usleep(100);
+}
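
Taken together, the public surface of this file is small: create a loop, send work into it, and stop it with a callback. A hypothetical protocol-side usage sketch (event setup elided; DOMAIN_ORDER(control) borrowed from the thread dropper above):

    static void
    loop_gone(void *data)
    {
      /* Runs after the loop has fully stopped; safe to free data here. */
    }

    static void
    example(pool *p, event *work)
    {
      struct birdloop *loop = birdloop_new(p, DOMAIN_ORDER(control), "Example");

      /* Queue work; a worker thread picks the loop up and runs it. */
      ev_send_loop(loop, work);

      /* Later: request an asynchronous stop; loop_gone() fires afterwards. */
      birdloop_stop(loop, loop_gone, NULL);
    }

Once the callback has fired, loop->thread is NULL and birdloop_free() may reclaim the loop.
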
diff --git a/sysdep/unix/io-loop.h b/sysdep/unix/io-loop.h
new file mode 100644
index 00000000..c531d43e
--- /dev/null
+++ b/sysdep/unix/io-loop.h
@@ -0,0 +1,93 @@
+/*
+ * BIRD -- I/O and event loop
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_SYSDEP_UNIX_IO_LOOP_H_
+#define _BIRD_SYSDEP_UNIX_IO_LOOP_H_
+
+#include "lib/rcu.h"
+
+#include <pthread.h>
+
+struct pipe
+{
+ int fd[2];
+};
+
+struct pfd {
+ BUFFER(struct pollfd) pfd;
+ BUFFER(struct birdloop *) loop;
+};
+
+void sockets_prepare(struct birdloop *, struct pfd *);
+void socket_changed(struct birdsock *);
+
+void pipe_new(struct pipe *);
+void pipe_pollin(struct pipe *, struct pfd *);
+void pipe_drain(struct pipe *);
+void pipe_kick(struct pipe *);
+
+struct birdloop
+{
+ node n;
+
+ event event;
+ timer timer;
+
+ pool *pool;
+
+ struct timeloop time;
+ event_list event_list;
+ list sock_list;
+ struct birdsock *sock_active;
+ int sock_num;
+ uint sock_changed;
+
+ uint ping_pending;
+
+ _Atomic u32 thread_transition;
+#define LTT_PING 1
+#define LTT_MOVE 2
+ _Atomic u32 flags;
+ struct birdloop_flag_handler *flag_handler;
+
+ void (*stopped)(void *data);
+ void *stop_data;
+
+ struct birdloop *prev_loop;
+
+ struct bird_thread *thread;
+
+ u64 total_time_spent_ns;
+};
+
+struct bird_thread
+{
+ node n;
+
+ struct pipe wakeup;
+ event_list priority_events;
+
+ struct birdloop *meta;
+
+ pthread_t thread_id;
+ pthread_attr_t thread_attr;
+
+ struct rcu_thread rcu;
+
+ list loops;
+ pool *pool;
+ struct pfd *pfd;
+
+ event cleanup_event;
+
+ int sock_changed;
+ uint loop_count;
+
+ u64 max_latency_ns;
+ u64 max_loop_time_ns;
+};
+
+#endif
diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c
index e131ca41..88d187a4 100644
--- a/sysdep/unix/io.c
+++ b/sysdep/unix/io.c
@@ -36,12 +36,14 @@
#include "lib/resource.h"
#include "lib/socket.h"
#include "lib/event.h"
+#include "lib/locking.h"
#include "lib/timer.h"
#include "lib/string.h"
#include "nest/iface.h"
#include "conf/conf.h"
#include "sysdep/unix/unix.h"
+#include "sysdep/unix/io-loop.h"
#include CONFIG_INCLUDE_SYSIO_H
/* Maximum number of calls of tx handler for one socket in one
@@ -74,7 +76,7 @@ rf_free(resource *r)
}
static void
-rf_dump(resource *r)
+rf_dump(resource *r, unsigned indent UNUSED)
{
struct rfile *a = (struct rfile *) r;
@@ -122,12 +124,16 @@ rf_fileno(struct rfile *f)
btime boot_time;
+
void
-times_init(struct timeloop *loop)
+times_update(void)
{
struct timespec ts;
int rv;
+ btime old_time = current_time();
+ btime old_real_time = current_real_time();
+
rv = clock_gettime(CLOCK_MONOTONIC, &ts);
if (rv < 0)
die("Monotonic clock is missing");
@@ -135,42 +141,33 @@ times_init(struct timeloop *loop)
if ((ts.tv_sec < 0) || (((u64) ts.tv_sec) > ((u64) 1 << 40)))
log(L_WARN "Monotonic clock is crazy");
- loop->last_time = ts.tv_sec S + ts.tv_nsec NS;
- loop->real_time = 0;
-}
-
-void
-times_update(struct timeloop *loop)
-{
- struct timespec ts;
- int rv;
-
- rv = clock_gettime(CLOCK_MONOTONIC, &ts);
- if (rv < 0)
- die("clock_gettime: %m");
-
btime new_time = ts.tv_sec S + ts.tv_nsec NS;
- if (new_time < loop->last_time)
+ if (new_time < old_time)
log(L_ERR "Monotonic clock is broken");
- loop->last_time = new_time;
- loop->real_time = 0;
-}
-
-void
-times_update_real_time(struct timeloop *loop)
-{
- struct timespec ts;
- int rv;
-
rv = clock_gettime(CLOCK_REALTIME, &ts);
if (rv < 0)
die("clock_gettime: %m");
- loop->real_time = ts.tv_sec S + ts.tv_nsec NS;
-}
+ btime new_real_time = ts.tv_sec S + ts.tv_nsec NS;
+
+ if (!atomic_compare_exchange_strong_explicit(
+ &last_time,
+ &old_time,
+ new_time,
+ memory_order_acq_rel,
+ memory_order_relaxed))
+ DBG("Time update collision: last_time");
+ if (!atomic_compare_exchange_strong_explicit(
+ &real_time,
+ &old_real_time,
+ new_real_time,
+ memory_order_acq_rel,
+ memory_order_relaxed))
+ DBG("Time update collision: real_time");
+}
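
times_update() now publishes the clock through global atomics instead of per-timeloop fields (last_time and real_time are assumed to be declared as _Atomic btime in lib/timer.h in this series). A losing CAS is harmless: it only means another thread already stored a fresher reading. The pattern in isolation:

    static _Atomic btime last_time;

    /* Publish a new reading unless a concurrent updater already did. */
    static void
    publish_time(btime new_time)
    {
      btime old = atomic_load_explicit(&last_time, memory_order_relaxed);
      if (!atomic_compare_exchange_strong_explicit(
            &last_time, &old, new_time,
            memory_order_acq_rel, memory_order_relaxed))
        DBG("Time update collision: keeping the newer value\n");
    }
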
/**
* DOC: Sockets
@@ -725,11 +722,7 @@ sk_log_error(sock *s, const char *p)
* Actual struct birdsock code
*/
-static list sock_list;
-static struct birdsock *current_sock;
-static struct birdsock *stored_sock;
-
-static inline sock *
+sock *
sk_next(sock *s)
{
if (!s->n.next->next)
@@ -777,8 +770,7 @@ sk_ssh_free(sock *s)
if (ssh->channel)
{
- if (ssh_channel_is_open(ssh->channel))
- ssh_channel_close(ssh->channel);
+ ssh_channel_close(ssh->channel);
ssh_channel_free(ssh->channel);
ssh->channel = NULL;
}
@@ -792,6 +784,7 @@ sk_ssh_free(sock *s)
}
#endif
+
static void
sk_free(resource *r)
{
@@ -804,20 +797,10 @@ sk_free(resource *r)
sk_ssh_free(s);
#endif
- if (s->fd < 0)
- return;
-
- /* FIXME: we should call sk_stop() for SKF_THREAD sockets */
- if (!(s->flags & SKF_THREAD))
- {
- if (s == current_sock)
- current_sock = sk_next(s);
- if (s == stored_sock)
- stored_sock = sk_next(s);
- rem_node(&s->n);
- }
+ if (s->loop)
+ birdloop_remove_socket(s->loop, s);
- if (s->type != SK_SSH && s->type != SK_SSH_ACTIVE)
+ if (s->fd >= 0 && s->type != SK_SSH && s->type != SK_SSH_ACTIVE)
close(s->fd);
s->fd = -1;
@@ -868,7 +851,7 @@ sk_reallocate(sock *s)
}
static void
-sk_dump(resource *r)
+sk_dump(resource *r, unsigned indent UNUSED)
{
sock *s = (sock *) r;
static char *sk_type_names[] = { "TCP<", "TCP>", "TCP", "UDP", NULL, "IP", NULL, "MAGIC", "UNIX<", "UNIX", "SSH>", "SSH", "DEL!" };
@@ -1030,12 +1013,6 @@ sk_setup(sock *s)
}
static void
-sk_insert(sock *s)
-{
- add_tail(&sock_list, &s->n);
-}
-
-static void
sk_tcp_connected(sock *s)
{
sockaddr sa;
@@ -1108,7 +1085,8 @@ sk_passive_connected(sock *s, int type)
return 1;
}
- sk_insert(t);
+ birdloop_add_socket(s->loop, t);
+
sk_alloc_bufs(t);
s->rx_hook(t, 0);
return 1;
@@ -1153,34 +1131,45 @@ sk_ssh_connect(sock *s)
{
int server_identity_is_ok = 1;
+#ifdef HAVE_SSH_OLD_SERVER_VALIDATION_API
+#define ssh_session_is_known_server ssh_is_server_known
+#define SSH_KNOWN_HOSTS_OK SSH_SERVER_KNOWN_OK
+#define SSH_KNOWN_HOSTS_UNKNOWN SSH_SERVER_NOT_KNOWN
+#define SSH_KNOWN_HOSTS_CHANGED SSH_SERVER_KNOWN_CHANGED
+#define SSH_KNOWN_HOSTS_NOT_FOUND SSH_SERVER_FILE_NOT_FOUND
+#define SSH_KNOWN_HOSTS_ERROR SSH_SERVER_ERROR
+#define SSH_KNOWN_HOSTS_OTHER SSH_SERVER_FOUND_OTHER
+#endif
+
/* Check server identity */
- switch (ssh_is_server_known(s->ssh->session))
+ switch (ssh_session_is_known_server(s->ssh->session))
{
#define LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s,msg,args...) log(L_WARN "SSH Identity %s@%s:%u: " msg, (s)->ssh->username, (s)->host, (s)->dport, ## args);
- case SSH_SERVER_KNOWN_OK:
+ case SSH_KNOWN_HOSTS_OK:
/* The server is known and has not changed. */
break;
- case SSH_SERVER_NOT_KNOWN:
+ case SSH_KNOWN_HOSTS_UNKNOWN:
LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s, "The server is unknown, its public key was not found in the known host file %s", s->ssh->server_hostkey_path);
+ server_identity_is_ok = 0;
break;
- case SSH_SERVER_KNOWN_CHANGED:
+ case SSH_KNOWN_HOSTS_CHANGED:
LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s, "The server key has changed. Either you are under attack or the administrator changed the key.");
server_identity_is_ok = 0;
break;
- case SSH_SERVER_FILE_NOT_FOUND:
+ case SSH_KNOWN_HOSTS_NOT_FOUND:
LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s, "The known host file %s does not exist", s->ssh->server_hostkey_path);
server_identity_is_ok = 0;
break;
- case SSH_SERVER_ERROR:
+ case SSH_KNOWN_HOSTS_ERROR:
LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s, "Some error happened");
server_identity_is_ok = 0;
break;
- case SSH_SERVER_FOUND_OTHER:
+ case SSH_KNOWN_HOSTS_OTHER:
LOG_WARN_ABOUT_SSH_SERVER_VALIDATION(s, "The server gave use a key of a type while we had an other type recorded. " \
"It is a possible attack.");
server_identity_is_ok = 0;
@@ -1311,6 +1300,7 @@ sk_open_ssh(sock *s)
/**
* sk_open - open a socket
+ * @loop: loop
* @s: socket
*
* This function takes a socket resource created by sk_new() and
@@ -1320,7 +1310,7 @@ sk_open_ssh(sock *s)
* Result: 0 for success, -1 for an error.
*/
int
-sk_open(sock *s)
+sk_open(sock *s, struct birdloop *loop)
{
int af = AF_UNSPEC;
int fd = -1;
@@ -1473,9 +1463,7 @@ sk_open(sock *s)
sk_alloc_bufs(s);
}
- if (!(s->flags & SKF_THREAD))
- sk_insert(s);
-
+ birdloop_add_socket(loop, s);
return 0;
err:
@@ -1485,7 +1473,7 @@ err:
}
int
-sk_open_unix(sock *s, char *name)
+sk_open_unix(sock *s, struct birdloop *loop, char *name)
{
struct sockaddr_un sa;
int fd;
@@ -1512,7 +1500,7 @@ sk_open_unix(sock *s, char *name)
return -1;
s->fd = fd;
- sk_insert(s);
+ birdloop_add_socket(loop, s);
return 0;
}
@@ -1638,6 +1626,13 @@ sk_recvmsg(sock *s)
static inline void reset_tx_buffer(sock *s) { s->ttx = s->tpos = s->tbuf; }
+_Bool
+sk_tx_pending(sock *s)
+{
+ return s->ttx != s->tpos;
+}
+
+
static int
sk_maybe_write(sock *s)
{
@@ -1648,7 +1643,7 @@ sk_maybe_write(sock *s)
case SK_TCP:
case SK_MAGIC:
case SK_UNIX:
- while (s->ttx != s->tpos)
+ while (sk_tx_pending(s))
{
e = write(s->fd, s->ttx, s->tpos - s->ttx);
@@ -1670,7 +1665,7 @@ sk_maybe_write(sock *s)
#ifdef HAVE_LIBSSH
case SK_SSH:
- while (s->ttx != s->tpos)
+ while (sk_tx_pending(s))
{
e = ssh_channel_write(s->ssh->channel, s->ttx, s->tpos - s->ttx);
@@ -1753,7 +1748,12 @@ sk_send(sock *s, unsigned len)
{
s->ttx = s->tbuf;
s->tpos = s->tbuf + len;
- return sk_maybe_write(s);
+
+ int e = sk_maybe_write(s);
+ if (e == 0) /* Trigger thread poll reload to poll this socket's write. */
+ socket_changed(s);
+
+ return e;
}
/**
@@ -1800,7 +1800,7 @@ call_rx_hook(sock *s, int size)
if (s->rx_hook(s, size))
{
/* We need to be careful since the socket could have been deleted by the hook */
- if (current_sock == s)
+ if (s->loop->sock_active == s)
s->rpos = s->rbuf;
}
}
@@ -1964,7 +1964,7 @@ sk_write_noflush(sock *s)
#endif
default:
- if (s->ttx != s->tpos && sk_maybe_write(s) > 0)
+ if (sk_tx_pending(s) && sk_maybe_write(s) > 0)
{
if (s->tx_hook)
s->tx_hook(s);
@@ -2010,11 +2010,11 @@ sk_dump_all(void)
sock *s;
debug("Open sockets:\n");
- WALK_LIST(n, sock_list)
+ WALK_LIST(n, main_birdloop.sock_list)
{
s = SKIP_BACK(sock, n, n);
debug("%p ", s);
- sk_dump(&s->r);
+ sk_dump(&s->r, 3);
}
debug("\n");
}
@@ -2037,30 +2037,17 @@ struct event_log_entry
static struct event_log_entry event_log[EVENT_LOG_LENGTH];
static struct event_log_entry *event_open;
static int event_log_pos, event_log_num, watchdog_active;
-static btime last_time;
+static btime last_io_time;
static btime loop_time;
static void
io_update_time(void)
{
- struct timespec ts;
- int rv;
-
- /*
- * This is third time-tracking procedure (after update_times() above and
- * times_update() in BFD), dedicated to internal event log and latency
- * tracking. Hopefully, we consolidate these sometimes.
- */
-
- rv = clock_gettime(CLOCK_MONOTONIC, &ts);
- if (rv < 0)
- die("clock_gettime: %m");
-
- last_time = ts.tv_sec S + ts.tv_nsec NS;
+ last_io_time = current_time();
if (event_open)
{
- event_open->duration = last_time - event_open->timestamp;
+ event_open->duration = last_io_time - event_open->timestamp;
if (event_open->duration > config->latency_limit)
log(L_WARN "Event 0x%p 0x%p took %u.%03u ms",
@@ -2089,7 +2076,7 @@ io_log_event(void *hook, void *data)
en->hook = hook;
en->data = data;
- en->timestamp = last_time;
+ en->timestamp = last_io_time;
en->duration = 0;
event_log_num++;
@@ -2117,14 +2104,14 @@ io_log_dump(void)
struct event_log_entry *en = event_log + (event_log_pos + i) % EVENT_LOG_LENGTH;
if (en->hook)
log(L_DEBUG " Event 0x%p 0x%p at %8d for %d ms", en->hook, en->data,
- (int) ((last_time - en->timestamp) TO_MS), (int) (en->duration TO_MS));
+ (int) ((last_io_time - en->timestamp) TO_MS), (int) (en->duration TO_MS));
}
}
void
watchdog_sigalrm(int sig UNUSED)
{
- /* Update last_time and duration, but skip latency check */
+ /* Update last_io_time and duration, but skip latency check */
config->latency_limit = 0xffffffff;
io_update_time();
@@ -2139,7 +2126,7 @@ watchdog_start1(void)
{
io_update_time();
- loop_time = last_time;
+ loop_time = last_io_time;
}
static inline void
@@ -2147,7 +2134,7 @@ watchdog_start(void)
{
io_update_time();
- loop_time = last_time;
+ loop_time = last_io_time;
event_log_num = 0;
if (config->watchdog_timeout)
@@ -2168,7 +2155,7 @@ watchdog_stop(void)
watchdog_active = 0;
}
- btime duration = last_time - loop_time;
+ btime duration = last_io_time - loop_time;
if (duration > config->watchdog_warning)
log(L_WARN "I/O loop cycle took %u.%03u ms for %d events",
(uint) (duration TO_MS), (uint) (duration % 1000), event_log_num);
@@ -2182,9 +2169,10 @@ watchdog_stop(void)
void
io_init(void)
{
- init_list(&sock_list);
- init_list(&global_event_list);
- init_list(&global_work_list);
+ init_list(&main_birdloop.sock_list);
+ ev_init_list(&global_event_list, &main_birdloop, "Global event list");
+ ev_init_list(&global_work_list, &main_birdloop, "Global work list");
+ ev_init_list(&main_birdloop.event_list, &main_birdloop, "Global fast event list");
krt_io_init();
// XXX init_times();
// XXX update_times();
@@ -2198,64 +2186,42 @@ static int short_loops = 0;
#define SHORT_LOOP_MAX 10
#define WORK_EVENTS_MAX 10
+sock *stored_sock;
+
void
io_loop(void)
{
int poll_tout, timeout;
- int nfds, events, pout;
+ int events, pout;
timer *t;
- sock *s;
- node *n;
- int fdmax = 256;
- struct pollfd *pfd = xmalloc(fdmax * sizeof(struct pollfd));
+ struct pfd pfd;
+ BUFFER_INIT(pfd.pfd, &root_pool, 16);
+ BUFFER_INIT(pfd.loop, &root_pool, 16);
watchdog_start1();
for(;;)
{
- times_update(&main_timeloop);
+ times_update();
events = ev_run_list(&global_event_list);
events = ev_run_list_limited(&global_work_list, WORK_EVENTS_MAX) || events;
- timers_fire(&main_timeloop);
+ events = ev_run_list(&main_birdloop.event_list) || events;
+ timers_fire(&main_birdloop.time, 1);
io_close_event();
// FIXME
poll_tout = (events ? 0 : 3000); /* Time in milliseconds */
- if (t = timers_first(&main_timeloop))
+ if (t = timers_first(&main_birdloop.time))
{
- times_update(&main_timeloop);
+ times_update();
timeout = (tm_remains(t) TO_MS) + 1;
poll_tout = MIN(poll_tout, timeout);
}
- nfds = 0;
- WALK_LIST(n, sock_list)
- {
- pfd[nfds] = (struct pollfd) { .fd = -1 }; /* everything other set to 0 by this */
- s = SKIP_BACK(sock, n, n);
- if (s->rx_hook)
- {
- pfd[nfds].fd = s->fd;
- pfd[nfds].events |= POLLIN;
- }
- if (s->tx_hook && s->ttx != s->tpos)
- {
- pfd[nfds].fd = s->fd;
- pfd[nfds].events |= POLLOUT;
- }
- if (pfd[nfds].fd != -1)
- {
- s->index = nfds;
- nfds++;
- }
- else
- s->index = -1;
+ BUFFER_FLUSH(pfd.pfd);
+ BUFFER_FLUSH(pfd.loop);
- if (nfds >= fdmax)
- {
- fdmax *= 2;
- pfd = xrealloc(pfd, fdmax * sizeof(struct pollfd));
- }
- }
+ pipe_pollin(&main_birdloop.thread->wakeup, &pfd);
+ sockets_prepare(&main_birdloop, &pfd);
/*
* Yes, this is racy. But even if the signal comes before this test
@@ -2286,103 +2252,108 @@ io_loop(void)
/* And finally enter poll() to find active sockets */
watchdog_stop();
- pout = poll(pfd, nfds, poll_tout);
+ birdloop_leave(&main_birdloop);
+ pout = poll(pfd.pfd.data, pfd.pfd.used, poll_tout);
+ birdloop_enter(&main_birdloop);
watchdog_start();
if (pout < 0)
{
if (errno == EINTR || errno == EAGAIN)
continue;
- die("poll: %m");
+ bug("poll: %m");
}
if (pout)
{
- times_update(&main_timeloop);
+ if (pfd.pfd.data[0].revents & POLLIN)
+ {
+ /* IO loop reload requested */
+ pipe_drain(&main_birdloop.thread->wakeup);
+ atomic_fetch_and_explicit(&main_birdloop.thread_transition, ~LTT_PING, memory_order_acq_rel);
+ continue;
+ }
+
+ times_update();
/* guaranteed to be non-empty */
- current_sock = SKIP_BACK(sock, n, HEAD(sock_list));
+ main_birdloop.sock_active = SKIP_BACK(sock, n, HEAD(main_birdloop.sock_list));
- while (current_sock)
+ while (main_birdloop.sock_active)
+ {
+ sock *s = main_birdloop.sock_active;
+ if (s->index != -1)
{
- sock *s = current_sock;
- if (s->index == -1)
- {
- current_sock = sk_next(s);
- goto next;
- }
-
int e;
int steps;
steps = MAX_STEPS;
- if (s->fast_rx && (pfd[s->index].revents & POLLIN) && s->rx_hook)
+ if (s->fast_rx && (pfd.pfd.data[s->index].revents & POLLIN) && s->rx_hook)
do
{
steps--;
io_log_event(s->rx_hook, s->data);
- e = sk_read(s, pfd[s->index].revents);
- if (s != current_sock)
- goto next;
+ e = sk_read(s, pfd.pfd.data[s->index].revents);
}
- while (e && s->rx_hook && steps);
+ while (e && (main_birdloop.sock_active == s) && s->rx_hook && steps);
+
+ if (s != main_birdloop.sock_active)
+ continue;
steps = MAX_STEPS;
- if (pfd[s->index].revents & POLLOUT)
+ if (pfd.pfd.data[s->index].revents & POLLOUT)
do
{
steps--;
io_log_event(s->tx_hook, s->data);
e = sk_write(s);
- if (s != current_sock)
- goto next;
}
- while (e && steps);
+ while (e && (main_birdloop.sock_active == s) && steps);
- current_sock = sk_next(s);
- next: ;
+ if (s != main_birdloop.sock_active)
+ continue;
}
+ main_birdloop.sock_active = sk_next(s);
+ }
+
short_loops++;
if (events && (short_loops < SHORT_LOOP_MAX))
continue;
short_loops = 0;
int count = 0;
- current_sock = stored_sock;
- if (current_sock == NULL)
- current_sock = SKIP_BACK(sock, n, HEAD(sock_list));
+ main_birdloop.sock_active = stored_sock;
+ if (main_birdloop.sock_active == NULL)
+ main_birdloop.sock_active = SKIP_BACK(sock, n, HEAD(main_birdloop.sock_list));
- while (current_sock && count < MAX_RX_STEPS)
+ while (main_birdloop.sock_active && count < MAX_RX_STEPS)
{
- sock *s = current_sock;
+ sock *s = main_birdloop.sock_active;
if (s->index == -1)
- {
- current_sock = sk_next(s);
- goto next2;
- }
+ goto next2;
- if (!s->fast_rx && (pfd[s->index].revents & POLLIN) && s->rx_hook)
+ if (!s->fast_rx && (pfd.pfd.data[s->index].revents & POLLIN) && s->rx_hook)
{
count++;
io_log_event(s->rx_hook, s->data);
- sk_read(s, pfd[s->index].revents);
- if (s != current_sock)
- goto next2;
+ sk_read(s, pfd.pfd.data[s->index].revents);
+ if (s != main_birdloop.sock_active)
+ continue;
}
- if (pfd[s->index].revents & (POLLHUP | POLLERR))
+ if (pfd.pfd.data[s->index].revents & (POLLHUP | POLLERR))
{
- sk_err(s, pfd[s->index].revents);
- if (s != current_sock)
- goto next2;
+ sk_err(s, pfd.pfd.data[s->index].revents);
+ if (s != main_birdloop.sock_active)
+ continue;
}
- current_sock = sk_next(s);
next2: ;
+ main_birdloop.sock_active = sk_next(s);
}
- stored_sock = current_sock;
+ stored_sock = main_birdloop.sock_active;
}
}
}
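
The reworked loop reserves slot 0 of the pollfd set for a wakeup pipe, so other
threads can interrupt poll(); on POLLIN it drains the pipe and atomically clears
the LTT_PING bit before rebuilding the poll set. A self-contained sketch of the
underlying self-pipe pattern (independent of BIRD's pipe_pollin()/pipe_drain()
helpers, which are defined in io-loop.c):

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Self-pipe wakeup: wake_fd[1] is written by other threads,
 * wake_fd[0] always sits at index 0 of the poll set. */
static int wake_fd[2];

static void
wakeup_init(void)
{
  pipe(wake_fd);
  fcntl(wake_fd[0], F_SETFL, O_NONBLOCK);
}

static void
wakeup_ping(void)
{
  write(wake_fd[1], "", 1);   /* any single byte wakes the poller */
}

static void
wakeup_drain(void)
{
  char buf[64];
  while (read(wake_fd[0], buf, sizeof buf) > 0)
    ;   /* non-blocking read; stop once the pipe is empty */
}

static void
loop_iteration(struct pollfd *pfd, int nfds, int timeout_ms)
{
  pfd[0].fd = wake_fd[0];
  pfd[0].events = POLLIN;

  if ((poll(pfd, nfds, timeout_ms) > 0) && (pfd[0].revents & POLLIN))
    wakeup_drain();   /* a reload was requested; rebuild the poll set */
}
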
diff --git a/sysdep/unix/krt.Y b/sysdep/unix/krt.Y
index 95b54d65..4ce9a328 100644
--- a/sysdep/unix/krt.Y
+++ b/sysdep/unix/krt.Y
@@ -29,7 +29,7 @@ kif_set_preferred(ip_addr ip)
CF_DECLS
-CF_KEYWORDS(KERNEL, PERSIST, SCAN, TIME, LEARN, DEVICE, ROUTES, GRACEFUL, RESTART, KRT_SOURCE, KRT_METRIC, MERGE, PATHS)
+CF_KEYWORDS(KERNEL, PERSIST, SCAN, TIME, LEARN, DEVICE, ROUTES, GRACEFUL, RESTART, MERGE, PATHS)
CF_KEYWORDS(INTERFACE, PREFERRED)
%type <i> kern_mp_limit
@@ -122,9 +122,6 @@ kif_iface:
kif_iface_start iface_patt_list_nopx kif_iface_opt_list;
-dynamic_attr: KRT_SOURCE { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_KRT_SOURCE); } ;
-dynamic_attr: KRT_METRIC { $$ = f_new_dynamic_attr(EAF_TYPE_INT, T_INT, EA_KRT_METRIC); } ;
-
CF_CODE
CF_END
diff --git a/sysdep/unix/krt.c b/sysdep/unix/krt.c
index 9f95247f..9e6ddb45 100644
--- a/sysdep/unix/krt.c
+++ b/sysdep/unix/krt.c
@@ -53,7 +53,7 @@
#include "nest/bird.h"
#include "nest/iface.h"
-#include "nest/route.h"
+#include "nest/rt.h"
#include "nest/protocol.h"
#include "filter/filter.h"
#include "conf/conf.h"
@@ -163,6 +163,15 @@ kif_shutdown(struct proto *P)
return PS_DOWN;
}
+static void
+kif_cleanup(struct proto *p)
+{
+ if (p->debug & D_EVENTS)
+ log(L_TRACE "%s: Flushing interfaces", p->name);
+ if_start_update();
+ if_end_update();
+}
+
static int
kif_reconfigure(struct proto *p, struct proto_config *new)
{
@@ -232,13 +241,13 @@ kif_copy_config(struct proto_config *dest, struct proto_config *src)
struct protocol proto_unix_iface = {
.name = "Device",
.template = "device%d",
- .class = PROTOCOL_DEVICE,
.proto_size = sizeof(struct kif_proto),
.config_size = sizeof(struct kif_config),
.preconfig = kif_preconfig,
.init = kif_init,
.start = kif_start,
.shutdown = kif_shutdown,
+ .cleanup = kif_cleanup,
.reconfigure = kif_reconfigure,
.copy_config = kif_copy_config
};
@@ -258,14 +267,14 @@ static inline void
krt_trace_in(struct krt_proto *p, rte *e, char *msg)
{
if (p->p.debug & D_PACKETS)
- log(L_TRACE "%s: %N: %s", p->p.name, e->net->n.addr, msg);
+ log(L_TRACE "%s: %N: %s", p->p.name, e->net, msg);
}
static inline void
krt_trace_in_rl(struct tbf *f, struct krt_proto *p, rte *e, char *msg)
{
if (p->p.debug & D_PACKETS)
- log_rl(f, L_TRACE "%s: %N: %s", p->p.name, e->net->n.addr, msg);
+ log_rl(f, L_TRACE "%s: %N: %s", p->p.name, e->net, msg);
}
/*
@@ -287,259 +296,40 @@ static struct tbf rl_alien = TBF_DEFAULT_LOG_LIMITS;
static inline u32
krt_metric(rte *a)
{
- eattr *ea = ea_find(a->attrs->eattrs, EA_KRT_METRIC);
+ eattr *ea = ea_find(a->attrs, &ea_krt_metric);
return ea ? ea->u.data : 0;
}
-static inline int
-krt_same_key(rte *a, rte *b)
-{
- return (krt_metric(a) == krt_metric(b));
-}
-
-static inline int
-krt_uptodate(rte *a, rte *b)
-{
- return (a->attrs == b->attrs);
-}
-
-static void
-krt_learn_announce_update(struct krt_proto *p, rte *e)
-{
- net *n = e->net;
- rta *aa = rta_clone(e->attrs);
- rte *ee = rte_get_temp(aa, p->p.main_source);
- rte_update(&p->p, n->n.addr, ee);
-}
-
-static void
-krt_learn_announce_delete(struct krt_proto *p, net *n)
-{
- rte_update(&p->p, n->n.addr, NULL);
-}
-
static void
krt_learn_alien_attr(struct channel *c, rte *e)
{
- ASSERT(!e->attrs->cached);
- e->attrs->pref = c->preference;
-
- e->attrs = rta_lookup(e->attrs);
+ ea_set_attr_u32(&e->attrs, &ea_gen_preference, 0, c->preference);
}
/* Called when alien route is discovered during scan */
static void
krt_learn_scan(struct krt_proto *p, rte *e)
{
- net *n0 = e->net;
- net *n = net_get(p->krt_table, n0->n.addr);
- rte *m, **mm;
+ rte e0 = {
+ .attrs = e->attrs,
+ .src = rt_get_source(&p->p, krt_metric(e)),
+ };
- krt_learn_alien_attr(p->p.main_channel, e);
+ krt_learn_alien_attr(p->p.main_channel, &e0);
- for(mm=&n->routes; m = *mm; mm=&m->next)
- if (krt_same_key(m, e))
- break;
- if (m)
- {
- if (krt_uptodate(m, e))
- {
- krt_trace_in_rl(&rl_alien, p, e, "[alien] seen");
- rte_free(e);
- m->pflags |= KRT_REF_SEEN;
- }
- else
- {
- krt_trace_in(p, e, "[alien] updated");
- *mm = m->next;
- rte_free(m);
- m = NULL;
- }
- }
- else
- krt_trace_in(p, e, "[alien] created");
- if (!m)
- {
- e->next = n->routes;
- n->routes = e;
- e->pflags |= KRT_REF_SEEN;
- }
-}
-
-static void
-krt_learn_prune(struct krt_proto *p)
-{
- struct fib *fib = &p->krt_table->fib;
- struct fib_iterator fit;
-
- KRT_TRACE(p, D_EVENTS, "Pruning inherited routes");
-
- FIB_ITERATE_INIT(&fit, fib);
-again:
- FIB_ITERATE_START(fib, &fit, net, n)
- {
- rte *e, **ee, *best, **pbest, *old_best;
-
- /*
- * Note that old_best may be NULL even if there was an old best route in
- * the previous step, because it might be replaced in krt_learn_scan().
- * But in that case there is a new valid best route.
- */
-
- old_best = NULL;
- best = NULL;
- pbest = NULL;
- ee = &n->routes;
- while (e = *ee)
- {
- if (e->pflags & KRT_REF_BEST)
- old_best = e;
-
- if (!(e->pflags & KRT_REF_SEEN))
- {
- *ee = e->next;
- rte_free(e);
- continue;
- }
-
- if (!best || krt_metric(best) > krt_metric(e))
- {
- best = e;
- pbest = ee;
- }
-
- e->pflags &= ~(KRT_REF_SEEN | KRT_REF_BEST);
- ee = &e->next;
- }
- if (!n->routes)
- {
- DBG("%I/%d: deleting\n", n->n.prefix, n->n.pxlen);
- if (old_best)
- krt_learn_announce_delete(p, n);
-
- FIB_ITERATE_PUT(&fit);
- fib_delete(fib, n);
- goto again;
- }
-
- best->pflags |= KRT_REF_BEST;
- *pbest = best->next;
- best->next = n->routes;
- n->routes = best;
-
- if ((best != old_best) || p->reload)
- {
- DBG("%I/%d: announcing (metric=%d)\n", n->n.prefix, n->n.pxlen, krt_metric(best));
- krt_learn_announce_update(p, best);
- }
- else
- DBG("%I/%d: uptodate (metric=%d)\n", n->n.prefix, n->n.pxlen, krt_metric(best));
- }
- FIB_ITERATE_END;
-
- p->reload = 0;
+ rte_update(p->p.main_channel, e->net, &e0, e0.src);
+ rt_unlock_source(e0.src);
}
static void
krt_learn_async(struct krt_proto *p, rte *e, int new)
{
- net *n0 = e->net;
- net *n = net_get(p->krt_table, n0->n.addr);
- rte *g, **gg, *best, **bestp, *old_best;
-
- krt_learn_alien_attr(p->p.main_channel, e);
-
- old_best = n->routes;
- for(gg=&n->routes; g = *gg; gg = &g->next)
- if (krt_same_key(g, e))
- break;
if (new)
- {
- if (g)
- {
- if (krt_uptodate(g, e))
- {
- krt_trace_in(p, e, "[alien async] same");
- rte_free(e);
- return;
- }
- krt_trace_in(p, e, "[alien async] updated");
- *gg = g->next;
- rte_free(g);
- }
- else
- krt_trace_in(p, e, "[alien async] created");
+ return krt_learn_scan(p, e);
- e->next = n->routes;
- n->routes = e;
- }
- else if (!g)
- {
- krt_trace_in(p, e, "[alien async] delete failed");
- rte_free(e);
- return;
- }
- else
- {
- krt_trace_in(p, e, "[alien async] removed");
- *gg = g->next;
- rte_free(e);
- rte_free(g);
- }
- best = n->routes;
- bestp = &n->routes;
- for(gg=&n->routes; g=*gg; gg=&g->next)
- {
- if (krt_metric(best) > krt_metric(g))
- {
- best = g;
- bestp = gg;
- }
-
- g->pflags &= ~KRT_REF_BEST;
- }
-
- if (best)
- {
- best->pflags |= KRT_REF_BEST;
- *bestp = best->next;
- best->next = n->routes;
- n->routes = best;
- }
-
- if (best != old_best)
- {
- DBG("krt_learn_async: distributing change\n");
- if (best)
- krt_learn_announce_update(p, best);
- else
- krt_learn_announce_delete(p, n);
- }
-}
-
-static void
-krt_learn_init(struct krt_proto *p)
-{
- if (KRT_CF->learn)
- {
- struct rtable_config *cf = mb_allocz(p->p.pool, sizeof(struct rtable_config));
- cf->name = "Inherited";
- cf->addr_type = p->p.net_type;
- cf->internal = 1;
-
- p->krt_table = rt_setup(p->p.pool, cf);
- }
-}
-
-static void
-krt_dump(struct proto *P)
-{
- struct krt_proto *p = (struct krt_proto *) P;
-
- if (!KRT_CF->learn)
- return;
- debug("KRT: Table of inheritable routes\n");
- rt_dump(p->krt_table);
+ struct rte_src *src = rt_get_source(&p->p, krt_metric(e));
+ rte_update(p->p.main_channel, e->net, NULL, src);
+ rt_unlock_source(src);
}
#endif
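
Both learn paths above now reduce to a single pattern: take a route source
keyed by the kernel metric, push the update (or a NULL withdrawal) through the
main channel, and release the source again. Sketched in isolation, using only
calls that appear in this diff (announce_alien is a hypothetical name):

static void
announce_alien(struct krt_proto *p, const net_addr *n, rte *e, u32 metric)
{
  struct rte_src *src = rt_get_source(&p->p, metric);
  rte_update(p->p.main_channel, n, e, src);   /* e == NULL withdraws */
  rt_unlock_source(src);
}
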
@@ -551,40 +341,60 @@ krt_dump(struct proto *P)
static inline int
krt_is_installed(struct krt_proto *p, net *n)
{
- return n->routes && bmap_test(&p->p.main_channel->export_map, n->routes->id);
+ return n->routes && bmap_test(&p->p.main_channel->export_map, n->routes->rte.id);
}
-static void
-krt_flush_routes(struct krt_proto *p)
+static uint
+rte_feed_count(net *n)
{
- struct rtable *t = p->p.main_channel->table;
+ uint count = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTE_OR_NULL(e)))
+ count++;
+ return count;
+}
- KRT_TRACE(p, D_EVENTS, "Flushing kernel routes");
- FIB_WALK(&t->fib, net, n)
+static void
+rte_feed_obtain(net *n, const rte **feed, uint count)
+{
+ uint i = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTE_OR_NULL(e)))
{
- if (krt_is_installed(p, n))
- {
- /* FIXME: this does not work if gw is changed in export filter */
- krt_replace_rte(p, n, NULL, n->routes);
- }
+ ASSERT_DIE(i < count);
+ feed[i++] = &e->rte;
}
- FIB_WALK_END;
+ ASSERT_DIE(i == count);
}
static struct rte *
-krt_export_net(struct krt_proto *p, net *net, rte **rt_free)
+krt_export_net(struct krt_proto *p, net *net)
{
+ /* FIXME: Filters are called here in a table-locked context when exporting
+ * to the kernel, so BIRD can crash if the user requests a ROA check in the
+ * kernel export filter. Writing filters that way makes little sense, so we
+ * keep this unfinished piece of work for later; it should not affect
+ * anybody in practice. */
+ ASSERT_DIE(RT_IS_LOCKED(p->p.main_channel->table));
+
struct channel *c = p->p.main_channel;
const struct filter *filter = c->out_filter;
- rte *rt;
if (c->ra_mode == RA_MERGED)
- return rt_export_merged(c, net, rt_free, krt_filter_lp, 1);
+ {
+ uint count = rte_feed_count(net);
+ if (!count)
+ return NULL;
+
+ const rte **feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(net, feed, count);
+ return rt_export_merged(c, feed, count, krt_filter_lp, 1);
+ }
- rt = net->routes;
- *rt_free = NULL;
+ static _Thread_local rte rt;
+ rt = net->routes->rte;
- if (!rte_is_valid(rt))
+ if (!rte_is_valid(&rt))
return NULL;
if (filter == FILTER_REJECT)
@@ -595,33 +405,26 @@ krt_export_net(struct krt_proto *p, net *net, rte **rt_free)
if (filter == FILTER_ACCEPT)
goto accept;
- if (f_run(filter, &rt, krt_filter_lp, FF_SILENT) > F_ACCEPT)
+ if (f_run(filter, &rt, FF_SILENT) > F_ACCEPT)
goto reject;
accept:
- if (rt != net->routes)
- *rt_free = rt;
- return rt;
+ return &rt;
reject:
- if (rt != net->routes)
- rte_free(rt);
return NULL;
}
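
Note the switch from heap-allocated result routes to a static _Thread_local
rte: the returned pointer stays valid only until the next call on the same
thread, which suffices for the immediate krt_replace_rte() use and removes the
rt_free bookkeeping. The idiom in isolation (a sketch; fmt_metric is a
hypothetical helper, bsnprintf as in lib/string.h):

/* Returns scratch storage valid only until the next call on this thread. */
static const char *
fmt_metric(u32 metric)
{
  static _Thread_local char buf[16];
  bsnprintf(buf, sizeof buf, "%u", metric);
  return buf;
}
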
static int
krt_same_dest(rte *k, rte *e)
{
- rta *ka = k->attrs, *ea = e->attrs;
+ ea_list *ka = k->attrs, *ea = e->attrs;
- if (ka->dest != ea->dest)
- return 0;
-
- if (ka->dest == RTD_UNICAST)
- return nexthop_same(&(ka->nh), &(ea->nh));
+ eattr *nhea_k = ea_find(ka, &ea_gen_nexthop);
+ eattr *nhea_e = ea_find(ea, &ea_gen_nexthop);
- return 1;
+ if (!nhea_k != !nhea_e)
+ return 0;
+
+ /* Both nexthops may be absent (e.g. non-unicast routes); avoid a NULL dereference */
+ return !nhea_k || adata_same(nhea_k->u.ptr, nhea_e->u.ptr);
}
/*
@@ -632,41 +435,48 @@ krt_same_dest(rte *k, rte *e)
void
krt_got_route(struct krt_proto *p, rte *e, s8 src)
{
- rte *new = NULL, *rt_free = NULL;
- net *n = e->net;
+ rte *new = NULL;
e->pflags = 0;
#ifdef KRT_ALLOW_LEARN
switch (src)
{
case KRT_SRC_KERNEL:
- goto ignore;
+ krt_trace_in(p, e, "ignored");
+ return;
case KRT_SRC_REDIRECT:
- goto delete;
+ krt_trace_in(p, e, "deleting");
+ krt_replace_rte(p, e->net, NULL, e);
+ return;
case KRT_SRC_ALIEN:
if (KRT_CF->learn)
krt_learn_scan(p, e);
else
- {
- krt_trace_in_rl(&rl_alien, p, e, "[alien] ignored");
- rte_free(e);
- }
+ krt_trace_in_rl(&rl_alien, p, e, "[alien] ignored");
return;
}
#endif
+
/* The rest is for KRT_SRC_BIRD (or KRT_SRC_UNKNOWN) */
+ RT_LOCKED(p->p.main_channel->table, tab)
+ {
+
+ /* Deleting all routes if flush is requested */
+ if (p->flush_routes)
+ goto delete;
/* We wait for the initial feed to have correct installed state */
if (!p->ready)
goto ignore;
- if (!krt_is_installed(p, n))
+ net *net = net_find(tab, e->net);
+ if (!net || !krt_is_installed(p, net))
goto delete;
- new = krt_export_net(p, n, &rt_free);
+ new = krt_export_net(p, net);
/* Rejected by filters */
if (!new)
@@ -699,19 +509,16 @@ ignore:
update:
krt_trace_in(p, new, "updating");
- krt_replace_rte(p, n, new, e);
+ krt_replace_rte(p, e->net, new, e);
goto done;
delete:
krt_trace_in(p, e, "deleting");
- krt_replace_rte(p, n, NULL, e);
+ krt_replace_rte(p, e->net, NULL, e);
goto done;
-done:
- rte_free(e);
-
- if (rt_free)
- rte_free(rt_free);
+done:;
+ }
lp_flush(krt_filter_lp);
}
@@ -725,43 +532,47 @@ krt_init_scan(struct krt_proto *p)
static void
krt_prune(struct krt_proto *p)
{
- struct rtable *t = p->p.main_channel->table;
+ RT_LOCKED(p->p.main_channel->table, t)
+ {
KRT_TRACE(p, D_EVENTS, "Pruning table %s", t->name);
FIB_WALK(&t->fib, net, n)
{
- if (p->ready && krt_is_installed(p, n) && !bmap_test(&p->seen_map, n->routes->id))
+ if (p->ready && krt_is_installed(p, n) && !bmap_test(&p->seen_map, n->routes->rte.id))
{
- rte *rt_free = NULL;
- rte *new = krt_export_net(p, n, &rt_free);
+ rte *new = krt_export_net(p, n);
if (new)
{
krt_trace_in(p, new, "installing");
- krt_replace_rte(p, n, new, NULL);
+ krt_replace_rte(p, n->n.addr, new, NULL);
}
- if (rt_free)
- rte_free(rt_free);
-
lp_flush(krt_filter_lp);
}
}
FIB_WALK_END;
-#ifdef KRT_ALLOW_LEARN
- if (KRT_CF->learn)
- krt_learn_prune(p);
-#endif
-
if (p->ready)
p->initialized = 1;
+
+ }
+}
+
+static void
+krt_flush_routes(struct krt_proto *p)
+{
+ KRT_TRACE(p, D_EVENTS, "Flushing kernel routes");
+ p->flush_routes = 1;
+ krt_init_scan(p);
+ krt_do_scan(p);
+ /* No prune! */
+ p->flush_routes = 0;
}
void
krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
{
- net *net = e->net;
e->pflags = 0;
switch (src)
@@ -774,7 +585,7 @@ krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
if (new)
{
krt_trace_in(p, e, "[redirect] deleting");
- krt_replace_rte(p, net, NULL, e);
+ krt_replace_rte(p, e->net, NULL, e);
}
/* If !new, it is probably echo of our deletion */
break;
@@ -788,7 +599,6 @@ krt_got_route_async(struct krt_proto *p, rte *e, int new, s8 src)
}
#endif
}
- rte_free(e);
}
@@ -910,9 +720,12 @@ krt_scan_timer_kick(struct krt_proto *p)
static int
krt_preexport(struct channel *C, rte *e)
{
- // struct krt_proto *p = (struct krt_proto *) P;
- if (e->src->proto == C->proto)
+ if (e->src->owner == &C->proto->sources)
+#ifdef CONFIG_SINGLE_ROUTE
+ return 1;
+#else
return -1;
+#endif
if (!krt_capable(e))
return -1;
@@ -921,8 +734,8 @@ krt_preexport(struct channel *C, rte *e)
}
static void
-krt_rt_notify(struct proto *P, struct channel *ch UNUSED, net *net,
- rte *new, rte *old)
+krt_rt_notify(struct proto *P, struct channel *ch UNUSED, const net_addr *net,
+ rte *new, const rte *old)
{
struct krt_proto *p = (struct krt_proto *) P;
@@ -930,14 +743,8 @@ krt_rt_notify(struct proto *P, struct channel *ch UNUSED, net *net,
return;
#ifdef CONFIG_SINGLE_ROUTE
- /*
- * Implicit withdraw - when the imported kernel route becomes the best one,
- * we know that the previous one exported to the kernel was already removed,
- * but if we processed the update as usual, we would send withdraw to the
- * kernel, which would remove the new imported route instead.
- */
- rte *best = net->routes;
- if (!new && best && (best->src->proto == P))
+ /* Got the same route as we imported. Keep it, do nothing. */
+ if (new && new->src->owner == &P->sources)
return;
#endif
@@ -985,6 +792,14 @@ krt_feed_end(struct channel *C)
krt_scan_timer_kick(p);
}
+static int
+krt_rte_better(const rte *new, const rte *old)
+{
+ u32 n = ea_get_int(new->attrs, &ea_krt_metric, IGP_METRIC_UNKNOWN);
+ u32 o = ea_get_int(old->attrs, &ea_krt_metric, IGP_METRIC_UNKNOWN);
+
+ return (n < o);
+}
/*
* Protocol glue
@@ -1026,6 +841,10 @@ krt_postconfig(struct proto_config *CF)
krt_sys_postconfig(cf);
}
+struct rte_owner_class krt_rte_owner_class = {
+ .rte_better = krt_rte_better,
+};
+
static struct proto *
krt_init(struct proto_config *CF)
{
@@ -1036,10 +855,12 @@ krt_init(struct proto_config *CF)
p->p.preexport = krt_preexport;
p->p.rt_notify = krt_rt_notify;
- p->p.if_notify = krt_if_notify;
+ p->p.iface_sub.if_notify = krt_if_notify;
p->p.reload_routes = krt_reload_routes;
p->p.feed_end = krt_feed_end;
+ p->p.sources.class = &krt_rte_owner_class;
+
krt_sys_init(p);
return &p->p;
}
@@ -1064,10 +885,6 @@ krt_start(struct proto *P)
bmap_init(&p->seen_map, p->p.pool, 1024);
add_tail(&krt_proto_list, &p->krt_node);
-#ifdef KRT_ALLOW_LEARN
- krt_learn_init(p);
-#endif
-
if (!krt_sys_start(p))
{
rem_node(&p->krt_node);
@@ -1147,24 +964,15 @@ krt_copy_config(struct proto_config *dest, struct proto_config *src)
krt_sys_copy_config(d, s);
}
-static int
-krt_get_attr(const eattr *a, byte *buf, int buflen)
-{
- switch (a->id)
- {
- case EA_KRT_SOURCE:
- bsprintf(buf, "source");
- return GA_NAME;
-
- case EA_KRT_METRIC:
- bsprintf(buf, "metric");
- return GA_NAME;
-
- default:
- return krt_sys_get_attr(a, buf, buflen);
- }
-}
+struct ea_class ea_krt_source = {
+ .name = "krt_source",
+ .type = T_INT,
+};
+struct ea_class ea_krt_metric = {
+ .name = "krt_metric",
+ .type = T_INT,
+};
#ifdef CONFIG_IP6_SADR_KERNEL
#define MAYBE_IP6_SADR NB_IP6_SADR
@@ -1181,7 +989,6 @@ krt_get_attr(const eattr *a, byte *buf, int buflen)
struct protocol proto_unix_kernel = {
.name = "Kernel",
.template = "kernel%d",
- .class = PROTOCOL_KERNEL,
.preference = DEF_PREF_INHERITED,
.channel_mask = NB_IP | MAYBE_IP6_SADR | MAYBE_MPLS,
.proto_size = sizeof(struct krt_proto),
@@ -1193,14 +1000,15 @@ struct protocol proto_unix_kernel = {
.shutdown = krt_shutdown,
.reconfigure = krt_reconfigure,
.copy_config = krt_copy_config,
- .get_attr = krt_get_attr,
-#ifdef KRT_ALLOW_LEARN
- .dump = krt_dump,
-#endif
};
void
krt_build(void)
{
proto_build(&proto_unix_kernel);
+
+ EA_REGISTER_ALL(
+ &ea_krt_source,
+ &ea_krt_metric,
+ );
}
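
With krt_source and krt_metric registered as ea_class instances instead of
fixed EA_CODE slots, protocol code goes through the generic attribute API. A
minimal sketch, using only accessors that appear elsewhere in this diff
(the example_* names are hypothetical):

/* Read and write krt_metric on a route's attribute list. */
static u32
example_get_metric(const rte *e)
{
  return ea_get_int(e->attrs, &ea_krt_metric, IGP_METRIC_UNKNOWN);
}

static void
example_set_metric(rte *e, u32 metric)
{
  ea_set_attr_u32(&e->attrs, &ea_krt_metric, 0, metric);
}
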
diff --git a/sysdep/unix/krt.h b/sysdep/unix/krt.h
index 18a206e6..9f7ebb4f 100644
--- a/sysdep/unix/krt.h
+++ b/sysdep/unix/krt.h
@@ -21,8 +21,7 @@ struct kif_proto;
#define KRT_DEFAULT_ECMP_LIMIT 16
-#define EA_KRT_SOURCE EA_CODE(PROTOCOL_KERNEL, 0)
-#define EA_KRT_METRIC EA_CODE(PROTOCOL_KERNEL, 1)
+extern struct ea_class ea_krt_source, ea_krt_metric;
#define KRT_REF_SEEN 0x1 /* Seen in table */
#define KRT_REF_BEST 0x2 /* Best in table */
@@ -51,10 +50,6 @@ struct krt_proto {
struct proto p;
struct krt_state sys; /* Sysdep state */
-#ifdef KRT_ALLOW_LEARN
- struct rtable *krt_table; /* Internal table of inherited routes */
-#endif
-
timer *scan_timer;
struct bmap sync_map; /* Keeps track which exported routes were successfully written to kernel */
struct bmap seen_map; /* Routes seen during last periodic scan */
@@ -63,6 +58,7 @@ struct krt_proto {
byte ready; /* Initial feed has been finished */
byte initialized; /* First scan has been finished */
byte reload; /* Next scan is doing reload */
+ byte flush_routes; /* Scanning to flush */
};
extern pool *krt_pool;
@@ -141,7 +137,7 @@ void krt_sys_copy_config(struct krt_config *, struct krt_config *);
int krt_capable(rte *e);
void krt_do_scan(struct krt_proto *);
-void krt_replace_rte(struct krt_proto *p, net *n, rte *new, rte *old);
+void krt_replace_rte(struct krt_proto *p, const net_addr *n, rte *new, const rte *old);
int krt_sys_get_attr(const eattr *a, byte *buf, int buflen);
diff --git a/sysdep/unix/log.c b/sysdep/unix/log.c
index 53122aee..be7f8adf 100644
--- a/sysdep/unix/log.c
+++ b/sysdep/unix/log.c
@@ -15,6 +15,7 @@
* user's manual.
*/
+#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -36,8 +37,10 @@ static FILE *dbgf;
static list *current_log_list;
static char *current_syslog_name; /* NULL -> syslog closed */
+static _Atomic uint max_thread_id = ATOMIC_VAR_INIT(1);
+static _Thread_local uint this_thread_id;
-#ifdef USE_PTHREADS
+#define THIS_THREAD_ID (this_thread_id ?: (this_thread_id = atomic_fetch_add_explicit(&max_thread_id, 1, memory_order_acq_rel)))
#include <pthread.h>
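
THIS_THREAD_ID above assigns IDs lazily: the thread-local starts at zero and
the first evaluation in each thread claims a fresh number from the atomic
counter. The same pattern as a standalone sketch:

#include <stdatomic.h>

/* IDs start at 1 so that 0 can mean "not assigned yet". */
static _Atomic unsigned next_id = 1;
static _Thread_local unsigned my_id;

static unsigned
thread_id(void)
{
  if (!my_id)   /* first call in this thread claims an ID */
    my_id = atomic_fetch_add_explicit(&next_id, 1, memory_order_acq_rel);
  return my_id;
}
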
@@ -49,15 +52,6 @@ static pthread_t main_thread;
void main_thread_init(void) { main_thread = pthread_self(); }
static int main_thread_self(void) { return pthread_equal(pthread_self(), main_thread); }
-#else
-
-static inline void log_lock(void) { }
-static inline void log_unlock(void) { }
-void main_thread_init(void) { }
-static int main_thread_self(void) { return 1; }
-
-#endif
-
#ifdef HAVE_SYSLOG_H
#include <sys/syslog.h>
@@ -190,7 +184,7 @@ log_commit(int class, buffer *buf)
l->pos += msg_len;
}
- fprintf(l->fh, "%s <%s> ", tbuf, class_names[class]);
+ fprintf(l->fh, "%s [%04x] <%s> ", tbuf, THIS_THREAD_ID, class_names[class]);
}
fputs(buf->start, l->fh);
fputc('\n', l->fh);
@@ -300,6 +294,8 @@ die(const char *msg, ...)
exit(1);
}
+static struct timespec dbg_time_start;
+
/**
* debug - write to debug output
* @msg: a printf-like message
@@ -312,12 +308,34 @@ debug(const char *msg, ...)
{
#define MAX_DEBUG_BUFSIZE 16384
va_list args;
- char buf[MAX_DEBUG_BUFSIZE];
+ char buf[MAX_DEBUG_BUFSIZE], *pos = buf;
+ int max = MAX_DEBUG_BUFSIZE;
va_start(args, msg);
if (dbgf)
{
- if (bvsnprintf(buf, MAX_DEBUG_BUFSIZE, msg, args) < 0)
+#if 0
+ struct timespec dbg_time;
+ clock_gettime(CLOCK_MONOTONIC, &dbg_time);
+ uint nsec;
+ uint sec;
+
+ if (dbg_time.tv_nsec > dbg_time_start.tv_nsec)
+ {
+ nsec = dbg_time.tv_nsec - dbg_time_start.tv_nsec;
+ sec = dbg_time.tv_sec - dbg_time_start.tv_sec;
+ }
+ else
+ {
+ nsec = 1000000000 + dbg_time.tv_nsec - dbg_time_start.tv_nsec;
+ sec = dbg_time.tv_sec - dbg_time_start.tv_sec - 1;
+ }
+
+ int n = bsnprintf(pos, max, "%u.%09u: [%04x] ", sec, nsec, THIS_THREAD_ID);
+ pos += n;
+ max -= n;
+#endif
+ if (bvsnprintf(pos, max, msg, args) < 0)
bug("Extremely long debug output, split it.");
fputs(buf, dbgf);
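
The disabled block above subtracts two monotonic timestamps with a manual
borrow when tv_nsec would go negative. The same arithmetic as a standalone
helper (a sketch mirroring the #if 0 code, not a BIRD API):

#include <time.h>

/* Difference a - b, borrowing one second when nanoseconds underflow. */
static struct timespec
ts_sub(struct timespec a, struct timespec b)
{
  struct timespec d;
  d.tv_sec = a.tv_sec - b.tv_sec;
  d.tv_nsec = a.tv_nsec - b.tv_nsec;
  if (d.tv_nsec < 0)
  {
    d.tv_nsec += 1000000000;
    d.tv_sec--;
  }
  return d;
}
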
@@ -438,6 +456,8 @@ done:
void
log_init_debug(char *f)
{
+ clock_gettime(CLOCK_MONOTONIC, &dbg_time_start);
+
dbg_fd = -1;
if (dbgf && dbgf != stderr)
fclose(dbgf);
diff --git a/sysdep/unix/main.c b/sysdep/unix/main.c
index 627d7a4a..79db32d3 100644
--- a/sysdep/unix/main.c
+++ b/sysdep/unix/main.c
@@ -28,9 +28,10 @@
#include "lib/resource.h"
#include "lib/socket.h"
#include "lib/event.h"
+#include "lib/locking.h"
#include "lib/timer.h"
#include "lib/string.h"
-#include "nest/route.h"
+#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/cli.h"
@@ -51,12 +52,12 @@ async_dump(void)
{
debug("INTERNAL STATE DUMP\n\n");
- rdump(&root_pool);
+ rdump(&root_pool, 0);
sk_dump_all();
// XXXX tm_dump_all();
if_dump_all();
neigh_dump_all();
- rta_dump_all();
+ ea_dump_all();
rt_dump_all();
protos_dump_all();
@@ -199,9 +200,11 @@ sysdep_preconfig(struct config *c)
}
int
-sysdep_commit(struct config *new, struct config *old UNUSED)
+sysdep_commit(struct config *new, struct config *old)
{
log_switch(0, &new->logfiles, new->syslog_name);
+ bird_thread_commit(new, old);
+
return 0;
}
@@ -401,7 +404,8 @@ static char *path_control_socket = PATH_CONTROL_SOCKET;
static void
cli_write(cli *c)
{
- sock *s = c->priv;
+ sock *s = c->sock;
+ ASSERT_DIE(c->sock);
while (c->tx_pos)
{
@@ -425,7 +429,9 @@ cli_write(cli *c)
void
cli_write_trigger(cli *c)
{
- sock *s = c->priv;
+ sock *s = c->sock;
+ if (!s)
+ return;
if (s->tbuf == NULL)
cli_write(c);
@@ -440,7 +446,8 @@ cli_tx(sock *s)
int
cli_get_command(cli *c)
{
- sock *s = c->priv;
+ sock *s = c->sock;
+ ASSERT_DIE(c->sock);
byte *t = s->rbuf;
byte *tend = s->rpos;
byte *d = c->rx_pos;
@@ -489,6 +496,7 @@ cli_err(sock *s, int err)
else
log(L_INFO "CLI connection closed");
}
+
cli_free(s->data);
}
@@ -534,7 +542,7 @@ cli_init_unix(uid_t use_uid, gid_t use_gid)
/* Return value intentionally ignored */
unlink(path_control_socket);
- if (sk_open_unix(s, path_control_socket) < 0)
+ if (sk_open_unix(s, &main_birdloop, path_control_socket) < 0)
die("Cannot create control socket %s: %m", path_control_socket);
if (use_uid || use_gid)
@@ -885,16 +893,19 @@ main(int argc, char **argv)
dmalloc_debug(0x2f03d00);
#endif
+ times_update();
parse_args(argc, argv);
log_switch(1, NULL, NULL);
+ the_bird_lock();
+
random_init();
net_init();
resource_init();
- timer_init();
+ birdloop_init();
olock_init();
- io_init();
rt_init();
+ io_init();
if_init();
// roa_init();
config_init();
@@ -924,6 +935,8 @@ main(int argc, char **argv)
if (parse_and_exit)
exit(0);
+ flush_local_pages();
+
if (!run_in_foreground)
{
pid_t pid = fork();
@@ -939,6 +952,7 @@ main(int argc, char **argv)
dup2(0, 2);
}
+
main_thread_init();
write_pid_file();
diff --git a/sysdep/unix/unix.h b/sysdep/unix/unix.h
index ad85d1ea..606b79cd 100644
--- a/sysdep/unix/unix.h
+++ b/sysdep/unix/unix.h
@@ -9,6 +9,9 @@
#ifndef _BIRD_UNIX_H_
#define _BIRD_UNIX_H_
+#include "nest/bird.h"
+#include "lib/io-loop.h"
+
#include <sys/socket.h>
#include <signal.h>
@@ -16,6 +19,7 @@ struct pool;
struct iface;
struct birdsock;
struct rfile;
+struct config;
/* main.c */
@@ -32,6 +36,8 @@ void cmd_reconfig_undo(void);
void cmd_reconfig_status(void);
void cmd_shutdown(void);
void cmd_graceful_restart(void);
+void cmd_show_threads(int);
+void bird_thread_commit(struct config *new, struct config *old);
#define UNIX_DEFAULT_CONFIGURE_TIMEOUT 300
@@ -107,7 +113,7 @@ extern volatile sig_atomic_t async_shutdown_flag;
void io_init(void);
void io_loop(void);
void io_log_dump(void);
-int sk_open_unix(struct birdsock *s, char *name);
+int sk_open_unix(struct birdsock *s, struct birdloop *, char *name);
struct rfile *rf_open(struct pool *, const char *name, const char *mode);
void *rf_file(struct rfile *f);
int rf_fileno(struct rfile *f);