Diffstat (limited to 'nest')
-rw-r--r--  nest/a-path_test.c |   17
-rw-r--r--  nest/a-set_test.c  |   17
-rw-r--r--  nest/bfd.h         |   12
-rw-r--r--  nest/cli.c         |    8
-rw-r--r--  nest/cmds.c        |   13
-rw-r--r--  nest/config.Y      |   20
-rw-r--r--  nest/iface.c       |  119
-rw-r--r--  nest/iface.h       |   18
-rw-r--r--  nest/limit.h       |   49
-rw-r--r--  nest/neighbor.c    |  143
-rw-r--r--  nest/proto-hooks.c |   40
-rw-r--r--  nest/proto.c       | 1368
-rw-r--r--  nest/protocol.h    |  220
-rw-r--r--  nest/route.h       |  531
-rw-r--r--  nest/rt-attr.c     |  433
-rw-r--r--  nest/rt-dev.c      |   19
-rw-r--r--  nest/rt-show.c     |  103
-rw-r--r--  nest/rt-table.c    | 3228
18 files changed, 3943 insertions, 2415 deletions
diff --git a/nest/a-path_test.c b/nest/a-path_test.c
index 9ed0a786..2533dbae 100644
--- a/nest/a-path_test.c
+++ b/nest/a-path_test.c
@@ -12,6 +12,7 @@
#include "nest/route.h"
#include "nest/attrs.h"
#include "lib/resource.h"
+#include "lib/io-loop.h"
#define TESTS_NUM 30
#define AS_PATH_LENGTH 1000
@@ -23,8 +24,6 @@
static int
t_as_path_match(void)
{
- resource_init();
-
int round;
for (round = 0; round < TESTS_NUM; round++)
{
@@ -70,8 +69,6 @@ t_as_path_match(void)
static int
t_path_format(void)
{
- resource_init();
-
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
@@ -116,8 +113,6 @@ count_asn_in_array(const u32 *array, u32 asn)
static int
t_path_include(void)
{
- resource_init();
-
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
@@ -161,8 +156,6 @@ t_path_include(void)
static int
t_as_path_converting(void)
{
- resource_init();
-
struct adata empty_as_path = {};
struct adata *as_path = &empty_as_path;
struct linpool *lp = lp_new_default(&root_pool);
@@ -204,10 +197,18 @@ t_as_path_converting(void)
}
#endif
+void resource_sys_init(void);
+void io_init(void);
+
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
+ resource_sys_init();
+ resource_init();
+ the_bird_lock();
+ birdloop_init();
+ io_init();
bt_test_suite(t_as_path_match, "Testing AS path matching and some a-path utilities.");
bt_test_suite(t_path_format, "Testing formatting as path into byte buffer");
diff --git a/nest/a-set_test.c b/nest/a-set_test.c
index 96b6a727..f8f6e781 100644
--- a/nest/a-set_test.c
+++ b/nest/a-set_test.c
@@ -13,6 +13,7 @@
#include "nest/route.h"
#include "nest/attrs.h"
#include "lib/resource.h"
+#include "lib/io-loop.h"
#define SET_SIZE 10
static const struct adata *set_sequence; /* <0; SET_SIZE) */
@@ -71,7 +72,6 @@ t_set_int_contains(void)
{
int i;
- resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
bt_assert(int_set_get_size(set_sequence) == SET_SIZE);
@@ -92,7 +92,6 @@ t_set_int_contains(void)
static int
t_set_int_union(void)
{
- resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
const struct adata *set_union;
@@ -111,7 +110,6 @@ t_set_int_union(void)
static int
t_set_int_format(void)
{
- resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE_FOR_FORMAT_OUTPUT);
bt_assert(int_set_format(set_sequence, 0, 0, buf, BUFFER_SIZE) == 0);
@@ -132,7 +130,6 @@ t_set_int_format(void)
static int
t_set_int_delete(void)
{
- resource_init();
generate_set_sequence(SET_TYPE_INT, SET_SIZE);
const struct adata *deleting_sequence = set_sequence;
@@ -160,7 +157,6 @@ t_set_ec_contains(void)
{
u32 i;
- resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
bt_assert(ec_set_get_size(set_sequence) == SET_SIZE);
@@ -181,7 +177,6 @@ t_set_ec_contains(void)
static int
t_set_ec_union(void)
{
- resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
const struct adata *set_union;
@@ -200,8 +195,6 @@ t_set_ec_union(void)
static int
t_set_ec_format(void)
{
- resource_init();
-
const struct adata empty_as_path = {};
set_sequence = set_sequence_same = set_sequence_higher = set_random = &empty_as_path;
lp = lp_new_default(&root_pool);
@@ -222,7 +215,6 @@ t_set_ec_format(void)
static int
t_set_ec_delete(void)
{
- resource_init();
generate_set_sequence(SET_TYPE_EC, SET_SIZE);
const struct adata *deleting_sequence = set_sequence;
@@ -240,10 +232,17 @@ t_set_ec_delete(void)
return 1;
}
+
+void resource_sys_init(void);
+
int
main(int argc, char *argv[])
{
bt_init(argc, argv);
+ resource_sys_init();
+ resource_init();
+ the_bird_lock();
+ birdloop_init();
bt_test_suite(t_set_int_contains, "Testing sets of integers: contains, get_data");
bt_test_suite(t_set_int_format, "Testing sets of integers: format");
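
Both test files above drop the per-test resource_init() calls in favour of a one-time bootstrap in main(); a-path_test.c additionally calls io_init(). A consolidated sketch of that bootstrap, assuming only the calls shown in the hunks (the comments are interpretation, not upstream documentation):

int
main(int argc, char *argv[])
{
  bt_init(argc, argv);
  resource_sys_init();          /* low-level allocator, before any pool use */
  resource_init();              /* sets up root_pool once for all suites */
  the_bird_lock();              /* tests behave as if inside the main loop */
  birdloop_init();
  /* ... bt_test_suite() registrations follow ... */
  return bt_exit_value();
}
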
diff --git a/nest/bfd.h b/nest/bfd.h
index 37561266..c91d6648 100644
--- a/nest/bfd.h
+++ b/nest/bfd.h
@@ -23,6 +23,11 @@ struct bfd_options {
u8 mode;
};
+struct bfd_session_state {
+ u8 state;
+ u8 diag;
+};
+
struct bfd_request {
resource r;
node n;
@@ -35,12 +40,12 @@ struct bfd_request {
void (*hook)(struct bfd_request *);
void *data;
+ event event;
struct bfd_session *session;
-
+ struct bfd_session_state old_state;
u8 state;
u8 diag;
- u8 old_state;
u8 down;
};
@@ -51,13 +56,12 @@ struct bfd_request {
#define BFD_STATE_INIT 2
#define BFD_STATE_UP 3
-
static inline struct bfd_options * bfd_new_options(void)
{ return cfg_allocz(sizeof(struct bfd_options)); }
#ifdef CONFIG_BFD
-struct bfd_request * bfd_request_session(pool *p, ip_addr addr, ip_addr local, struct iface *iface, struct iface *vrf, void (*hook)(struct bfd_request *), void *data, const struct bfd_options *opts);
+struct bfd_request * bfd_request_session(pool *p, ip_addr addr, ip_addr local, struct iface *iface, struct iface *vrf, void (*hook)(struct bfd_request *), void *data, struct event_list *list, const struct bfd_options *opts);
void bfd_update_request(struct bfd_request *req, const struct bfd_options *opts);
static inline void cf_check_bfd(int use UNUSED) { }
diff --git a/nest/cli.c b/nest/cli.c
index b54a0d76..7e5d2151 100644
--- a/nest/cli.c
+++ b/nest/cli.c
@@ -262,7 +262,7 @@ cli_command(struct cli *c)
log(L_TRACE "CLI: %s", c->rx_buf);
bzero(&f, sizeof(f));
f.mem = c->parser_pool;
- f.pool = rp_new(c->pool, "Config");
+ f.pool = rp_new(c->pool, &main_birdloop, "Config");
init_list(&f.symbols);
cf_read_hook = cli_cmd_read_hook;
cli_rh_pos = c->rx_buf;
@@ -308,7 +308,7 @@ cli_event(void *data)
cli *
cli_new(void *priv)
{
- pool *p = rp_new(cli_pool, "CLI");
+ pool *p = rp_new(cli_pool, &main_birdloop, "CLI");
cli *c = mb_alloc(p, sizeof(cli));
bzero(c, sizeof(cli));
@@ -413,7 +413,7 @@ cli_free(cli *c)
c->cleanup(c);
if (c == cmd_reconfig_stored_cli)
cmd_reconfig_stored_cli = NULL;
- rfree(c->pool);
+ rp_free(c->pool, &root_pool);
}
/**
@@ -425,7 +425,7 @@ cli_free(cli *c)
void
cli_init(void)
{
- cli_pool = rp_new(&root_pool, "CLI");
+ cli_pool = rp_new(&root_pool, &main_birdloop, "CLI");
init_list(&cli_log_hooks);
cli_log_inited = 1;
}
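
Two pool-API shifts recur throughout this diff and show up first in cli.c: rp_new() now names the event loop that owns the pool, and a pool is released with rp_free() stating its parent rather than with a generic rfree(). A minimal sketch assuming only those two signatures (example_pool and the function names are illustrative):

static pool *example_pool;

static void
example_init(void)
{
  /* the pool is bound to the loop that owns it, here the main loop */
  example_pool = rp_new(&root_pool, &main_birdloop, "Example");
}

static void
example_shutdown(void)
{
  /* freeing now states the parent pool explicitly */
  rp_free(example_pool, &root_pool);
}
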
diff --git a/nest/cmds.c b/nest/cmds.c
index 1a16f9c7..77c92077 100644
--- a/nest/cmds.c
+++ b/nest/cmds.c
@@ -114,15 +114,10 @@ cmd_show_memory(void)
{
cli_msg(-1018, "BIRD memory usage");
cli_msg(-1018, "%-17s Effective Overhead", "");
- print_size("Routing tables:", rmemsize(rt_table_pool));
- print_size("Route attributes:", rmemsize(rta_pool));
- print_size("Protocols:", rmemsize(proto_pool));
- struct resmem total = rmemsize(&root_pool);
-#ifdef HAVE_MMAP
- print_size("Standby memory:", (struct resmem) { .overhead = get_page_size() * pages_kept });
- total.overhead += get_page_size() * pages_kept;
-#endif
- print_size("Total:", total);
+ print_size("Routing tables:", rp_memsize(rt_table_pool));
+ print_size("Route attributes:", rp_memsize(rta_pool));
+ print_size("Protocols:", rp_memsize(proto_pool));
+ print_size("Total:", rp_memsize(&root_pool));
cli_msg(0, "");
}
diff --git a/nest/config.Y b/nest/config.Y
index 7ead8589..f9ed0e69 100644
--- a/nest/config.Y
+++ b/nest/config.Y
@@ -111,7 +111,7 @@ proto_postconfig(void)
CF_DECLS
-CF_KEYWORDS(ROUTER, ID, HOSTNAME, PROTOCOL, TEMPLATE, PREFERENCE, DISABLED, DEBUG, ALL, OFF, DIRECT)
+CF_KEYWORDS(ROUTER, ID, HOSTNAME, PROTOCOL, TEMPLATE, PREFERENCE, DISABLED, DEBUG, ALL, OFF, DIRECT, PIPE)
CF_KEYWORDS(INTERFACE, IMPORT, EXPORT, FILTER, NONE, VRF, DEFAULT, TABLE, STATES, ROUTES, FILTERS)
CF_KEYWORDS(IPV4, IPV6, VPN4, VPN6, ROA4, ROA6, FLOW4, FLOW6, SADR, MPLS)
CF_KEYWORDS(RECEIVE, LIMIT, ACTION, WARN, BLOCK, RESTART, DISABLE, KEEP, FILTERED, RPKI)
@@ -128,7 +128,7 @@ CF_KEYWORDS(CHECK, LINK)
/* For r_args_channel */
CF_KEYWORDS(IPV4, IPV4_MC, IPV4_MPLS, IPV6, IPV6_MC, IPV6_MPLS, IPV6_SADR, VPN4, VPN4_MC, VPN4_MPLS, VPN6, VPN6_MC, VPN6_MPLS, ROA4, ROA6, FLOW4, FLOW6, MPLS, PRI, SEC)
-CF_ENUM(T_ENUM_RTS, RTS_, DUMMY, STATIC, INHERIT, DEVICE, STATIC_DEVICE, REDIRECT,
+CF_ENUM(T_ENUM_RTS, RTS_, STATIC, INHERIT, DEVICE, STATIC_DEVICE, REDIRECT,
RIP, OSPF, OSPF_IA, OSPF_EXT1, OSPF_EXT2, BGP, PIPE, BABEL)
CF_ENUM(T_ENUM_SCOPE, SCOPE_, HOST, LINK, SITE, ORGANIZATION, UNIVERSE, UNDEFINED)
CF_ENUM(T_ENUM_RTD, RTD_, UNICAST, BLACKHOLE, UNREACHABLE, PROHIBIT)
@@ -266,8 +266,8 @@ proto_item:
| MRTDUMP mrtdump_mask { this_proto->mrtdump = $2; }
| ROUTER ID idval { this_proto->router_id = $3; }
| DESCRIPTION text { this_proto->dsc = $2; }
- | VRF text { this_proto->vrf = if_get_by_name($2); this_proto->vrf_set = 1; }
- | VRF DEFAULT { this_proto->vrf = NULL; this_proto->vrf_set = 1; }
+ | VRF text { this_proto->vrf = if_get_by_name($2); }
+ | VRF DEFAULT { this_proto->vrf = &default_vrf; }
;
@@ -348,6 +348,7 @@ debug_default:
DEBUG PROTOCOLS debug_mask { new_config->proto_default_debug = $3; }
| DEBUG CHANNELS debug_mask { new_config->channel_default_debug = $3; }
| DEBUG COMMANDS expr { new_config->cli_debug = $3; }
+ | DEBUG TABLES bool { new_config->table_debug = $3; }
;
/* MRTDUMP PROTOCOLS is in sysdep/unix/config.Y */
@@ -433,6 +434,7 @@ proto: dev_proto '}' ;
dev_proto_start: proto_start DIRECT {
this_proto = proto_config_new(&proto_device, $1);
init_list(&DIRECT_CFG->iface_list);
+ this_proto->late_if_feed = 1;
}
;
@@ -643,12 +645,12 @@ r_args:
}
| r_args IMPORT TABLE channel_arg {
if (!$4->in_table) cf_error("No import table in channel %s.%s", $4->proto->name, $4->name);
- rt_show_add_table($$, $4->in_table);
+ rt_show_add_table($$, $4->in_table->tab);
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args EXPORT TABLE channel_arg {
if (!$4->out_table) cf_error("No export table in channel %s.%s", $4->proto->name, $4->name);
- rt_show_add_table($$, $4->out_table);
+ rt_show_add_table($$, $4->out_table->tab);
$$->tables_defined_by = RSD_TDB_DIRECT;
}
| r_args FILTER filter {
@@ -808,7 +810,7 @@ sym_args:
CF_CLI_HELP(DUMP, ..., [[Dump debugging information]])
CF_CLI(DUMP RESOURCES,,, [[Dump all allocated resources]])
-{ rdump(&root_pool); cli_msg(0, ""); } ;
+{ rp_dump(&root_pool); cli_msg(0, ""); } ;
CF_CLI(DUMP SOCKETS,,, [[Dump open sockets]])
{ sk_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP EVENTS,,, [[Dump event log]])
@@ -819,8 +821,10 @@ CF_CLI(DUMP NEIGHBORS,,, [[Dump neighbor cache]])
{ neigh_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ATTRIBUTES,,, [[Dump attribute cache]])
{ rta_dump_all(); cli_msg(0, ""); } ;
-CF_CLI(DUMP ROUTES,,, [[Dump routing table]])
+CF_CLI(DUMP ROUTES,,, [[Dump routes]])
{ rt_dump_all(); cli_msg(0, ""); } ;
+CF_CLI(DUMP TABLES,,, [[Dump table connections]])
+{ rt_dump_hooks_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP PROTOCOLS,,, [[Dump protocol information]])
{ protos_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
diff --git a/nest/iface.c b/nest/iface.c
index 682340c5..78f4eb0f 100644
--- a/nest/iface.c
+++ b/nest/iface.c
@@ -36,9 +36,12 @@
static pool *if_pool;
-list iface_list;
+DOMAIN(attrs) iface_domain;
+list global_iface_list;
+struct iface default_vrf;
static void if_recalc_preferred(struct iface *i);
+static void ifa_delete_locked(struct ifa *a);
/**
* ifa_dump - dump interface address
@@ -49,6 +52,7 @@ static void if_recalc_preferred(struct iface *i);
void
ifa_dump(struct ifa *a)
{
+ IFACE_LEGACY_ACCESS;
debug("\t%I, net %N bc %I -> %I%s%s%s%s\n", a->ip, &a->prefix, a->brd, a->opposite,
(a->flags & IA_PRIMARY) ? " PRIMARY" : "",
(a->flags & IA_SECONDARY) ? " SEC" : "",
@@ -68,6 +72,7 @@ if_dump(struct iface *i)
{
struct ifa *a;
+ IFACE_LEGACY_ACCESS;
debug("IF%d: %s", i->index, i->name);
if (i->flags & IF_SHUTDOWN)
debug(" SHUTDOWN");
@@ -109,8 +114,9 @@ if_dump_all(void)
{
struct iface *i;
+ IFACE_LEGACY_ACCESS;
debug("Known network interfaces:\n");
- WALK_LIST(i, iface_list)
+ WALK_LIST(i, global_iface_list)
if_dump(i);
debug("Router ID: %08x\n", config->router_id);
}
@@ -147,7 +153,7 @@ ifa_send_notify(struct proto *p, unsigned c, struct ifa *a)
{
if (p->ifa_notify &&
(p->proto_state != PS_DOWN) &&
- (!p->vrf_set || p->vrf == a->iface->master))
+ (!p->vrf || p->vrf == a->iface->master))
{
if (p->debug & D_IFACES)
log(L_TRACE "%s < address %N on interface %s %s",
@@ -165,7 +171,8 @@ ifa_notify_change_(unsigned c, struct ifa *a)
DBG("IFA change notification (%x) for %s:%I\n", c, a->iface->name, a->ip);
WALK_LIST(p, proto_list)
- ifa_send_notify(p, c, a);
+ PROTO_LOCKED_FROM_MAIN(p)
+ ifa_send_notify(p, c, a);
}
static inline void
@@ -185,7 +192,7 @@ if_send_notify(struct proto *p, unsigned c, struct iface *i)
{
if (p->if_notify &&
(p->proto_state != PS_DOWN) &&
- (!p->vrf_set || p->vrf == i->master))
+ (!p->vrf || p->vrf == i->master))
{
if (p->debug & D_IFACES)
log(L_TRACE "%s < interface %s %s", p->name, i->name,
@@ -225,7 +232,8 @@ if_notify_change(unsigned c, struct iface *i)
ifa_notify_change_(IF_CHANGE_DOWN, a);
WALK_LIST(p, proto_list)
- if_send_notify(p, c, i);
+ PROTO_LOCKED_FROM_MAIN(p)
+ if_send_notify(p, c, i);
if (c & IF_CHANGE_UP)
WALK_LIST(a, i->addrs)
@@ -243,7 +251,7 @@ if_recalc_flags(struct iface *i UNUSED, uint flags)
{
if ((flags & IF_ADMIN_UP) &&
!(flags & (IF_SHUTDOWN | IF_TMP_DOWN)) &&
- !(i->master_index && !i->master))
+ !(i->master_index && i->master == &default_vrf))
flags |= IF_UP;
else
flags &= ~IF_UP;
@@ -301,7 +309,13 @@ if_update(struct iface *new)
struct iface *i;
unsigned c;
- WALK_LIST(i, iface_list)
+ if (!new->master)
+ new->master = &default_vrf;
+
+ IFACE_LEGACY_ACCESS;
+ IFACE_LOCK;
+
+ WALK_LIST(i, global_iface_list)
if (!strcmp(new->name, i->name))
{
new->flags = if_recalc_flags(new, new->flags);
@@ -322,6 +336,8 @@ if_update(struct iface *new)
}
if_copy(i, new);
+ IFACE_UNLOCK;
+
if (c)
if_notify_change(c, i);
@@ -334,7 +350,9 @@ if_update(struct iface *new)
newif:
init_list(&i->neighbors);
i->flags |= IF_UPDATED | IF_TMP_DOWN; /* Tmp down as we don't have addresses yet */
- add_tail(&iface_list, &i->n);
+ add_tail(&global_iface_list, &i->n);
+ IFACE_UNLOCK;
+
return i;
}
@@ -344,7 +362,8 @@ if_start_update(void)
struct iface *i;
struct ifa *a;
- WALK_LIST(i, iface_list)
+ IFACE_LEGACY_ACCESS;
+ WALK_LIST(i, global_iface_list)
{
i->flags &= ~IF_UPDATED;
WALK_LIST(a, i->addrs)
@@ -355,6 +374,8 @@ if_start_update(void)
void
if_end_partial_update(struct iface *i)
{
+ IFACE_LEGACY_ACCESS;
+
if (i->flags & IF_NEEDS_RECALC)
if_recalc_preferred(i);
@@ -368,7 +389,8 @@ if_end_update(void)
struct iface *i;
struct ifa *a, *b;
- WALK_LIST(i, iface_list)
+ IFACE_LEGACY_ACCESS;
+ WALK_LIST(i, global_iface_list)
{
if (!(i->flags & IF_UPDATED))
if_change_flags(i, (i->flags & ~IF_ADMIN_UP) | IF_SHUTDOWN);
@@ -376,7 +398,11 @@ if_end_update(void)
{
WALK_LIST_DELSAFE(a, b, i->addrs)
if (!(a->flags & IA_UPDATED))
- ifa_delete(a);
+ {
+ IFACE_LOCK;
+ ifa_delete_locked(a);
+ IFACE_UNLOCK;
+ }
if_end_partial_update(i);
}
}
@@ -385,6 +411,7 @@ if_end_update(void)
void
if_flush_ifaces(struct proto *p)
{
+ IFACE_LEGACY_ACCESS;
if (p->debug & D_EVENTS)
log(L_TRACE "%s: Flushing interfaces", p->name);
if_start_update();
@@ -404,10 +431,12 @@ if_feed_baby(struct proto *p)
struct iface *i;
struct ifa *a;
+ IFACE_LEGACY_ACCESS;
+
if (!p->if_notify && !p->ifa_notify) /* shortcut */
return;
DBG("Announcing interfaces to new protocol %s\n", p->name);
- WALK_LIST(i, iface_list)
+ WALK_LIST(i, global_iface_list)
{
if_send_notify(p, IF_CHANGE_CREATE | ((i->flags & IF_UP) ? IF_CHANGE_UP : 0), i);
if (i->flags & IF_UP)
@@ -429,9 +458,15 @@ if_find_by_index(unsigned idx)
{
struct iface *i;
- WALK_LIST(i, iface_list)
+ IFACE_LOCK;
+ WALK_LIST(i, global_iface_list)
if (i->index == idx && !(i->flags & IF_SHUTDOWN))
+ {
+ IFACE_UNLOCK;
return i;
+ }
+
+ IFACE_UNLOCK;
return NULL;
}
@@ -448,9 +483,15 @@ if_find_by_name(const char *name)
{
struct iface *i;
- WALK_LIST(i, iface_list)
+ IFACE_LOCK;
+ WALK_LIST(i, global_iface_list)
if (!strcmp(i->name, name) && !(i->flags & IF_SHUTDOWN))
+ {
+ IFACE_UNLOCK;
return i;
+ }
+
+ IFACE_UNLOCK;
return NULL;
}
@@ -459,17 +500,21 @@ if_get_by_name(const char *name)
{
struct iface *i;
- WALK_LIST(i, iface_list)
+ IFACE_LEGACY_ACCESS;
+
+ WALK_LIST(i, global_iface_list)
if (!strcmp(i->name, name))
return i;
/* No active iface, create a dummy */
+ IFACE_LOCK;
i = mb_allocz(if_pool, sizeof(struct iface));
strncpy(i->name, name, sizeof(i->name)-1);
i->flags = IF_SHUTDOWN;
init_list(&i->addrs);
init_list(&i->neighbors);
- add_tail(&iface_list, &i->n);
+ add_tail(&global_iface_list, &i->n);
+ IFACE_UNLOCK;
return i;
}
@@ -555,7 +600,9 @@ if_recalc_all_preferred_addresses(void)
{
struct iface *i;
- WALK_LIST(i, iface_list)
+ IFACE_LEGACY_ACCESS;
+
+ WALK_LIST(i, global_iface_list)
{
if_recalc_preferred(i);
@@ -585,6 +632,8 @@ ifa_update(struct ifa *a)
struct iface *i = a->iface;
struct ifa *b;
+ IFACE_LEGACY_ACCESS;
+
WALK_LIST(b, i->addrs)
if (ifa_same(b, a))
{
@@ -603,10 +652,12 @@ ifa_update(struct ifa *a)
if ((a->prefix.type == NET_IP4) && (i->flags & IF_BROADCAST) && ipa_zero(a->brd))
log(L_WARN "Missing broadcast address for interface %s", i->name);
+ IFACE_LOCK;
b = mb_alloc(if_pool, sizeof(struct ifa));
memcpy(b, a, sizeof(struct ifa));
add_tail(&i->addrs, &b->n);
b->flags |= IA_UPDATED;
+ IFACE_UNLOCK;
i->flags |= IF_NEEDS_RECALC;
if (i->flags & IF_UP)
@@ -631,6 +682,17 @@ ifa_delete(struct ifa *a)
WALK_LIST(b, i->addrs)
if (ifa_same(b, a))
{
+ IFACE_LOCK;
+ ifa_delete_locked(b);
+ IFACE_UNLOCK;
+ return;
+ }
+}
+
+static void
+ifa_delete_locked(struct ifa *b)
+{
+ struct iface *i = b->iface;
rem_node(&b->n);
if (b->flags & IA_PRIMARY)
@@ -653,7 +715,6 @@ ifa_delete(struct ifa *a)
mb_free(b);
return;
- }
}
u32
@@ -662,8 +723,10 @@ if_choose_router_id(struct iface_patt *mask, u32 old_id)
struct iface *i;
struct ifa *a, *b;
+ IFACE_LEGACY_ACCESS;
+
b = NULL;
- WALK_LIST(i, iface_list)
+ WALK_LIST(i, global_iface_list)
{
if (!(i->flags & IF_ADMIN_UP) ||
(i->flags & IF_SHUTDOWN))
@@ -709,8 +772,10 @@ if_choose_router_id(struct iface_patt *mask, u32 old_id)
void
if_init(void)
{
- if_pool = rp_new(&root_pool, "Interfaces");
- init_list(&iface_list);
+ iface_domain = DOMAIN_NEW(attrs, "Interfaces");
+ if_pool = rp_new(&root_pool, &main_birdloop, "Interfaces");
+ init_list(&global_iface_list);
+ strcpy(default_vrf.name, "default");
neigh_init(if_pool);
}
@@ -837,13 +902,15 @@ if_show(void)
struct ifa *a;
char *type;
- WALK_LIST(i, iface_list)
+ IFACE_LEGACY_ACCESS;
+
+ WALK_LIST(i, global_iface_list)
{
if (i->flags & IF_SHUTDOWN)
continue;
char mbuf[16 + sizeof(i->name)] = {};
- if (i->master)
+ if (i->master != &default_vrf)
bsprintf(mbuf, " master=%s", i->master->name);
else if (i->master_index)
bsprintf(mbuf, " master=#%u", i->master_index);
@@ -879,8 +946,10 @@ if_show_summary(void)
{
struct iface *i;
+ IFACE_LEGACY_ACCESS;
+
cli_msg(-2005, "%-10s %-6s %-18s %s", "Interface", "State", "IPv4 address", "IPv6 address");
- WALK_LIST(i, iface_list)
+ WALK_LIST(i, global_iface_list)
{
byte a4[IPA_MAX_TEXT_LENGTH + 17];
byte a6[IPA_MAX_TEXT_LENGTH + 17];
diff --git a/nest/iface.h b/nest/iface.h
index 1189cdd4..87ad86cf 100644
--- a/nest/iface.h
+++ b/nest/iface.h
@@ -9,10 +9,20 @@
#ifndef _BIRD_IFACE_H_
#define _BIRD_IFACE_H_
+#include "lib/event.h"
#include "lib/lists.h"
#include "lib/ip.h"
+#include "lib/locking.h"
-extern list iface_list;
+DEFINE_DOMAIN(attrs);
+extern list global_iface_list;
+extern DOMAIN(attrs) iface_domain;
+
+#define IFACE_LEGACY_ACCESS ASSERT_DIE(birdloop_inside(&main_birdloop))
+
+#define IFACE_LOCK LOCK_DOMAIN(attrs, iface_domain)
+#define IFACE_UNLOCK UNLOCK_DOMAIN(attrs, iface_domain)
+#define ASSERT_IFACE_LOCKED ASSERT_DIE(DOMAIN_IS_LOCKED(attrs, iface_domain))
struct proto;
struct pool;
@@ -28,6 +38,8 @@ struct ifa { /* Interface address */
unsigned flags; /* Analogous to iface->flags */
};
+extern struct iface default_vrf;
+
struct iface {
node n;
char name[16];
@@ -129,6 +141,7 @@ typedef struct neighbor {
struct ifa *ifa; /* Ifa on related iface */
struct iface *iface; /* Interface it's connected to */
struct iface *ifreq; /* Requested iface, NULL for any */
+ struct event event; /* Notification event */
struct proto *proto; /* Protocol this belongs to */
void *data; /* Protocol-specific data */
uint aux; /* Protocol-specific data */
@@ -140,13 +153,14 @@ typedef struct neighbor {
#define NEF_STICKY 1
#define NEF_ONLINK 2
#define NEF_IFACE 4 /* Entry for whole iface */
+#define NEF_NOTIFY_MAIN 0x100 /* Notify from main_birdloop context */
neighbor *neigh_find(struct proto *p, ip_addr a, struct iface *ifa, uint flags);
void neigh_dump(neighbor *);
void neigh_dump_all(void);
-void neigh_prune(void);
+void neigh_prune(struct proto *p);
void neigh_if_up(struct iface *);
void neigh_if_down(struct iface *);
void neigh_if_link(struct iface *);
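
The new macros encode a two-tier access discipline: code still confined to the main loop asserts IFACE_LEGACY_ACCESS, and any walk over the shared global_iface_list is bracketed by IFACE_LOCK/IFACE_UNLOCK, unlocking on every exit path. A sketch of the pattern the iface.c hunks above follow (lookup_iface() is a hypothetical helper, modeled on if_find_by_name()):

static struct iface *
lookup_iface(const char *name)
{
  struct iface *i;

  IFACE_LOCK;                   /* LOCK_DOMAIN(attrs, iface_domain) */
  WALK_LIST(i, global_iface_list)
    if (!strcmp(i->name, name) && !(i->flags & IF_SHUTDOWN))
    {
      IFACE_UNLOCK;             /* unlock before every return */
      return i;
    }
  IFACE_UNLOCK;
  return NULL;
}
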
diff --git a/nest/limit.h b/nest/limit.h
new file mode 100644
index 00000000..5838ad3b
--- /dev/null
+++ b/nest/limit.h
@@ -0,0 +1,49 @@
+/*
+ * BIRD Internet Routing Daemon -- Limits
+ *
+ * (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2021 Maria Matejka <mq@jmq.cz>
+ *
+ * Can be freely distributed and used under the terms of the GNU GPL.
+ */
+
+#ifndef _BIRD_LIMIT_H_
+#define _BIRD_LIMIT_H_
+
+struct limit {
+ u32 max;
+ u32 count;
+ int (*action)(struct limit *, void *data);
+};
+
+static inline int limit_do_action(struct limit *l, void *data)
+{
+ return l->action ? l->action(l, data) : 1;
+}
+
+static inline int limit_push(struct limit *l, void *data)
+{
+ if ((l->count >= l->max) && limit_do_action(l, data))
+ return 1;
+
+ l->count++;
+ return 0;
+}
+
+static inline void limit_pop(struct limit *l)
+{
+ --l->count;
+}
+
+static inline void limit_reset(struct limit *l)
+{
+ l->count = 0;
+}
+
+static inline void limit_update(struct limit *l, void *data, u32 max)
+{
+ if (l->count > (l->max = max))
+ limit_do_action(l, data);
+}
+
+#endif
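
Since limit.h arrives whole, its contract is fully visible above: limit_push() refuses an item (returns 1, count unchanged) once count has reached max and the action callback returns nonzero; limit_pop() undoes an accepted push. A usage sketch (rx_overflow() and the constants are illustrative, not BIRD code):

#include "nest/limit.h"

static int
rx_overflow(struct limit *l, void *data UNUSED)
{
  log(L_WARN "Example limit of %u hit", l->max);
  return 1;                     /* nonzero: refuse this push */
}

static void
limit_example(void)
{
  struct limit rx = { .max = 1000, .action = rx_overflow };

  if (limit_push(&rx, NULL))
    return;                     /* refused; rx.count stays unchanged */

  /* item accepted; rx.count is now 1 */
  limit_pop(&rx);               /* matching withdrawal decrements it */
}
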
diff --git a/nest/neighbor.c b/nest/neighbor.c
index 1a31fb79..7b951366 100644
--- a/nest/neighbor.c
+++ b/nest/neighbor.c
@@ -59,6 +59,9 @@
static slab *neigh_slab;
static list neigh_hash_table[NEIGH_HASH_SIZE], sticky_neigh_list;
+static void neigh_do_notify(void *);
+static void neigh_do_notify_main(void *);
+static void neigh_free(neighbor *n);
static inline uint
neigh_hash(struct proto *p, ip_addr a, struct iface *i)
@@ -142,7 +145,7 @@ if_connected(ip_addr a, struct iface *i, struct ifa **ap, uint flags)
}
static inline int
-if_connected_any(ip_addr a, struct iface *vrf, uint vrf_set, struct iface **iface, struct ifa **addr, uint flags)
+if_connected_any(ip_addr a, struct iface *vrf, struct iface **iface, struct ifa **addr, uint flags)
{
struct iface *i;
struct ifa *b;
@@ -152,8 +155,8 @@ if_connected_any(ip_addr a, struct iface *vrf, uint vrf_set, struct iface **ifac
*addr = NULL;
/* Prefer SCOPE_HOST or longer prefix */
- WALK_LIST(i, iface_list)
- if ((!vrf_set || vrf == i->master) && ((s = if_connected(a, i, &b, flags)) >= 0))
+ WALK_LIST(i, global_iface_list)
+ if ((!vrf || vrf == i->master) && ((s = if_connected(a, i, &b, flags)) >= 0))
if (scope_better(s, scope) || (scope_remote(s, scope) && ifa_better(b, *addr)))
{
*iface = i;
@@ -216,28 +219,34 @@ neigh_find(struct proto *p, ip_addr a, struct iface *iface, uint flags)
struct iface *ifreq = iface;
struct ifa *addr = NULL;
+ IFACE_LOCK;
WALK_LIST(n, neigh_hash_table[h]) /* Search the cache */
if ((n->proto == p) && ipa_equal(n->addr, a) && (n->ifreq == iface))
+ {
+ IFACE_UNLOCK;
return n;
+ }
+
+#define NOT_FOUND goto not_found
if (flags & NEF_IFACE)
{
if (ipa_nonzero(a) || !iface)
- return NULL;
+ NOT_FOUND;
}
else
{
class = ipa_classify(a);
if (class < 0) /* Invalid address */
- return NULL;
+ NOT_FOUND;
if (((class & IADDR_SCOPE_MASK) == SCOPE_HOST) ||
(((class & IADDR_SCOPE_MASK) == SCOPE_LINK) && !iface) ||
!(class & IADDR_HOST))
- return NULL; /* Bad scope or a somecast */
+ NOT_FOUND; /* Bad scope or a somecast */
}
if ((flags & NEF_ONLINK) && !iface)
- return NULL;
+ NOT_FOUND;
if (iface)
{
@@ -245,13 +254,13 @@ neigh_find(struct proto *p, ip_addr a, struct iface *iface, uint flags)
iface = (scope < 0) ? NULL : iface;
}
else
- scope = if_connected_any(a, p->vrf, p->vrf_set, &iface, &addr, flags);
+ scope = if_connected_any(a, p->vrf, &iface, &addr, flags);
/* scope < 0 means i don't know neighbor */
/* scope >= 0 <=> iface != NULL */
if ((scope < 0) && !(flags & NEF_STICKY))
- return NULL;
+ NOT_FOUND;
n = sl_allocz(neigh_slab);
add_tail(&neigh_hash_table[h], &n->n);
@@ -264,7 +273,36 @@ neigh_find(struct proto *p, ip_addr a, struct iface *iface, uint flags)
n->flags = flags;
n->scope = scope;
+ ASSERT_DIE(birdloop_inside(p->loop));
+
+ if (flags & NEF_NOTIFY_MAIN)
+ n->event = (event) {
+ .hook = neigh_do_notify_main,
+ .data = n,
+ .list = &global_event_list,
+ };
+ else if (p->loop == &main_birdloop)
+ n->event = (event) {
+ .hook = neigh_do_notify,
+ .data = n,
+ .list = &global_event_list,
+ };
+ else
+ {
+ birdloop_link(p->loop);
+ n->event = (event) {
+ .hook = neigh_do_notify,
+ .data = n,
+ .list = birdloop_event_list(p->loop),
+ };
+ }
+
+ IFACE_UNLOCK;
return n;
+
+not_found:
+ IFACE_UNLOCK;
+ return NULL;
}
/**
@@ -298,18 +336,46 @@ neigh_dump_all(void)
neighbor *n;
int i;
+ IFACE_LOCK;
+
debug("Known neighbors:\n");
for(i=0; i<NEIGH_HASH_SIZE; i++)
WALK_LIST(n, neigh_hash_table[i])
neigh_dump(n);
debug("\n");
+
+ IFACE_UNLOCK;
}
static inline void
neigh_notify(neighbor *n)
{
- if (n->proto->neigh_notify && (n->proto->proto_state != PS_STOP))
+ if (!n->proto->neigh_notify)
+ return;
+
+ ev_send(n->event.list, &n->event);
+}
+
+static void
+neigh_do_notify_main(void *data)
+{
+ neighbor *n = data;
+ PROTO_LOCKED_FROM_MAIN(n->proto)
+ neigh_do_notify(data);
+}
+
+static void
+neigh_do_notify(void *data)
+{
+ neighbor *n = data;
+
+ ASSERT_DIE(birdloop_inside(n->proto->loop));
+
+ if (n->proto->proto_state != PS_STOP)
n->proto->neigh_notify(n);
+
+ if ((n->scope < 0) && !(n->flags & NEF_STICKY))
+ neigh_free(n);
}
static void
@@ -340,11 +406,21 @@ neigh_down(neighbor *n)
neigh_notify(n);
}
-static inline void
+static void
neigh_free(neighbor *n)
{
+ ASSERT_DIE(birdloop_inside(n->proto->loop));
+
+ if (n->flags & NEF_NOTIFY_MAIN)
+ ASSERT_DIE(birdloop_inside(&main_birdloop));
+
rem_node(&n->n);
rem_node(&n->if_n);
+
+ if (n->event.list != &global_event_list)
+ birdloop_unlink(n->proto->loop);
+
+ ev_postpone(&n->event);
sl_free(neigh_slab, n);
}
@@ -360,6 +436,8 @@ neigh_free(neighbor *n)
void
neigh_update(neighbor *n, struct iface *iface)
{
+ ASSERT_IFACE_LOCKED;
+
struct proto *p = n->proto;
struct ifa *ifa = NULL;
int scope = -1;
@@ -369,7 +447,7 @@ neigh_update(neighbor *n, struct iface *iface)
return;
/* VRF-bound neighbors ignore changes in other VRFs */
- if (p->vrf_set && (p->vrf != iface->master))
+ if (p->vrf && (p->vrf != iface->master))
return;
scope = if_connected(n->addr, iface, &ifa, n->flags);
@@ -379,7 +457,7 @@ neigh_update(neighbor *n, struct iface *iface)
{
/* When neighbor is going down, try to respawn it on other ifaces */
if ((scope < 0) && (n->scope >= 0) && !n->ifreq && (n->flags & NEF_STICKY))
- scope = if_connected_any(n->addr, p->vrf, p->vrf_set, &iface, &ifa, n->flags);
+ scope = if_connected_any(n->addr, p->vrf, &iface, &ifa, n->flags);
}
else
{
@@ -406,12 +484,6 @@ neigh_update(neighbor *n, struct iface *iface)
if (n->scope >= 0)
neigh_down(n);
- if ((n->scope < 0) && !(n->flags & NEF_STICKY))
- {
- neigh_free(n);
- return;
- }
-
if (scope >= 0)
neigh_up(n, iface, ifa, scope);
}
@@ -433,14 +505,18 @@ neigh_if_up(struct iface *i)
neighbor *n;
node *x, *y;
+ IFACE_LOCK;
+
/* Update neighbors that might be better off with the new iface */
- WALK_LIST(ii, iface_list)
+ WALK_LIST(ii, global_iface_list)
if (!EMPTY_LIST(ii->neighbors) && (ii != i) && if_intersect(i, ii))
WALK_LIST2_DELSAFE(n, x, y, ii->neighbors, if_n)
neigh_update(n, i);
WALK_LIST2_DELSAFE(n, x, y, sticky_neigh_list, if_n)
neigh_update(n, i);
+
+ IFACE_UNLOCK;
}
/**
@@ -457,8 +533,12 @@ neigh_if_down(struct iface *i)
neighbor *n;
node *x, *y;
+ IFACE_LOCK;
+
WALK_LIST2_DELSAFE(n, x, y, i->neighbors, if_n)
neigh_update(n, i);
+
+ IFACE_UNLOCK;
}
/**
@@ -474,8 +554,12 @@ neigh_if_link(struct iface *i)
neighbor *n;
node *x, *y;
+ IFACE_LOCK;
+
WALK_LIST2_DELSAFE(n, x, y, i->neighbors, if_n)
neigh_notify(n);
+
+ IFACE_UNLOCK;
}
/**
@@ -495,8 +579,10 @@ neigh_ifa_up(struct ifa *a)
neighbor *n;
node *x, *y;
+ IFACE_LOCK;
+
/* Update neighbors that might be better off with the new ifa */
- WALK_LIST(ii, iface_list)
+ WALK_LIST(ii, global_iface_list)
if (!EMPTY_LIST(ii->neighbors) && ifa_intersect(a, ii))
WALK_LIST2_DELSAFE(n, x, y, ii->neighbors, if_n)
neigh_update(n, i);
@@ -504,6 +590,8 @@ neigh_ifa_up(struct ifa *a)
/* Wake up all sticky neighbors that are reachable now */
WALK_LIST2_DELSAFE(n, x, y, sticky_neigh_list, if_n)
neigh_update(n, i);
+
+ IFACE_UNLOCK;
}
void
@@ -513,10 +601,14 @@ neigh_ifa_down(struct ifa *a)
neighbor *n;
node *x, *y;
+ IFACE_LOCK;
+
/* Update all neighbors whose scope has changed */
WALK_LIST2_DELSAFE(n, x, y, i->neighbors, if_n)
if (n->ifa == a)
neigh_update(n, i);
+
+ IFACE_UNLOCK;
}
static inline void
@@ -536,16 +628,21 @@ neigh_prune_one(neighbor *n)
* is shut down to get rid of all its heritage.
*/
void
-neigh_prune(void)
+neigh_prune(struct proto *p)
{
neighbor *n;
node *m;
int i;
+ IFACE_LOCK;
+
DBG("Pruning neighbors\n");
for(i=0; i<NEIGH_HASH_SIZE; i++)
WALK_LIST_DELSAFE(n, m, neigh_hash_table[i])
- neigh_prune_one(n);
+ if (n->proto == p)
+ neigh_prune_one(n);
+
+ IFACE_UNLOCK;
}
/**
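
Neighbor notification is now event-driven: neigh_notify() merely queues n->event, which neigh_find() pointed at the right list — global_event_list for main-loop protocols, the protocol's own loop list otherwise, with NEF_NOTIFY_MAIN forcing delivery from the main loop via the PROTO_LOCKED_FROM_MAIN() wrapper. A hedged sketch of requesting such a neighbor (the my_* names are illustrative):

static void
watch_neighbor(struct proto *my_proto, ip_addr my_addr, struct iface *my_iface)
{
  neighbor *n = neigh_find(my_proto, my_addr, my_iface,
                           NEF_STICKY | NEF_NOTIFY_MAIN);
  if (!n)
    return;                     /* invalid address or bad scope */

  /* On scope changes, n->event fires from global_event_list and
   * neigh_do_notify_main() re-locks the protocol before calling
   * its neigh_notify() hook. */
}
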
diff --git a/nest/proto-hooks.c b/nest/proto-hooks.c
index bc88b4b4..716ce86c 100644
--- a/nest/proto-hooks.c
+++ b/nest/proto-hooks.c
@@ -76,16 +76,6 @@ void dump(struct proto *p)
{ DUMMY; }
/**
- * dump_attrs - dump protocol-dependent attributes
- * @e: a route entry
- *
- * This hook dumps all attributes in the &rte which belong to this
- * protocol to the debug output.
- */
-void dump_attrs(rte *e)
-{ DUMMY; }
-
-/**
* start - request instance startup
* @p: protocol instance
*
@@ -228,36 +218,6 @@ void neigh_notify(neighbor *neigh)
{ DUMMY; }
/**
- * make_tmp_attrs - convert embedded attributes to temporary ones
- * @e: route entry
- * @pool: linear pool to allocate attribute memory in
- *
- * This hook is called by the routing table functions if they need
- * to convert the protocol attributes embedded directly in the &rte
- * to temporary extended attributes in order to distribute them
- * to other protocols or to filters. make_tmp_attrs() creates
- * an &ea_list in the linear pool @pool, fills it with values of the
- * temporary attributes and returns a pointer to it.
- */
-ea_list *make_tmp_attrs(rte *e, struct linpool *pool)
-{ DUMMY; }
-
-/**
- * store_tmp_attrs - convert temporary attributes to embedded ones
- * @e: route entry
- * @attrs: temporary attributes to be converted
- *
- * This hook is an exact opposite of make_tmp_attrs() -- it takes
- * a list of extended attributes and converts them to attributes
- * embedded in the &rte corresponding to this protocol.
- *
- * You must be prepared for any of the attributes being missing
- * from the list and use default values instead.
- */
-void store_tmp_attrs(rte *e, ea_list *attrs)
-{ DUMMY; }
-
-/**
* preexport - pre-filtering decisions before route export
* @p: protocol instance the route is going to be exported to
* @e: the route in question
diff --git a/nest/proto.c b/nest/proto.c
index 31ee1fa1..2546e812 100644
--- a/nest/proto.c
+++ b/nest/proto.c
@@ -15,6 +15,7 @@
#include "lib/event.h"
#include "lib/timer.h"
#include "lib/string.h"
+#include "lib/coro.h"
#include "conf/conf.h"
#include "nest/route.h"
#include "nest/iface.h"
@@ -31,7 +32,6 @@ struct protocol *class_to_protocol[PROTOCOL__MAX];
#define CD(c, msg, args...) ({ if (c->debug & D_STATES) log(L_TRACE "%s.%s: " msg, c->proto->name, c->name ?: "?", ## args); })
#define PD(p, msg, args...) ({ if (p->debug & D_STATES) log(L_TRACE "%s: " msg, p->name, ## args); })
-static timer *proto_shutdown_timer;
static timer *gr_wait_timer;
#define GRS_NONE 0
@@ -43,24 +43,34 @@ static int graceful_restart_state;
static u32 graceful_restart_locks;
static char *p_states[] = { "DOWN", "START", "UP", "STOP" };
-static char *c_states[] = { "DOWN", "START", "UP", "FLUSHING" };
-static char *e_states[] = { "DOWN", "FEEDING", "READY" };
+static char *c_states[] = { "DOWN", "START", "UP", "STOP", "RESTART" };
extern struct protocol proto_unix_iface;
-static void channel_request_reload(struct channel *c);
-static void proto_shutdown_loop(timer *);
+static void channel_aux_request_refeed(struct channel_aux_table *cat);
static void proto_rethink_goal(struct proto *p);
static char *proto_state_name(struct proto *p);
-static void channel_verify_limits(struct channel *c);
-static inline void channel_reset_limit(struct channel_limit *l);
-
+static void channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
+static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
+static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
+static void channel_feed_end(struct channel *c);
+static void channel_export_stopped(struct rt_export_request *req);
static inline int proto_is_done(struct proto *p)
-{ return (p->proto_state == PS_DOWN) && (p->active_channels == 0); }
+{ return (p->proto_state == PS_DOWN) && proto_is_inactive(p); }
+
+static inline event_list *proto_event_list(struct proto *p)
+{ return p->loop == &main_birdloop ? &global_event_list : birdloop_event_list(p->loop); }
+
+static inline event_list *proto_work_list(struct proto *p)
+{ return p->loop == &main_birdloop ? &global_work_list : birdloop_event_list(p->loop); }
+
+static inline void proto_send_event(struct proto *p)
+{ ev_send(proto_event_list(p), p->event); }
+
static inline int channel_is_active(struct channel *c)
-{ return (c->channel_state == CS_START) || (c->channel_state == CS_UP); }
+{ return (c->channel_state != CS_DOWN); }
static inline int channel_reloadable(struct channel *c)
{ return c->proto->reload_routes && c->reloadable; }
@@ -68,10 +78,48 @@ static inline int channel_reloadable(struct channel *c)
static inline void
channel_log_state_change(struct channel *c)
{
- if (c->export_state)
- CD(c, "State changed to %s/%s", c_states[c->channel_state], e_states[c->export_state]);
- else
- CD(c, "State changed to %s", c_states[c->channel_state]);
+ CD(c, "State changed to %s", c_states[c->channel_state]);
+}
+
+void
+channel_import_log_state_change(struct rt_import_request *req, u8 state)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ CD(c, "Channel import state changed to %s", rt_import_state_name(state));
+}
+
+void
+channel_export_log_state_change(struct rt_export_request *req, u8 state)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
+ switch (state)
+ {
+ case TES_FEEDING:
+ if (c->out_table)
+ rt_refresh_begin(&c->out_table->push);
+ else if (c->proto->feed_begin)
+ c->proto->feed_begin(c, !c->refeeding);
+ break;
+ case TES_READY:
+ channel_feed_end(c);
+ break;
+ }
+}
+
+static void
+channel_dump_import_req(struct rt_import_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ debug(" Channel %s.%s import request %p\n", c->proto->name, c->name, req);
+}
+
+static void
+channel_dump_export_req(struct rt_export_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}
static void
@@ -111,7 +159,7 @@ proto_cf_find_channel(struct proto_config *pc, uint net_type)
* Returns pointer to channel or NULL
*/
struct channel *
-proto_find_channel_by_table(struct proto *p, struct rtable *t)
+proto_find_channel_by_table(struct proto *p, rtable *t)
{
struct channel *c;
@@ -141,6 +189,16 @@ proto_find_channel_by_name(struct proto *p, const char *n)
return NULL;
}
+rte * channel_preimport(struct rt_import_request *req, rte *new, rte *old);
+rte * channel_in_preimport(struct rt_import_request *req, rte *new, rte *old);
+
+void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
+
/**
* proto_add_channel - connect protocol to a routing table
* @p: protocol instance
@@ -166,11 +224,17 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->proto = p;
c->table = cf->table->table;
+ RT_LOCKED(c->table, t)
+ rt_lock_table(t);
+
c->in_filter = cf->in_filter;
c->out_filter = cf->out_filter;
- c->rx_limit = cf->rx_limit;
- c->in_limit = cf->in_limit;
- c->out_limit = cf->out_limit;
+
+ channel_init_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
+ channel_init_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
+ channel_init_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);
+
+ c->rte_update_pool = lp_new_default(proto_pool);
c->net_type = cf->net_type;
c->ra_mode = cf->ra_mode;
@@ -181,7 +245,6 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->rpki_reload = cf->rpki_reload;
c->channel_state = CS_DOWN;
- c->export_state = ES_DOWN;
c->last_state_change = current_time();
c->reloadable = 1;
@@ -203,6 +266,9 @@ proto_remove_channel(struct proto *p UNUSED, struct channel *c)
CD(c, "Removed", c->name);
+ RT_LOCKED(c->table, t)
+ rt_unlock_table(t);
+
rem_node(&c->n);
mb_free(c);
}
@@ -223,7 +289,7 @@ proto_pause_channels(struct proto *p)
struct channel *c;
WALK_LIST(c, p->channels)
if (!c->disabled && channel_is_active(c))
- channel_set_state(c, CS_START);
+ channel_set_state(c, CS_PAUSE);
}
static void
@@ -232,7 +298,7 @@ proto_stop_channels(struct proto *p)
struct channel *c;
WALK_LIST(c, p->channels)
if (!c->disabled && channel_is_active(c))
- channel_set_state(c, CS_FLUSHING);
+ channel_set_state(c, CS_STOP);
}
static void
@@ -244,96 +310,25 @@ proto_remove_channels(struct proto *p)
}
static void
-channel_schedule_feed(struct channel *c, int initial)
-{
- // DBG("%s: Scheduling meal\n", p->name);
- ASSERT(c->channel_state == CS_UP);
-
- c->export_state = ES_FEEDING;
- c->refeeding = !initial;
-
- ev_schedule_work(c->feed_event);
-}
-
-static void
-channel_feed_loop(void *ptr)
-{
- struct channel *c = ptr;
-
- if (c->export_state != ES_FEEDING)
- return;
-
- /* Start feeding */
- if (!c->feed_active)
- {
- if (c->proto->feed_begin)
- c->proto->feed_begin(c, !c->refeeding);
-
- c->refeed_pending = 0;
- }
-
- // DBG("Feeding protocol %s continued\n", p->name);
- if (!rt_feed_channel(c))
- {
- ev_schedule_work(c->feed_event);
- return;
- }
-
- /* Reset export limit if the feed ended with acceptable number of exported routes */
- struct channel_limit *l = &c->out_limit;
- if (c->refeeding &&
- (l->state == PLS_BLOCKED) &&
- (c->refeed_count <= l->limit) &&
- (c->stats.exp_routes <= l->limit))
- {
- log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, l->limit);
- channel_reset_limit(&c->out_limit);
-
- /* Continue in feed - it will process routing table again from beginning */
- c->refeed_count = 0;
- ev_schedule_work(c->feed_event);
- return;
- }
-
- // DBG("Feeding protocol %s finished\n", p->name);
- c->export_state = ES_READY;
- channel_log_state_change(c);
-
- if (c->proto->feed_end)
- c->proto->feed_end(c);
-
- /* Restart feeding */
- if (c->refeed_pending)
- channel_request_feeding(c);
-}
-
-
-static void
-channel_roa_in_changed(struct rt_subscription *s)
+channel_roa_in_changed(void *_data)
{
- struct channel *c = s->data;
- int active = c->reload_event && ev_active(c->reload_event);
+ struct channel *c = _data;
- CD(c, "Reload triggered by RPKI change%s", active ? " - already active" : "");
+ CD(c, "Reload triggered by RPKI change");
- if (!active)
- channel_request_reload(c);
- else
- c->reload_pending = 1;
+ channel_request_reload(c);
}
static void
-channel_roa_out_changed(struct rt_subscription *s)
+channel_roa_out_changed(void *_data)
{
- struct channel *c = s->data;
- int active = (c->export_state == ES_FEEDING);
+ struct channel *c = _data;
+ CD(c, "Feeding triggered by RPKI change");
- CD(c, "Feeding triggered by RPKI change%s", active ? " - already active" : "");
+ c->refeed_pending = 1;
- if (!active)
- channel_request_feeding(c);
- else
- c->refeed_pending = 1;
+ if (c->out_req.hook)
+ rt_stop_export(&c->out_req, channel_export_stopped);
}
/* Temporary code, subscriptions should be changed to resources */
@@ -345,14 +340,14 @@ struct roa_subscription {
static int
channel_roa_is_subscribed(struct channel *c, rtable *tab, int dir)
{
- void (*hook)(struct rt_subscription *) =
+ void (*hook)(void *) =
dir ? channel_roa_in_changed : channel_roa_out_changed;
struct roa_subscription *s;
node *n;
WALK_LIST2(s, n, c->roa_subscriptions, roa_node)
- if ((s->s.tab == tab) && (s->s.hook == hook))
+ if ((s->s.tab == tab) && (s->s.event->hook == hook))
return 1;
return 0;
@@ -366,9 +361,9 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
return;
struct roa_subscription *s = mb_allocz(c->proto->pool, sizeof(struct roa_subscription));
+ s->s.event = ev_new_init(c->proto->pool, dir ? channel_roa_in_changed : channel_roa_out_changed, c);
+ s->s.event->list = proto_work_list(c->proto);
- s->s.hook = dir ? channel_roa_in_changed : channel_roa_out_changed;
- s->s.data = c;
rt_subscribe(tab, &s->s);
add_tail(&c->roa_subscriptions, &s->roa_node);
@@ -379,6 +374,7 @@ channel_roa_unsubscribe(struct roa_subscription *s)
{
rt_unsubscribe(&s->s);
rem_node(&s->roa_node);
+ rfree(s->s.event);
mb_free(s);
}
@@ -386,7 +382,7 @@ static void
channel_roa_subscribe_filter(struct channel *c, int dir)
{
const struct filter *f = dir ? c->in_filter : c->out_filter;
- struct rtable *tab;
+ rtable *tab;
int valid = 1, found = 0;
if ((f == FILTER_ACCEPT) || (f == FILTER_REJECT))
@@ -445,119 +441,554 @@ channel_roa_unsubscribe_all(struct channel *c)
}
static void
-channel_start_export(struct channel *c)
+channel_start_import(struct channel *c)
{
+ if (c->in_req.hook)
+ {
+ log(L_WARN "%s.%s: Attempted to start channel's already started import", c->proto->name, c->name);
+ return;
+ }
+
+ int nlen = strlen(c->name) + strlen(c->proto->name) + 2;
+ char *rn = mb_allocz(c->proto->pool, nlen);
+ bsprintf(rn, "%s.%s", c->proto->name, c->name);
+
+ c->in_req = (struct rt_import_request) {
+ .name = rn,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_dump_import_req,
+ .log_state_change = channel_import_log_state_change,
+ .preimport = channel_preimport,
+ };
+
ASSERT(c->channel_state == CS_UP);
- ASSERT(c->export_state == ES_DOWN);
- channel_schedule_feed(c, 1); /* Sets ES_FEEDING */
+ channel_reset_limit(c, &c->rx_limit, PLD_RX);
+ channel_reset_limit(c, &c->in_limit, PLD_IN);
+
+ memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
+
+ DBG("%s.%s: Channel start import req=%p\n", c->proto->name, c->name, &c->in_req);
+ rt_request_import(c->table, &c->in_req);
}
static void
-channel_stop_export(struct channel *c)
+channel_start_export(struct channel *c)
{
- /* Need to abort feeding */
- if (c->export_state == ES_FEEDING)
- rt_feed_channel_abort(c);
+ if (c->out_req.hook)
+ {
+ c->restart_export = 1;
+ log(L_WARN "%s.%s: Fast channel export restart", c->proto->name, c->name);
+ return;
+ }
+
+ ASSERT(c->channel_state == CS_UP);
+ int nlen = strlen(c->name) + strlen(c->proto->name) + 2;
+ char *rn = mb_allocz(c->proto->pool, nlen);
+ bsprintf(rn, "%s.%s", c->proto->name, c->name);
+
+ c->out_req = (struct rt_export_request) {
+ .name = rn,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_dump_export_req,
+ .log_state_change = channel_export_log_state_change,
+ };
+
+ bmap_init(&c->export_map, c->proto->pool, 1024);
+ bmap_init(&c->export_reject_map, c->proto->pool, 1024);
+
+ channel_reset_limit(c, &c->out_limit, PLD_OUT);
- c->export_state = ES_DOWN;
- c->stats.exp_routes = 0;
- bmap_reset(&c->export_map, 1024);
+ memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
+
+ switch (c->ra_mode) {
+ case RA_OPTIMAL:
+ c->out_req.export_one = rt_notify_optimal;
+ break;
+ case RA_ANY:
+ c->out_req.export_one = rt_notify_any;
+ c->out_req.export_bulk = rt_feed_any;
+ break;
+ case RA_ACCEPTED:
+ c->out_req.export_bulk = rt_notify_accepted;
+ break;
+ case RA_MERGED:
+ c->out_req.export_bulk = rt_notify_merged;
+ break;
+ default:
+ bug("Unknown route announcement mode");
+ }
+
+ DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
+ rt_request_export(c->table, &c->out_req);
}
+static void
+channel_check_stopped(struct channel *c)
+{
+ switch (c->channel_state)
+ {
+ case CS_STOP:
+ if (c->out_req.hook || c->in_req.hook || c->out_table || c->in_table)
+ return;
+
+ channel_set_state(c, CS_DOWN);
+ proto_send_event(c->proto);
+
+ break;
+ case CS_PAUSE:
+ if (c->out_req.hook)
+ return;
+
+ channel_set_state(c, CS_START);
+ break;
+ default:
+ bug("Stopped channel in a bad state: %d", c->channel_state);
+ }
+
+ DBG("%s.%s: Channel requests/hooks stopped (in state %s)\n", c->proto->name, c->name, c_states[c->channel_state]);
+}
-/* Called by protocol for reload from in_table */
void
-channel_schedule_reload(struct channel *c)
+channel_import_stopped(void *_c)
{
- ASSERT(c->channel_state == CS_UP);
+ struct channel *c = _c;
+
+ c->in_req.hook = NULL;
- rt_reload_channel_abort(c);
- ev_schedule_work(c->reload_event);
+ mb_free(c->in_req.name);
+ c->in_req.name = NULL;
+
+ channel_check_stopped(c);
}
static void
-channel_reload_loop(void *ptr)
+channel_export_stopped(struct rt_export_request *req)
{
- struct channel *c = ptr;
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
- /* Start reload */
- if (!c->reload_active)
- c->reload_pending = 0;
+ /* The hook has already stopped */
+ req->hook = NULL;
- if (!rt_reload_channel(c))
+ if (c->refeed_pending)
{
- ev_schedule_work(c->reload_event);
+ c->refeeding = 1;
+ c->refeed_pending = 0;
+
+ bmap_reset(&c->export_map, 1024);
+ bmap_reset(&c->export_reject_map, 1024);
+
+ rt_request_export(c->table, req);
return;
}
- /* Restart reload */
- if (c->reload_pending)
- channel_request_reload(c);
+ mb_free(c->out_req.name);
+ c->out_req.name = NULL;
+
+ bmap_free(&c->export_map);
+ bmap_free(&c->export_reject_map);
+
+ if (c->restart_export)
+ {
+ c->restart_export = 0;
+ channel_start_export(c);
+ }
+ else
+ channel_check_stopped(c);
+}
+
+static void
+channel_feed_end(struct channel *c)
+{
+ struct rt_export_request *req = &c->out_req;
+
+ /* Reset export limit if the feed ended with acceptable number of exported routes */
+ struct limit *l = &c->out_limit;
+ if (c->refeeding &&
+ (c->limit_active & (1 << PLD_OUT)) &&
+ (c->refeed_count <= l->max) &&
+ (l->count <= l->max))
+ {
+ log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, l->max);
+ channel_reset_limit(c, &c->out_limit, PLD_OUT);
+
+ c->refeed_pending = 1;
+ rt_stop_export(req, channel_export_stopped);
+ return;
+ }
+
+ if (c->out_table)
+ rt_refresh_end(&c->out_table->push);
+ else if (c->proto->feed_end)
+ c->proto->feed_end(c);
+
+ if (c->refeed_pending)
+ rt_stop_export(req, channel_export_stopped);
+}
+
+#define CHANNEL_AUX_TABLE_DUMP_REQ(inout, imex, pgimex, pushget) static void \
+ channel_##inout##_##pushget##_dump_req(struct rt_##pgimex##_request *req) { \
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, pushget, req); \
+ debug(" Channel %s.%s " #imex " table " #pushget " request %p\n", cat->c->proto->name, cat->c->name, req); }
+
+CHANNEL_AUX_TABLE_DUMP_REQ(in, import, import, push)
+CHANNEL_AUX_TABLE_DUMP_REQ(in, import, export, get)
+CHANNEL_AUX_TABLE_DUMP_REQ(out, export, import, push)
+CHANNEL_AUX_TABLE_DUMP_REQ(out, export, export, get)
+
+#undef CHANNEL_AUX_TABLE_DUMP_REQ
+
+static uint channel_aux_imex(struct channel_aux_table *cat)
+{
+ if (cat->c->in_table == cat)
+ return 0;
+ else if (cat->c->out_table == cat)
+ return 1;
+ else
+ bug("Channel aux table must be in_table or out_table");
}
static void
-channel_reset_import(struct channel *c)
+channel_aux_stopped(void *data)
{
- /* Need to abort feeding */
- ev_postpone(c->reload_event);
- rt_reload_channel_abort(c);
+ struct channel_aux_table *cat;
+
+ RT_LOCKED((rtable *) data, t)
+ cat = t->config->owner;
- rt_prune_sync(c->in_table, 1);
+ ASSERT_DIE(cat->push.hook == NULL);
+ ASSERT_DIE(cat->get.hook == NULL);
+ ASSERT_DIE(cat->stop_pending);
+
+ struct channel *c = cat->c;
+
+ if (channel_aux_imex(cat))
+ c->out_table = NULL;
+ else
+ c->in_table = NULL;
+
+ mb_free(cat);
+ channel_check_stopped(c);
}
static void
-channel_reset_export(struct channel *c)
+channel_aux_import_stopped(void *_cat)
{
- /* Just free the routes */
- rt_prune_sync(c->out_table, 1);
+ struct channel_aux_table *cat = _cat;
+
+ cat->push.hook = NULL;
+
+ if (!cat->get.hook)
+ RT_LOCKED(cat->tab, t)
+ {
+ t->delete = channel_aux_stopped;
+ rt_unlock_table(t);
+ }
}
-/* Called by protocol to activate in_table */
-void
-channel_setup_in_table(struct channel *c)
+static void
+channel_aux_export_stopped(struct rt_export_request *req)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ req->hook = NULL;
+
+ if (cat->refeed_pending && !cat->stop_pending)
+ {
+ cat->refeed_pending = 0;
+ rt_request_export(cat->tab, req);
+
+ return;
+ }
+
+ if (!cat->push.hook)
+ RT_LOCKED(cat->tab, t)
+ {
+ t->delete = channel_aux_stopped;
+ rt_unlock_table(t);
+ }
+}
+
+static void
+channel_aux_stop(struct channel_aux_table *cat)
+{
+ ASSERT_DIE(!cat->stop_pending);
+
+ cat->stop_pending = 1;
+
+ RT_LOCKED(cat->tab, t)
+ rt_lock_table(t);
+
+ cat->push_stopped = (event) {
+ .hook = channel_aux_import_stopped,
+ .data = cat,
+ .list = proto_event_list(cat->c->proto),
+ };
+
+ rt_stop_import(&cat->push, &cat->push_stopped);
+ rt_stop_export(&cat->get, channel_aux_export_stopped);
+}
+
+static void
+channel_push_log_state_change(struct rt_import_request *req, u8 state)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
+ const char *imex = channel_aux_imex(cat) ? "export" : "import";
+ CD(cat->c, "Channel %s table import state changed to %s", imex, rt_import_state_name(state));
+}
+
+static void
+channel_get_log_state_change(struct rt_export_request *req, u8 state)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ const char *imex = channel_aux_imex(cat) ? "export" : "import";
+ CD(cat->c, "Channel %s table export state changed to %s", imex, rt_export_state_name(state));
+
+ switch (state)
+ {
+ case TES_FEEDING:
+ if (imex && cat->c->proto->feed_begin)
+ cat->c->proto->feed_begin(cat->c, !cat->c->refeeding);
+ else if (!imex)
+ rt_refresh_begin(&cat->c->in_req);
+ break;
+
+ case TES_READY:
+ if (imex && cat->c->proto->feed_end)
+ cat->c->proto->feed_end(cat->c);
+ else if (!imex)
+ rt_refresh_end(&cat->c->in_req);
+
+ if (cat->refeed_pending)
+ rt_stop_export(&cat->get, channel_aux_export_stopped);
+
+ break;
+ }
+}
+
+void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
+
+static int
+channel_aux_export_one_any(struct rt_export_request *req, struct rt_pending_export *rpe, rte **new, rte **old)
+{
+ struct rte_src *src = rpe->new ? rpe->new->rte.src : rpe->old->rte.src;
+ *old = RTES_OR_NULL(rpe->old);
+ struct rte_storage *new_stored;
+
+ while (rpe)
+ {
+ new_stored = rpe->new;
+ rpe_mark_seen(req->hook, rpe);
+ rpe = rpe_next(rpe, src);
+ }
+
+ *new = RTES_CLONE(new_stored, *new);
+
+ return (*new || *old) && (&new_stored->rte != *old);
+}
+
+static int
+channel_aux_export_one_best(struct rt_export_request *req, struct rt_pending_export *rpe, rte **new, rte **old)
+{
+ *old = RTES_OR_NULL(rpe->old_best);
+ struct rte_storage *new_stored;
+
+ while (rpe)
+ {
+ new_stored = rpe->new_best;
+ rpe_mark_seen(req->hook, rpe);
+ rpe = rpe_next(rpe, NULL);
+ }
+
+ *new = RTES_CLONE(new_stored, *new);
+
+ return (*new || *old) && (&new_stored->rte != *old);
+}
+
+static void
+channel_in_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+
+ rte n0, *new = &n0, *old;
+ if (channel_aux_export_one_any(req, rpe, &new, &old))
+ rte_update_direct(cat->c, net, new, old ? old->src : new->src);
+}
+
+static void
+channel_in_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+
+ rte n0, *new = &n0, *old;
+ if (channel_aux_export_one_best(req, rpe, &new, &old))
+ rte_update_direct(cat->c, net, new, old ? old->src : new->src);
+}
+
+static void
+channel_in_export_bulk_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
{
- struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ for (uint i=0; i<count; i++)
+ {
+ rte n0 = *feed[i];
+ rte_update_direct(cat->c, net, &n0, n0.src);
+ }
+}
- cf->name = "import";
- cf->addr_type = c->net_type;
- cf->internal = 1;
+static void
+channel_in_export_bulk_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ if (!count)
+ return;
+
+ rte n0 = *feed[0];
+ rte_update_direct(cat->c, net, &n0, n0.src);
+}
+
+void do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old);
+
+static void
+channel_out_export_one_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ rte n0, *new = &n0, *old;
+ if (channel_aux_export_one_any(req, rpe, &new, &old))
+ do_rt_notify_direct(cat->c, net, new, old);
+}
+
+static void
+channel_out_export_one_best(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ rte n0, *new = &n0, *old;
+ if (channel_aux_export_one_best(req, rpe, &new, &old))
+ do_rt_notify_direct(cat->c, net, new, old);
+}
+
+static void
+channel_out_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+{
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, get, req);
+ if (cat->c->ra_mode != RA_ANY)
+ ASSERT_DIE(count <= 1);
+
+ for (uint i=0; i<count; i++)
+ {
+ rte n0 = *feed[i];
+ do_rt_notify_direct(cat->c, net, &n0, NULL);
+ }
+}
+
+/* Called by protocol to activate in_table */
+void
+channel_setup_in_table(struct channel *c, int best)
+{
+ int nlen = sizeof("import") + strlen(c->name) + strlen(c->proto->name) + 3;
+
+ struct {
+ struct channel_aux_table cat;
+ struct rtable_config tab_cf;
+ char name[0];
+ } *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
+
+ bsprintf(cat->name, "%s.%s.import", c->proto->name, c->name);
+
+ cat->tab_cf.owner = cat;
+ cat->tab_cf.name = cat->name;
+ cat->tab_cf.addr_type = c->net_type;
+ cat->tab_cf.cork_limit = 4 * page_size / sizeof(struct rt_pending_export);
+
+ c->in_table = &cat->cat;
+ c->in_table->push = (struct rt_import_request) {
+ .name = cat->name,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_in_push_dump_req,
+ .log_state_change = channel_push_log_state_change,
+ .preimport = channel_in_preimport,
+ };
+ c->in_table->get = (struct rt_export_request) {
+ .name = cat->name,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_in_get_dump_req,
+ .log_state_change = channel_get_log_state_change,
+ .export_one = best ? channel_in_export_one_best : channel_in_export_one_any,
+ .export_bulk = best ? channel_in_export_bulk_best : channel_in_export_bulk_any,
+ };
- c->in_table = rt_setup(c->proto->pool, cf);
+ c->in_table->c = c;
+ c->in_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
- c->reload_event = ev_new_init(c->proto->pool, channel_reload_loop, c);
+ rt_request_import(c->in_table->tab, &c->in_table->push);
+ rt_request_export(c->in_table->tab, &c->in_table->get);
}
/* Called by protocol to activate out_table */
void
channel_setup_out_table(struct channel *c)
{
- struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
- cf->name = "export";
- cf->addr_type = c->net_type;
- cf->internal = 1;
+ int nlen = sizeof("export") + strlen(c->name) + strlen(c->proto->name) + 3;
+
+ struct {
+ struct channel_aux_table cat;
+ struct rtable_config tab_cf;
+ char name[0];
+ } *cat = mb_allocz(c->proto->pool, sizeof(*cat) + nlen);
- c->out_table = rt_setup(c->proto->pool, cf);
+ bsprintf(cat->name, "%s.%s.export", c->proto->name, c->name);
+
+ cat->tab_cf.owner = cat;
+ cat->tab_cf.name = cat->name;
+ cat->tab_cf.addr_type = c->net_type;
+ cat->tab_cf.cork_limit = 4 * page_size / sizeof(struct rt_pending_export);
+
+ c->out_table = &cat->cat;
+ c->out_table->push = (struct rt_import_request) {
+ .name = cat->name,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_out_push_dump_req,
+ .log_state_change = channel_push_log_state_change,
+ };
+ c->out_table->get = (struct rt_export_request) {
+ .name = cat->name,
+ .list = proto_work_list(c->proto),
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_out_get_dump_req,
+ .log_state_change = channel_get_log_state_change,
+ .export_one = (c->ra_mode == RA_ANY) ? channel_out_export_one_any : channel_out_export_one_best,
+ .export_bulk = channel_out_export_bulk,
+ };
+
+ c->out_table->c = c;
+ c->out_table->tab = rt_setup(c->proto->pool, &cat->tab_cf);
+
+ rt_request_import(c->out_table->tab, &c->out_table->push);
+ rt_request_export(c->out_table->tab, &c->out_table->get);
}
+static void
+channel_aux_request_refeed(struct channel_aux_table *cat)
+{
+ if (cat->stop_pending)
+ return;
+
+ cat->refeed_pending = 1;
+ rt_stop_export(&cat->get, channel_aux_export_stopped);
+}
static void
channel_do_start(struct channel *c)
{
- rt_lock_table(c->table);
- add_tail(&c->table->channels, &c->table_node);
c->proto->active_channels++;
- c->feed_event = ev_new_init(c->proto->pool, channel_feed_loop, c);
-
- bmap_init(&c->export_map, c->proto->pool, 1024);
- memset(&c->stats, 0, sizeof(struct proto_stats));
-
- channel_reset_limit(&c->rx_limit);
- channel_reset_limit(&c->in_limit);
- channel_reset_limit(&c->out_limit);
-
CALL(c->channel->start, c);
+
+ channel_start_import(c);
}
static void
@@ -572,9 +1003,38 @@ channel_do_up(struct channel *c)
}
static void
-channel_do_flush(struct channel *c)
+channel_do_pause(struct channel *c)
+{
+ /* Stop export */
+ if (c->out_req.hook)
+ {
+ rt_stop_export(&c->out_req, channel_export_stopped);
+ c->refeeding = 0;
+ }
+
+ channel_roa_unsubscribe_all(c);
+}
+
+static void
+channel_do_stop(struct channel *c)
{
- rt_schedule_prune(c->table);
+ /* Drop auxiliary tables */
+ if (c->in_table)
+ channel_aux_stop(c->in_table);
+
+ if (c->out_table)
+ channel_aux_stop(c->out_table);
+
+ /* Stop import */
+ if (c->in_req.hook)
+ {
+ c->in_stopped = (event) {
+ .hook = channel_import_stopped,
+ .data = c,
+ .list = proto_event_list(c->proto),
+ };
+ rt_stop_import(&c->in_req, &c->in_stopped);
+ }
c->gr_wait = 0;
if (c->gr_lock)
@@ -582,48 +1042,30 @@ channel_do_flush(struct channel *c)
CALL(c->channel->shutdown, c);
- /* This have to be done in here, as channel pool is freed before channel_do_down() */
- bmap_free(&c->export_map);
- c->in_table = NULL;
- c->reload_event = NULL;
- c->out_table = NULL;
-
channel_roa_unsubscribe_all(c);
}
static void
channel_do_down(struct channel *c)
{
- ASSERT(!c->feed_active && !c->reload_active);
+ ASSERT(!c->out_req.hook && !c->in_req.hook && !c->out_table && !c->in_table);
- rem_node(&c->table_node);
- rt_unlock_table(c->table);
c->proto->active_channels--;
- if ((c->stats.imp_routes + c->stats.filt_routes) != 0)
- log(L_ERR "%s: Channel %s is down but still has some routes", c->proto->name, c->name);
-
- // bmap_free(&c->export_map);
- memset(&c->stats, 0, sizeof(struct proto_stats));
-
- c->in_table = NULL;
- c->reload_event = NULL;
- c->out_table = NULL;
-
- /* The in_table and out_table are going to be freed by freeing their resource pools. */
+ memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
+ memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
CALL(c->channel->cleanup, c);
/* Schedule protocol shutdown */
if (proto_is_done(c->proto))
- ev_schedule(c->proto->event);
+ proto_send_event(c->proto);
}
void
channel_set_state(struct channel *c, uint state)
{
uint cs = c->channel_state;
- uint es = c->export_state;
DBG("%s reporting channel %s state transition %s -> %s\n", c->proto->name, c->name, c_states[cs], c_states[state]);
if (state == cs)
@@ -635,24 +1077,15 @@ channel_set_state(struct channel *c, uint state)
switch (state)
{
case CS_START:
- ASSERT(cs == CS_DOWN || cs == CS_UP);
+ ASSERT(cs == CS_DOWN || cs == CS_PAUSE);
if (cs == CS_DOWN)
channel_do_start(c);
- if (es != ES_DOWN)
- channel_stop_export(c);
-
- if (c->in_table && (cs == CS_UP))
- channel_reset_import(c);
-
- if (c->out_table && (cs == CS_UP))
- channel_reset_export(c);
-
break;
case CS_UP:
- ASSERT(cs == CS_DOWN || cs == CS_START);
+ ASSERT(cs == CS_DOWN || cs == CS_START || cs == CS_PAUSE);
if (cs == CS_DOWN)
channel_do_start(c);
@@ -663,23 +1096,24 @@ channel_set_state(struct channel *c, uint state)
channel_do_up(c);
break;
- case CS_FLUSHING:
- ASSERT(cs == CS_START || cs == CS_UP);
+ case CS_PAUSE:
+ ASSERT(cs == CS_UP);
- if (es != ES_DOWN)
- channel_stop_export(c);
+ if (cs == CS_UP)
+ channel_do_pause(c);
+ break;
- if (c->in_table && (cs == CS_UP))
- channel_reset_import(c);
+ case CS_STOP:
+ ASSERT(cs == CS_UP || cs == CS_START || cs == CS_PAUSE);
- if (c->out_table && (cs == CS_UP))
- channel_reset_export(c);
+ if (cs == CS_UP)
+ channel_do_pause(c);
- channel_do_flush(c);
+ channel_do_stop(c);
break;
case CS_DOWN:
- ASSERT(cs == CS_FLUSHING);
+ ASSERT(cs == CS_STOP);
channel_do_down(c);
break;
@@ -701,50 +1135,62 @@ channel_set_state(struct channel *c, uint state)
* completed, it will switch back to ES_READY. This function can be called
* even when feeding is already running, in that case it is restarted.
*/
-void
-channel_request_feeding(struct channel *c)
+static void
+channel_request_table_feeding(struct channel *c)
{
- ASSERT(c->channel_state == CS_UP);
+ ASSERT(c->out_req.hook);
- CD(c, "Feeding requested");
+ c->refeed_pending = 1;
+ rt_stop_export(&c->out_req, channel_export_stopped);
+}
- /* Do nothing if we are still waiting for feeding */
- if (c->export_state == ES_DOWN)
+void
+channel_request_feeding(struct channel *c)
+{
+ if (c->gr_wait || !c->proto->rt_notify)
return;
- /* If we are already feeding, we want to restart it */
- if (c->export_state == ES_FEEDING)
- {
- /* Unless feeding is in initial state */
- if (!c->feed_active)
- return;
-
- rt_feed_channel_abort(c);
- }
+ CD(c, "Refeed requested");
- /* Track number of exported routes during refeed */
- c->refeed_count = 0;
+ ASSERT_DIE(c->out_req.hook);
- channel_schedule_feed(c, 0); /* Sets ES_FEEDING */
- channel_log_state_change(c);
+ if (c->out_table)
+ channel_aux_request_refeed(c->out_table);
+ else
+ channel_request_table_feeding(c);
}
-static void
+void
channel_request_reload(struct channel *c)
{
- ASSERT(c->channel_state == CS_UP);
+ ASSERT(c->in_req.hook);
ASSERT(channel_reloadable(c));
CD(c, "Reload requested");
- c->proto->reload_routes(c);
+ if (c->in_table)
+ channel_aux_request_refeed(c->in_table);
+ else
+ c->proto->reload_routes(c);
+}
- /*
- * Should this be done before reload_routes() hook?
- * Perhaps, but routes are updated asynchronously.
- */
- channel_reset_limit(&c->rx_limit);
- channel_reset_limit(&c->in_limit);
+void
+channel_refresh_begin(struct channel *c)
+{
+ CD(c, "Channel route refresh begin");
+ if (c->in_table)
+ rt_refresh_begin(&c->in_table->push);
+ else
+ rt_refresh_begin(&c->in_req);
+}
+
+void
+channel_refresh_end(struct channel *c)
+{
+ if (c->in_table)
+ rt_refresh_end(&c->in_table->push);
+ else
+ rt_refresh_end(&c->in_req);
}
const struct channel_class channel_basic = {
@@ -847,19 +1293,19 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
/* Reconfigure channel fields */
c->in_filter = cf->in_filter;
c->out_filter = cf->out_filter;
- c->rx_limit = cf->rx_limit;
- c->in_limit = cf->in_limit;
- c->out_limit = cf->out_limit;
+
+ channel_update_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
+ channel_update_limit(c, &c->in_limit, PLD_IN, &cf->in_limit);
+ channel_update_limit(c, &c->out_limit, PLD_OUT, &cf->out_limit);
// c->ra_mode = cf->ra_mode;
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
c->debug = cf->debug;
+ c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
c->in_keep_filtered = cf->in_keep_filtered;
c->rpki_reload = cf->rpki_reload;
- channel_verify_limits(c);
-
/* Execute channel-specific reconfigure hook */
if (c->channel->reconfigure && !c->channel->reconfigure(c, cf, &import_changed, &export_changed))
return 0;
@@ -902,7 +1348,7 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
channel_request_reload(c);
if (export_changed)
- channel_request_feeding(c);
+ channel_request_table_feeding(c);
done:
CD(c, "Reconfigured");
@@ -950,34 +1396,50 @@ proto_configure_channel(struct proto *p, struct channel **pc, struct channel_con
return 1;
}
+static void
+proto_cleanup(struct proto *p)
+{
+ p->active = 0;
+ proto_log_state_change(p);
+ proto_rethink_goal(p);
+}
static void
-proto_event(void *ptr)
+proto_loop_stopped(void *ptr)
{
struct proto *p = ptr;
- if (p->do_start)
- {
- if_feed_baby(p);
- p->do_start = 0;
- }
+ ASSERT_DIE(birdloop_inside(&main_birdloop));
+
+ p->loop = &main_birdloop;
+ p->pool = NULL;
+ p->event->list = NULL;
+
+ proto_cleanup(p);
+}
+
+static void
+proto_event(void *ptr)
+{
+ struct proto *p = ptr;
if (p->do_stop)
{
if (p->proto == &proto_unix_iface)
if_flush_ifaces(p);
+
p->do_stop = 0;
}
if (proto_is_done(p))
- {
- if (p->proto->cleanup)
- p->proto->cleanup(p);
-
- p->active = 0;
- proto_log_state_change(p);
- proto_rethink_goal(p);
- }
+ if (p->loop != &main_birdloop)
+ birdloop_stop_self(p->loop, proto_loop_stopped, p);
+ else
+ {
+ rp_free(p->pool, proto_pool);
+ p->pool = NULL;
+ proto_cleanup(p);
+ }
}
@@ -1018,10 +1480,10 @@ proto_init(struct proto_config *c, node *n)
struct protocol *pr = c->protocol;
struct proto *p = pr->init(c);
+ p->loop = &main_birdloop;
p->proto_state = PS_DOWN;
p->last_state_change = current_time();
p->vrf = c->vrf;
- p->vrf_set = c->vrf_set;
insert_node(&p->n, n);
p->event = ev_new_init(proto_pool, proto_event, p);
@@ -1034,11 +1496,30 @@ proto_init(struct proto_config *c, node *n)
static void
proto_start(struct proto *p)
{
- /* Here we cannot use p->cf->name since it won't survive reconfiguration */
- p->pool = rp_new(proto_pool, p->proto->name);
+ DBG("Kicking %s up\n", p->name);
+ PD(p, "Starting");
+
+ int ns = strlen("Protocol ") + strlen(p->cf->name) + 1;
+ void *nb = mb_alloc(proto_pool, ns);
+ ASSERT_DIE(ns - 1 == bsnprintf(nb, ns, "Protocol %s", p->cf->name));
if (graceful_restart_state == GRS_INIT)
p->gr_recovery = 1;
+
+ if (p->cf->loop_order == DOMAIN_ORDER(the_bird))
+ p->pool = rp_new(proto_pool, &main_birdloop, nb);
+ else
+ {
+ p->loop = birdloop_new(proto_pool, p->cf->loop_order, nb);
+ p->pool = birdloop_pool(p->loop);
+ }
+
+ p->event->list = proto_event_list(p);
+
+ mb_move(nb, p->pool);
+
+ PROTO_LOCKED_FROM_MAIN(p)
+ proto_notify_state(p, (p->proto->start ? p->proto->start(p) : PS_UP));
}
@@ -1074,6 +1555,7 @@ proto_config_new(struct protocol *pr, int class)
cf->class = class;
cf->debug = new_config->proto_default_debug;
cf->mrtdump = new_config->proto_default_mrtdump;
+ cf->loop_order = DOMAIN_ORDER(the_bird);
init_list(&cf->channels);
@@ -1189,8 +1671,7 @@ proto_reconfigure(struct proto *p, struct proto_config *oc, struct proto_config
if ((nc->protocol != oc->protocol) ||
(nc->net_type != oc->net_type) ||
(nc->disabled != p->disabled) ||
- (nc->vrf != oc->vrf) ||
- (nc->vrf_set != oc->vrf_set))
+ (nc->vrf != oc->vrf))
return 0;
p->name = nc->name;
@@ -1279,8 +1760,14 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
nc->proto = p;
/* We will try to reconfigure protocol p */
- if (! force_reconfig && proto_reconfigure(p, oc, nc, type))
- continue;
+ if (!force_reconfig)
+ {
+ int ok;
+ PROTO_LOCKED_FROM_MAIN(p)
+ ok = proto_reconfigure(p, oc, nc, type);
+ if (ok)
+ continue;
+ }
if (nc->parent)
{
@@ -1363,11 +1850,20 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
}
static void
-proto_rethink_goal(struct proto *p)
+proto_shutdown(struct proto *p)
{
- struct protocol *q;
- byte goal;
+ if (p->proto_state == PS_START || p->proto_state == PS_UP)
+ {
+ /* Going down */
+ DBG("Kicking %s down\n", p->name);
+ PD(p, "Shutting down");
+ proto_notify_state(p, (p->proto->shutdown ? p->proto->shutdown(p) : PS_DOWN));
+ }
+}
+static void
+proto_rethink_goal(struct proto *p)
+{
if (p->reconfiguring && !p->active)
{
struct proto_config *nc = p->cf_new;
@@ -1387,32 +1883,12 @@ proto_rethink_goal(struct proto *p)
/* Determine what state we want to reach */
if (p->disabled || p->reconfiguring)
- goal = PS_DOWN;
- else
- goal = PS_UP;
-
- q = p->proto;
- if (goal == PS_UP)
- {
- if (!p->active)
- {
- /* Going up */
- DBG("Kicking %s up\n", p->name);
- PD(p, "Starting");
- proto_start(p);
- proto_notify_state(p, (q->start ? q->start(p) : PS_UP));
- }
- }
- else
{
- if (p->proto_state == PS_START || p->proto_state == PS_UP)
- {
- /* Going down */
- DBG("Kicking %s down\n", p->name);
- PD(p, "Shutting down");
- proto_notify_state(p, (q->shutdown ? q->shutdown(p) : PS_DOWN));
- }
+ PROTO_LOCKED_FROM_MAIN(p)
+ proto_shutdown(p);
}
+ else if (!p->active)
+ proto_start(p);
}
struct proto *
@@ -1524,7 +2000,7 @@ graceful_restart_done(timer *t UNUSED)
WALK_LIST(c, p->channels)
{
/* Resume postponed export of routes */
- if ((c->channel_state == CS_UP) && c->gr_wait && c->proto->rt_notify)
+ if ((c->channel_state == CS_UP) && c->gr_wait && p->rt_notify)
channel_start_export(c);
/* Cleanup */
@@ -1614,7 +2090,11 @@ protos_dump_all(void)
struct proto *p;
WALK_LIST(p, proto_list)
{
- debug(" protocol %s state %s\n", p->name, p_states[p->proto_state]);
+#define DPF(x) (p->x ? " " #x : "")
+ debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s\n",
+ p->name, p, p_states[p->proto_state], p->active_channels,
+ DPF(disabled), DPF(active), DPF(do_stop), DPF(reconfiguring));
+#undef DPF
struct channel *c;
WALK_LIST(c, p->channels)
@@ -1624,6 +2104,23 @@ protos_dump_all(void)
debug("\tInput filter: %s\n", filter_name(c->in_filter));
if (c->out_filter)
debug("\tOutput filter: %s\n", filter_name(c->out_filter));
+ debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
+ c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
+ c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
+ if (c->in_table)
+ {
+ debug("\tInput aux table:\n");
+ rt_dump_hooks(c->in_table->tab);
+ rt_dump(c->in_table->tab);
+ debug("\tEnd of input aux table.\n");
+ }
+ if (c->out_table)
+ {
+ debug("\tOutput aux table:\n");
+ rt_dump_hooks(c->out_table->tab);
+ rt_dump(c->out_table->tab);
+ debug("\tEnd of output aux table.\n");
+ }
}
if (p->proto->dump && (p->proto_state != PS_DOWN))
@@ -1702,9 +2199,7 @@ protos_build(void)
proto_build(&proto_perf);
#endif
- proto_pool = rp_new(&root_pool, "Protocols");
- proto_shutdown_timer = tm_new(proto_pool);
- proto_shutdown_timer->hook = proto_shutdown_loop;
+ proto_pool = rp_new(&root_pool, &main_birdloop, "Protocols");
}
@@ -1712,7 +2207,7 @@ protos_build(void)
int proto_restart;
static void
-proto_shutdown_loop(timer *t UNUSED)
+proto_shutdown_loop(void *data UNUSED)
{
struct proto *p, *p_next;
@@ -1731,6 +2226,11 @@ proto_shutdown_loop(timer *t UNUSED)
}
}
+static event proto_schedule_down_event = {
+ .hook = proto_shutdown_loop,
+ .list = &global_event_list,
+};
+
static inline void
proto_schedule_down(struct proto *p, byte restart, byte code)
{
@@ -1743,7 +2243,8 @@ proto_schedule_down(struct proto *p, byte restart, byte code)
p->down_sched = restart ? PDS_RESTART : PDS_DISABLE;
p->down_code = code;
- tm_start_max(proto_shutdown_timer, restart ? 250 MS : 0);
+
+ ev_send_self(&proto_schedule_down_event);
}
/**
@@ -1780,108 +2281,136 @@ proto_set_message(struct proto *p, char *msg, int len)
}
-static const char *
-channel_limit_name(struct channel_limit *l)
-{
- const char *actions[] = {
- [PLA_WARN] = "warn",
- [PLA_BLOCK] = "block",
- [PLA_RESTART] = "restart",
- [PLA_DISABLE] = "disable",
- };
+static const char * channel_limit_name[] = {
+ [PLA_WARN] = "warn",
+ [PLA_BLOCK] = "block",
+ [PLA_RESTART] = "restart",
+ [PLA_DISABLE] = "disable",
+};
- return actions[l->action];
-}
-/**
- * channel_notify_limit: notify about limit hit and take appropriate action
- * @c: channel
- * @l: limit being hit
- * @dir: limit direction (PLD_*)
- * @rt_count: the number of routes
- *
- * The function is called by the route processing core when limit @l
- * is breached. It activates the limit and tooks appropriate action
- * according to @l->action.
- */
-void
-channel_notify_limit(struct channel *c, struct channel_limit *l, int dir, u32 rt_count)
+static void
+channel_log_limit(struct channel *c, struct limit *l, int dir)
{
const char *dir_name[PLD_MAX] = { "receive", "import" , "export" };
- const byte dir_down[PLD_MAX] = { PDC_RX_LIMIT_HIT, PDC_IN_LIMIT_HIT, PDC_OUT_LIMIT_HIT };
- struct proto *p = c->proto;
+ log(L_WARN "Channel %s.%s hits route %s limit (%d), action: %s",
+ c->proto->name, c->name, dir_name[dir], l->max, channel_limit_name[c->limit_actions[dir]]);
+}
- if (l->state == PLS_BLOCKED)
+static void
+channel_activate_limit(struct channel *c, struct limit *l, int dir)
+{
+ if (c->limit_active & (1 << dir))
return;
- /* For warning action, we want the log message every time we hit the limit */
- if (!l->state || ((l->action == PLA_WARN) && (rt_count == l->limit)))
- log(L_WARN "Protocol %s hits route %s limit (%d), action: %s",
- p->name, dir_name[dir], l->limit, channel_limit_name(l));
+ c->limit_active |= (1 << dir);
+ channel_log_limit(c, l, dir);
+}
+
+static int
+channel_limit_warn(struct limit *l, void *data)
+{
+ struct channel_limit_data *cld = data;
+ struct channel *c = cld->c;
+ int dir = cld->dir;
- switch (l->action)
- {
- case PLA_WARN:
- l->state = PLS_ACTIVE;
- break;
+ channel_log_limit(c, l, dir);
- case PLA_BLOCK:
- l->state = PLS_BLOCKED;
- break;
+ return 0;
+}
- case PLA_RESTART:
- case PLA_DISABLE:
- l->state = PLS_BLOCKED;
- if (p->proto_state == PS_UP)
- proto_schedule_down(p, l->action == PLA_RESTART, dir_down[dir]);
- break;
- }
+static int
+channel_limit_block(struct limit *l, void *data)
+{
+ struct channel_limit_data *cld = data;
+ struct channel *c = cld->c;
+ int dir = cld->dir;
+
+ channel_activate_limit(c, l, dir);
+
+ return 1;
}
-static void
-channel_verify_limits(struct channel *c)
+static const byte chl_dir_down[PLD_MAX] = { PDC_RX_LIMIT_HIT, PDC_IN_LIMIT_HIT, PDC_OUT_LIMIT_HIT };
+
+static int
+channel_limit_down(struct limit *l, void *data)
{
- struct channel_limit *l;
- u32 all_routes = c->stats.imp_routes + c->stats.filt_routes;
+ struct channel_limit_data *cld = data;
+ struct channel *c = cld->c;
+ struct proto *p = c->proto;
+ int dir = cld->dir;
- l = &c->rx_limit;
- if (l->action && (all_routes > l->limit))
- channel_notify_limit(c, l, PLD_RX, all_routes);
+ channel_activate_limit(c, l, dir);
- l = &c->in_limit;
- if (l->action && (c->stats.imp_routes > l->limit))
- channel_notify_limit(c, l, PLD_IN, c->stats.imp_routes);
+ if (p->proto_state == PS_UP)
+ proto_schedule_down(p, c->limit_actions[dir] == PLA_RESTART, chl_dir_down[dir]);
- l = &c->out_limit;
- if (l->action && (c->stats.exp_routes > l->limit))
- channel_notify_limit(c, l, PLD_OUT, c->stats.exp_routes);
+ return 1;
}
-static inline void
-channel_reset_limit(struct channel_limit *l)
+static int (*channel_limit_action[])(struct limit *, void *) = {
+ [PLA_NONE] = NULL,
+ [PLA_WARN] = channel_limit_warn,
+ [PLA_BLOCK] = channel_limit_block,
+ [PLA_RESTART] = channel_limit_down,
+ [PLA_DISABLE] = channel_limit_down,
+};
+
+static void
+channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf)
+{
+ l->action = channel_limit_action[cf->action];
+ c->limit_actions[dir] = cf->action;
+
+ struct channel_limit_data cld = { .c = c, .dir = dir };
+ limit_update(l, &cld, cf->action ? cf->limit : ~((u32) 0));
+}
+
+static void
+channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf)
{
- if (l->action)
- l->state = PLS_INITIAL;
+ channel_reset_limit(c, l, dir);
+ channel_update_limit(c, l, dir, cf);
}
+static void
+channel_reset_limit(struct channel *c, struct limit *l, int dir)
+{
+ limit_reset(l);
+ c->limit_active &= ~(1 << dir);
+}
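+
+/* How these helpers fit together (a sketch): each limit is bound to its
+ * direction at channel start and re-bound on reconfiguration, e.g.
+ *
+ *   channel_init_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
+ *   ...
+ *   channel_update_limit(c, &c->rx_limit, PLD_RX, &cf->rx_limit);
+ *
+ * (channel_reconfigure() above shows the update calls for all three directions.) */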
+
+static struct rte_owner_class default_rte_owner_class;
+
static inline void
proto_do_start(struct proto *p)
{
+ ASSERT_DIE(birdloop_inside(p->loop));
+
p->active = 1;
- p->do_start = 1;
- ev_schedule(p->event);
+
+ rt_init_sources(&p->sources, p->name, proto_work_list(p));
+ if (!p->sources.class)
+ p->sources.class = &default_rte_owner_class;
+
+ if (!p->cf->late_if_feed)
+ if_feed_baby(p);
}
static void
proto_do_up(struct proto *p)
{
+ ASSERT_DIE(birdloop_inside(p->loop));
+
if (!p->main_source)
- {
p->main_source = rt_get_source(p, 0);
- rt_lock_source(p->main_source);
- }
+ // Locked automatically
proto_start_channels(p);
+
+ if (p->cf->late_if_feed)
+ if_feed_baby(p);
}
static inline void
@@ -1896,9 +2425,6 @@ proto_do_stop(struct proto *p)
p->down_sched = 0;
p->gr_recovery = 0;
- p->do_stop = 1;
- ev_schedule(p->event);
-
if (p->main_source)
{
rt_unlock_source(p->main_source);
@@ -1906,19 +2432,21 @@ proto_do_stop(struct proto *p)
}
proto_stop_channels(p);
+ rt_destroy_sources(&p->sources, p->event);
+
+ p->do_stop = 1;
+ proto_send_event(p);
}
static void
proto_do_down(struct proto *p)
{
p->down_code = 0;
- neigh_prune();
- rfree(p->pool);
- p->pool = NULL;
+ neigh_prune(p);
/* Shutdown is finished in the protocol event */
if (proto_is_done(p))
- ev_schedule(p->event);
+ proto_send_event(p);
}
@@ -2009,38 +2537,58 @@ proto_state_name(struct proto *p)
static void
channel_show_stats(struct channel *c)
{
- struct proto_stats *s = &c->stats;
+ struct channel_import_stats *ch_is = &c->import_stats;
+ struct channel_export_stats *ch_es = &c->export_stats;
+ struct rt_import_stats *rt_is = c->in_req.hook ? &c->in_req.hook->stats : NULL;
+ struct rt_export_stats *rt_es = c->out_req.hook ? &c->out_req.hook->stats : NULL;
+
+#define SON(ie, item) ((ie) ? (ie)->item : 0)
+#define SCI(item) SON(ch_is, item)
+#define SCE(item) SON(ch_es, item)
+#define SRI(item) SON(rt_is, item)
+#define SRE(item) SON(rt_es, item)
+
+ u32 rx_routes = c->rx_limit.count;
+ u32 in_routes = c->in_limit.count;
+ u32 out_routes = c->out_limit.count;
if (c->in_keep_filtered)
cli_msg(-1006, " Routes: %u imported, %u filtered, %u exported, %u preferred",
- s->imp_routes, s->filt_routes, s->exp_routes, s->pref_routes);
+ in_routes, (rx_routes - in_routes), out_routes, SRI(pref));
else
cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred",
- s->imp_routes, s->exp_routes, s->pref_routes);
-
- cli_msg(-1006, " Route change stats: received rejected filtered ignored accepted");
- cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u",
- s->imp_updates_received, s->imp_updates_invalid,
- s->imp_updates_filtered, s->imp_updates_ignored,
- s->imp_updates_accepted);
- cli_msg(-1006, " Import withdraws: %10u %10u --- %10u %10u",
- s->imp_withdraws_received, s->imp_withdraws_invalid,
- s->imp_withdraws_ignored, s->imp_withdraws_accepted);
- cli_msg(-1006, " Export updates: %10u %10u %10u --- %10u",
- s->exp_updates_received, s->exp_updates_rejected,
- s->exp_updates_filtered, s->exp_updates_accepted);
- cli_msg(-1006, " Export withdraws: %10u --- --- --- %10u",
- s->exp_withdraws_received, s->exp_withdraws_accepted);
+ in_routes, out_routes, SRI(pref));
+
+ cli_msg(-1006, " Route change stats: received rejected filtered ignored limited accepted");
+ cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u",
+ SCI(updates_received), SCI(updates_invalid),
+ SCI(updates_filtered), SRI(updates_ignored),
+ SCI(updates_limited_rx) + SCI(updates_limited_in),
+ SRI(updates_accepted));
+ cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u",
+ SCI(withdraws_received), SCI(withdraws_invalid),
+ SRI(withdraws_ignored), SRI(withdraws_accepted));
+ cli_msg(-1006, " Export updates: %10u %10u %10u --- %10u %10u",
+ SRE(updates_received), SCE(updates_rejected),
+ SCE(updates_filtered), SCE(updates_limited), SCE(updates_accepted));
+ cli_msg(-1006, " Export withdraws: %10u --- --- --- ---%10u",
+ SRE(withdraws_received), SCE(withdraws_accepted));
+
+#undef SRI
+#undef SRE
+#undef SCI
+#undef SCE
+#undef SON
}
void
-channel_show_limit(struct channel_limit *l, const char *dsc)
+channel_show_limit(struct limit *l, const char *dsc, int active, int action)
{
if (!l->action)
return;
- cli_msg(-1006, " %-16s%d%s", dsc, l->limit, l->state ? " [HIT]" : "");
- cli_msg(-1006, " Action: %s", channel_limit_name(l));
+ cli_msg(-1006, " %-16s%d%s", dsc, l->max, active ? " [HIT]" : "");
+ cli_msg(-1006, " Action: %s", channel_limit_name[action]);
}
void
@@ -2048,6 +2596,8 @@ channel_show_info(struct channel *c)
{
cli_msg(-1006, " Channel %s", c->name);
cli_msg(-1006, " State: %s", c_states[c->channel_state]);
+ cli_msg(-1006, " Import state: %s", rt_import_state_name(rt_import_get_state(c->in_req.hook)));
+ cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(c->out_req.hook)));
cli_msg(-1006, " Table: %s", c->table->name);
cli_msg(-1006, " Preference: %d", c->preference);
cli_msg(-1006, " Input filter: %s", filter_name(c->in_filter));
@@ -2058,9 +2608,9 @@ channel_show_info(struct channel *c)
c->gr_lock ? " pending" : "",
c->gr_wait ? " waiting" : "");
- channel_show_limit(&c->rx_limit, "Receive limit:");
- channel_show_limit(&c->in_limit, "Import limit:");
- channel_show_limit(&c->out_limit, "Export limit:");
+ channel_show_limit(&c->rx_limit, "Receive limit:", c->limit_active & (1 << PLD_RX), c->limit_actions[PLD_RX]);
+ channel_show_limit(&c->in_limit, "Import limit:", c->limit_active & (1 << PLD_IN), c->limit_actions[PLD_IN]);
+ channel_show_limit(&c->out_limit, "Export limit:", c->limit_active & (1 << PLD_OUT), c->limit_actions[PLD_OUT]);
if (c->channel_state != CS_DOWN)
channel_show_stats(c);
@@ -2106,8 +2656,8 @@ proto_cmd_show(struct proto *p, uintptr_t verbose, int cnt)
cli_msg(-1006, " Message: %s", p->message);
if (p->cf->router_id)
cli_msg(-1006, " Router ID: %R", p->cf->router_id);
- if (p->vrf_set)
- cli_msg(-1006, " VRF: %s", p->vrf ? p->vrf->name : "default");
+ if (p->vrf)
+ cli_msg(-1006, " VRF: %s", p->vrf->name);
if (p->proto->show_proto_info)
p->proto->show_proto_info(p);
@@ -2135,7 +2685,7 @@ proto_cmd_disable(struct proto *p, uintptr_t arg, int cnt UNUSED)
p->disabled = 1;
p->down_code = PDC_CMD_DISABLE;
proto_set_message(p, (char *) arg, -1);
- proto_rethink_goal(p);
+ proto_shutdown(p);
cli_msg(-9, "%s: disabled", p->name);
}
@@ -2168,9 +2718,9 @@ proto_cmd_restart(struct proto *p, uintptr_t arg, int cnt UNUSED)
p->disabled = 1;
p->down_code = PDC_CMD_RESTART;
proto_set_message(p, (char *) arg, -1);
- proto_rethink_goal(p);
+ proto_shutdown(p);
p->disabled = 0;
- proto_rethink_goal(p);
+ /* After the protocol shuts down, proto_rethink_goal() is run from proto_event. */
cli_msg(-12, "%s: restarted", p->name);
}
@@ -2243,8 +2793,15 @@ proto_apply_cmd_symbol(const struct symbol *s, void (* cmd)(struct proto *, uint
return;
}
- cmd(s->proto->proto, arg, 0);
- cli_msg(0, "");
+ if (s->proto->proto)
+ {
+ struct proto *p = s->proto->proto;
+ PROTO_LOCKED_FROM_MAIN(p)
+ cmd(p, arg, 0);
+ cli_msg(0, "");
+ }
+ else
+ cli_msg(9002, "%s does not exist", s->name);
}
static void
@@ -2255,7 +2812,8 @@ proto_apply_cmd_patt(const char *patt, void (* cmd)(struct proto *, uintptr_t, i
WALK_LIST(p, proto_list)
if (!patt || patmatch(patt, p->name))
- cmd(p, arg, cnt++);
+ PROTO_LOCKED_FROM_MAIN(p)
+ cmd(p, arg, cnt++);
if (!cnt)
cli_msg(8003, "No protocols match");
diff --git a/nest/protocol.h b/nest/protocol.h
index abcc505d..a4b6152b 100644
--- a/nest/protocol.h
+++ b/nest/protocol.h
@@ -13,11 +13,11 @@
#include "lib/resource.h"
#include "lib/event.h"
#include "nest/route.h"
+#include "nest/limit.h"
#include "conf/conf.h"
struct iface;
struct ifa;
-struct rtable;
struct rte;
struct neighbor;
struct rta;
@@ -74,12 +74,9 @@ struct protocol {
struct proto * (*init)(struct proto_config *); /* Create new instance */
int (*reconfigure)(struct proto *, struct proto_config *); /* Try to reconfigure instance, returns success */
void (*dump)(struct proto *); /* Debugging dump */
- void (*dump_attrs)(struct rte *); /* Dump protocol-dependent attributes */
int (*start)(struct proto *); /* Start the instance */
int (*shutdown)(struct proto *); /* Stop the instance */
- void (*cleanup)(struct proto *); /* Called after shutdown when protocol became hungry/down */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
- void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
void (*show_proto_info)(struct proto *); /* Show protocol info (for `show protocols all' command) */
void (*copy_config)(struct proto_config *, struct proto_config *); /* Copy config from given protocol instance */
@@ -120,9 +117,10 @@ struct proto_config {
int class; /* SYM_PROTO or SYM_TEMPLATE */
u8 net_type; /* Protocol network type (NET_*), 0 for undefined */
u8 disabled; /* Protocol enabled/disabled by default */
- u8 vrf_set; /* Related VRF instance (below) is defined */
+ u8 late_if_feed; /* Delay interface feed until after channels are up */
u32 debug, mrtdump; /* Debugging bitfields, both use D_* constants */
u32 router_id; /* Protocol specific router ID */
+ uint loop_order; /* Launch a birdloop on this locking level; use DOMAIN_ORDER(the_bird) for mainloop */
list channels; /* List of channel configs (struct channel_config) */
struct iface *vrf; /* Related VRF instance, NULL if global */
@@ -133,31 +131,6 @@ struct proto_config {
};
/* Protocol statistics */
-struct proto_stats {
- /* Import - from protocol to core */
- u32 imp_routes; /* Number of routes successfully imported to the (adjacent) routing table */
- u32 filt_routes; /* Number of routes rejected in import filter but kept in the routing table */
- u32 pref_routes; /* Number of routes selected as best in the (adjacent) routing table */
- u32 imp_updates_received; /* Number of route updates received */
- u32 imp_updates_invalid; /* Number of route updates rejected as invalid */
- u32 imp_updates_filtered; /* Number of route updates rejected by filters */
- u32 imp_updates_ignored; /* Number of route updates rejected as already in route table */
- u32 imp_updates_accepted; /* Number of route updates accepted and imported */
- u32 imp_withdraws_received; /* Number of route withdraws received */
- u32 imp_withdraws_invalid; /* Number of route withdraws rejected as invalid */
- u32 imp_withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
- u32 imp_withdraws_accepted; /* Number of route withdraws accepted and processed */
-
- /* Export - from core to protocol */
- u32 exp_routes; /* Number of routes successfully exported to the protocol */
- u32 exp_updates_received; /* Number of route updates received */
- u32 exp_updates_rejected; /* Number of route updates rejected by protocol */
- u32 exp_updates_filtered; /* Number of route updates rejected by filters */
- u32 exp_updates_accepted; /* Number of route updates accepted and exported */
- u32 exp_withdraws_received; /* Number of route withdraws received */
- u32 exp_withdraws_accepted; /* Number of route withdraws accepted and processed */
-};
-
struct proto {
node n; /* Node in global proto_list */
struct protocol *proto; /* Protocol */
@@ -165,22 +138,23 @@ struct proto {
struct proto_config *cf_new; /* Configuration we want to switch to after shutdown (NULL=delete) */
pool *pool; /* Pool containing local objects */
event *event; /* Protocol event */
+ struct birdloop *loop; /* BIRDloop running this protocol */
list channels; /* List of channels to rtables (struct channel) */
struct channel *main_channel; /* Primary channel */
struct rte_src *main_source; /* Primary route source */
+ struct rte_owner sources; /* Route source owner structure */
struct iface *vrf; /* Related VRF instance, NULL if global */
const char *name; /* Name of this instance (== cf->name) */
u32 debug; /* Debugging flags */
u32 mrtdump; /* MRTDump flags */
uint active_channels; /* Number of active channels */
+ uint active_coroutines; /* Number of active coroutines */
byte net_type; /* Protocol network type (NET_*), 0 for undefined */
byte disabled; /* Manually disabled */
- byte vrf_set; /* Related VRF instance (above) is defined */
byte proto_state; /* Protocol state machine (PS_*, see below) */
byte active; /* From PS_START to cleanup after PS_STOP */
- byte do_start; /* Start actions are scheduled */
byte do_stop; /* Stop actions are scheduled */
byte reconfiguring; /* We're shutting down due to reconfiguration */
byte gr_recovery; /* Protocol should participate in graceful restart recovery */
@@ -198,12 +172,11 @@ struct proto {
* ifa_notify Notify protocol about interface address changes.
* rt_notify Notify protocol about routing table updates.
* neigh_notify Notify protocol about neighbor cache events.
- * make_tmp_attrs Add attributes to rta from from private attrs stored in rte. The route and rta MUST NOT be cached.
- * store_tmp_attrs Store private attrs back to rte and undef added attributes. The route and rta MUST NOT be cached.
- * preexport Called as the first step of the route exporting process.
- * It can construct a new rte, add private attributes and
- * decide whether the route shall be exported: 1=yes, -1=no,
- * 0=process it through the export filter set by the user.
+ * preexport Called as the first step of the route exporting process.
+ * It can decide whether the route shall be exported:
+ * -1 = reject,
+ * 0 = continue to export filter,
+ * 1 = accept immediately
* reload_routes Request channel to reload all its routes to the core
* (using rte_update()). Returns: 0=reload cannot be done,
* 1= reload is scheduled and will happen (asynchronously).
@@ -213,11 +186,9 @@ struct proto {
void (*if_notify)(struct proto *, unsigned flags, struct iface *i);
void (*ifa_notify)(struct proto *, unsigned flags, struct ifa *a);
- void (*rt_notify)(struct proto *, struct channel *, struct network *net, struct rte *new, struct rte *old);
+ void (*rt_notify)(struct proto *, struct channel *, const net_addr *net, struct rte *new, const struct rte *old);
void (*neigh_notify)(struct neighbor *neigh);
- void (*make_tmp_attrs)(struct rte *rt, struct linpool *pool);
- void (*store_tmp_attrs)(struct rte *rt, struct linpool *pool);
- int (*preexport)(struct proto *, struct rte **rt, struct linpool *pool);
+ int (*preexport)(struct channel *, struct rte *rt);
void (*reload_routes)(struct channel *);
void (*feed_begin)(struct channel *, int initial);
void (*feed_end)(struct channel *);
@@ -233,13 +204,12 @@ struct proto {
* rte_remove Called whenever a rte is removed from the routing table.
*/
- int (*rte_recalculate)(struct rtable *, struct network *, struct rte *, struct rte *, struct rte *);
+ int (*rte_recalculate)(rtable *, struct network *, struct rte *, struct rte *, struct rte *);
int (*rte_better)(struct rte *, struct rte *);
- int (*rte_same)(struct rte *, struct rte *);
int (*rte_mergable)(struct rte *, struct rte *);
- struct rte * (*rte_modify)(struct rte *, struct linpool *);
void (*rte_insert)(struct network *, struct rte *);
void (*rte_remove)(struct network *, struct rte *);
+ u32 (*rte_igp_metric)(struct rte *);
/* Hic sunt protocol-specific data */
};
@@ -279,7 +249,7 @@ void channel_graceful_restart_unlock(struct channel *c);
#define DEFAULT_GR_WAIT 240
-void channel_show_limit(struct channel_limit *l, const char *dsc);
+void channel_show_limit(struct limit *l, const char *dsc, int active, int action);
void channel_show_info(struct channel *c);
void channel_cmd_debug(struct channel *c, uint mask);
@@ -298,6 +268,18 @@ struct proto *proto_iterate_named(struct symbol *sym, struct protocol *proto, st
#define PROTO_WALK_CMD(sym,pr,p) for(struct proto *p = NULL; p = proto_iterate_named(sym, pr, p); )
+#define PROTO_ENTER_FROM_MAIN(p) ({ \
+ ASSERT_DIE(birdloop_inside(&main_birdloop)); \
+ struct birdloop *_loop = (p)->loop; \
+ if (_loop != &main_birdloop) birdloop_enter(_loop); \
+ _loop; \
+ })
+
+#define PROTO_LEAVE_FROM_MAIN(loop) ({ if (loop != &main_birdloop) birdloop_leave(loop); })
+
+#define PROTO_LOCKED_FROM_MAIN(p) for (struct birdloop *_proto_loop = PROTO_ENTER_FROM_MAIN(p); _proto_loop; PROTO_LEAVE_FROM_MAIN(_proto_loop), (_proto_loop = NULL))
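+
+/* Usage sketch (mirroring the call sites in proto.c): execute one statement
+ * with the protocol's birdloop entered, when running in the main loop:
+ *
+ *   PROTO_LOCKED_FROM_MAIN(p)
+ *     proto_shutdown(p);
+ *
+ * The loop is entered before the body and left after it; for protocols running
+ * directly in &main_birdloop, the enter/leave is skipped. */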
+
+
#define CMD_RELOAD 0
#define CMD_RELOAD_IN 1
#define CMD_RELOAD_OUT 2
@@ -386,6 +368,8 @@ void proto_notify_state(struct proto *p, unsigned state);
* as a result of received ROUTE-REFRESH request).
*/
+static inline int proto_is_inactive(struct proto *p)
+{ return (p->active_channels == 0) && (p->active_coroutines == 0) && (p->sources.uc == 0); }
/*
@@ -434,18 +418,29 @@ extern struct proto_config *cf_dev_proto;
#define PLA_RESTART 4 /* Force protocol restart */
#define PLA_DISABLE 5 /* Shutdown and disable protocol */
-#define PLS_INITIAL 0 /* Initial limit state after protocol start */
-#define PLS_ACTIVE 1 /* Limit was hit */
-#define PLS_BLOCKED 2 /* Limit is active and blocking new routes */
-
struct channel_limit {
u32 limit; /* Maximum number of prefixes */
u8 action; /* Action to take (PLA_*) */
- u8 state; /* State of limit (PLS_*) */
};
-void channel_notify_limit(struct channel *c, struct channel_limit *l, int dir, u32 rt_count);
+struct channel_limit_data {
+ struct channel *c;
+ int dir;
+};
+
+#define CLP__RX(_c) (&(_c)->rx_limit)
+#define CLP__IN(_c) (&(_c)->in_limit)
+#define CLP__OUT(_c) (&(_c)->out_limit)
+
+
+#if 0
+#define CHANNEL_LIMIT_LOG(_c, _dir, _op) log(L_TRACE "%s.%s: %s limit %s %u", (_c)->proto->name, (_c)->name, #_dir, _op, (CLP__##_dir(_c))->count)
+#else
+#define CHANNEL_LIMIT_LOG(_c, _dir, _op)
+#endif
+#define CHANNEL_LIMIT_PUSH(_c, _dir) ({ CHANNEL_LIMIT_LOG(_c, _dir, "push from"); struct channel_limit_data cld = { .c = (_c), .dir = PLD_##_dir }; limit_push(CLP__##_dir(_c), &cld); })
+#define CHANNEL_LIMIT_POP(_c, _dir) ({ limit_pop(CLP__##_dir(_c)); CHANNEL_LIMIT_LOG(_c, _dir, "pop to"); })
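+
+/* A usage sketch (the real call sites are elsewhere in this series; this only
+ * illustrates the intended pairing, assuming limit_push() returns nonzero when
+ * the configured action blocks the route):
+ *
+ *   if (CHANNEL_LIMIT_PUSH(c, RX))
+ *     return;                     // route refused by the receive limit
+ *   ...
+ *   CHANNEL_LIMIT_POP(c, RX);    // on withdrawal, release the counted slot
+ */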
/*
* Channels
@@ -469,7 +464,6 @@ struct channel_class {
void (*dump)(struct proto *); /* Debugging dump */
- void (*dump_attrs)(struct rte *); /* Dump protocol-dependent attributes */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
@@ -489,6 +483,7 @@ struct channel_config {
struct proto_config *parent; /* Where channel is defined (proto or template) */
struct rtable_config *table; /* Table we're attached to */
const struct filter *in_filter, *out_filter; /* Attached filters */
+
struct channel_limit rx_limit; /* Limit for receiving routes from protocol
(relevant when in_keep_filtered is active) */
struct channel_limit in_limit; /* Limit for importing routes from protocol */
@@ -505,23 +500,50 @@ struct channel_config {
struct channel {
node n; /* Node in proto->channels */
- node table_node; /* Node in table->channels */
const char *name; /* Channel name (may be NULL) */
const struct channel_class *channel;
struct proto *proto;
- struct rtable *table;
+ rtable *table;
const struct filter *in_filter; /* Input filter */
const struct filter *out_filter; /* Output filter */
- struct bmap export_map; /* Keeps track which routes passed export filter */
- struct channel_limit rx_limit; /* Receive limit (for in_keep_filtered) */
- struct channel_limit in_limit; /* Input limit */
- struct channel_limit out_limit; /* Output limit */
-
- struct event *feed_event; /* Event responsible for feeding */
- struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
- struct proto_stats stats; /* Per-channel protocol statistics */
+ struct bmap export_map; /* Keeps track which routes were really exported */
+ struct bmap export_reject_map; /* Keeps track which routes were rejected by export filter */
+
+ struct limit rx_limit; /* Receive limit (for in_keep_filtered) */
+ struct limit in_limit; /* Input limit */
+ struct limit out_limit; /* Output limit */
+
+ u8 limit_actions[PLD_MAX]; /* Limit actions enum */
+ u8 limit_active; /* Flags for active limits */
+
+ linpool *rte_update_pool;
+ uint rte_update_nest_cnt;
+
+ struct channel_import_stats {
+ /* Import - from protocol to core */
+ u32 updates_received; /* Number of route updates received */
+ u32 updates_invalid; /* Number of route updates rejected as invalid */
+ u32 updates_filtered; /* Number of route updates rejected by filters */
+ u32 updates_limited_rx; /* Number of route updates exceeding the rx_limit */
+ u32 updates_limited_in; /* Number of route updates exceeding the in_limit */
+ u32 withdraws_received; /* Number of route withdraws received */
+ u32 withdraws_invalid; /* Number of route withdraws rejected as invalid */
+ } import_stats;
+
+ struct channel_export_stats {
+ /* Export - from core to protocol */
+ u32 updates_rejected; /* Number of route updates rejected by protocol */
+ u32 updates_filtered; /* Number of route updates rejected by filters */
+ u32 updates_accepted; /* Number of route updates accepted and exported */
+ u32 updates_limited; /* Number of route updates exceeding the out_limit */
+ u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
+ } export_stats;
+
+ struct rt_import_request in_req; /* Table import connection */
+ struct rt_export_request out_req; /* Table export connection */
+
u32 refeed_count; /* Number of routes exported during refeed regardless of out_limit */
u8 net_type; /* Routing table network type (NET_*), 0 for undefined */
@@ -534,31 +556,36 @@ struct channel {
u8 stale; /* Used in reconfiguration */
u8 channel_state;
- u8 export_state; /* Route export state (ES_*, see below) */
- u8 feed_active;
- u8 flush_active;
- u8 refeeding; /* We are refeeding (valid only if export_state == ES_FEEDING) */
+ u8 refeeding; /* Refeeding the channel. */
u8 reloadable; /* Hook reload_routes() is allowed on the channel */
u8 gr_lock; /* Graceful restart mechanism should wait for this channel */
u8 gr_wait; /* Route export to channel is postponed until graceful restart */
+ u8 restart_export; /* Route export should restart as soon as it stops */
btime last_state_change; /* Time of last state transition */
- struct rtable *in_table; /* Internal table for received routes */
- struct event *reload_event; /* Event responsible for reloading from in_table */
- struct fib_iterator reload_fit; /* FIB iterator in in_table used during reloading */
- struct rte *reload_next_rte; /* Route iterator in in_table used during reloading */
- u8 reload_active; /* Iterator reload_fit is linked */
+ struct channel_aux_table *in_table; /* Internal table for received routes */
+ struct event in_stopped; /* Import stop callback */
u8 reload_pending; /* Reloading and another reload is scheduled */
u8 refeed_pending; /* Refeeding and another refeed is scheduled */
u8 rpki_reload; /* RPKI changes trigger channel reload */
- struct rtable *out_table; /* Internal table for exported routes */
+ struct channel_aux_table *out_table; /* Internal table for exported routes */
list roa_subscriptions; /* List of active ROA table subscriptions based on filters roa_check() */
};
+struct channel_aux_table {
+ struct channel *c;
+ struct rt_import_request push;
+ struct rt_export_request get;
+ event push_stopped;
+ rtable *tab;
+ event *stop;
+ u8 refeed_pending;
+ u8 stop_pending;
+};
/*
* Channel states
@@ -585,70 +612,59 @@ struct channel {
* restricted by that and is on volition of the protocol. Generally, channels
* are opened in protocols' start() hooks when going to PS_UP.
*
- * CS_FLUSHING - The transitional state between initialized channel and closed
+ * CS_STOP - The transitional state between initialized channel and closed
* channel. The channel is still initialized, but no route exchange is allowed.
* Instead, the associated table is running flush loop to remove routes imported
* through the channel. After that, the channel changes state to CS_DOWN and
* is detached from the table (the table is unlocked and the channel is unlinked
- * from it). Unlike other states, the CS_FLUSHING state is not explicitly
+ * from it). Unlike other states, the CS_STOP state is not explicitly
* entered or left by the protocol. A protocol may request to close a channel
* (by calling channel_close()), which causes the channel to change state to
- * CS_FLUSHING and later to CS_DOWN. Also note that channels are closed
+ * CS_STOP and later to CS_DOWN. Also note that channels are closed
* automatically by the core when the protocol is going down.
*
+ * CS_PAUSE - Almost the same as CS_STOP, except that the table import is kept
+ * and only the table export is stopped before transitioning back to CS_START.
+ *
* Allowed transitions:
*
* CS_DOWN -> CS_START / CS_UP
- * CS_START -> CS_UP / CS_FLUSHING
- * CS_UP -> CS_START / CS_FLUSHING
- * CS_FLUSHING -> CS_DOWN (automatic)
+ * CS_START -> CS_UP / CS_STOP
+ * CS_UP -> CS_PAUSE / CS_STOP
+ * CS_PAUSE -> CS_START (automatic)
+ * CS_STOP -> CS_DOWN (automatic)
*/
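+/* An illustrative pause/resume sequence (a sketch matching the transition
+ * table above and the asserts in channel_set_state()):
+ *
+ *   channel_set_state(c, CS_PAUSE);   // allowed only from CS_UP; export stops
+ *   channel_set_state(c, CS_START);   // resume; the table import was kept
+ */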
#define CS_DOWN 0
#define CS_START 1
#define CS_UP 2
-#define CS_FLUSHING 3
-
-#define ES_DOWN 0
-#define ES_FEEDING 1
-#define ES_READY 2
-
+#define CS_STOP 3
+#define CS_PAUSE 4
struct channel_config *proto_cf_find_channel(struct proto_config *p, uint net_type);
static inline struct channel_config *proto_cf_main_channel(struct proto_config *pc)
{ return proto_cf_find_channel(pc, pc->net_type); }
-struct channel *proto_find_channel_by_table(struct proto *p, struct rtable *t);
+struct channel *proto_find_channel_by_table(struct proto *p, rtable *t);
struct channel *proto_find_channel_by_name(struct proto *p, const char *n);
struct channel *proto_add_channel(struct proto *p, struct channel_config *cf);
int proto_configure_channel(struct proto *p, struct channel **c, struct channel_config *cf);
void channel_set_state(struct channel *c, uint state);
-void channel_setup_in_table(struct channel *c);
+void channel_setup_in_table(struct channel *c, int best);
void channel_setup_out_table(struct channel *c);
void channel_schedule_reload(struct channel *c);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
-static inline void channel_close(struct channel *c) { channel_set_state(c, CS_FLUSHING); }
+static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
void channel_request_feeding(struct channel *c);
+void channel_request_reload(struct channel *c);
+void channel_refresh_begin(struct channel *c);
+void channel_refresh_end(struct channel *c);
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
void *channel_config_get(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
int channel_reconfigure(struct channel *c, struct channel_config *cf);
-
-/* Moved from route.h to avoid dependency conflicts */
-static inline void rte_update(struct proto *p, const net_addr *n, rte *new) { rte_update2(p->main_channel, n, new, p->main_source); }
-
-static inline void
-rte_update3(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
-{
- if (c->in_table && !rte_update_in(c, n, new, src))
- return;
-
- rte_update2(c, n, new, src);
-}
-
-
#endif
diff --git a/nest/route.h b/nest/route.h
index f5fc9e31..9093108b 100644
--- a/nest/route.h
+++ b/nest/route.h
@@ -2,6 +2,7 @@
* BIRD Internet Routing Daemon -- Routing Table
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2019--2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@@ -10,13 +11,19 @@
#define _BIRD_ROUTE_H_
#include "lib/lists.h"
+#include "lib/event.h"
#include "lib/bitmap.h"
#include "lib/resource.h"
#include "lib/net.h"
+#include "lib/hash.h"
+#include "lib/event.h"
+
+#include <stdatomic.h>
struct ea_list;
struct protocol;
struct proto;
+struct channel;
struct rte_src;
struct symbol;
struct timer;
@@ -139,61 +146,91 @@ void fit_copy(struct fib *f, struct fib_iterator *dst, struct fib_iterator *src)
* It's guaranteed that there is at most one RTE for every (prefix,proto) pair.
*/
-struct rtable_config {
- node n;
- char *name;
- struct rtable *table;
- struct proto_config *krt_attached; /* Kernel syncer attached to this table */
- uint addr_type; /* Type of address data stored in table (NET_*) */
- int gc_max_ops; /* Maximum number of operations before GC is run */
- int gc_min_time; /* Minimum time between two consecutive GC runs */
- byte sorted; /* Routes of network are sorted according to rte_better() */
- byte internal; /* Internal table of a protocol */
- btime min_settle_time; /* Minimum settle time for notifications */
- btime max_settle_time; /* Maximum settle time for notifications */
-};
-
-typedef struct rtable {
- resource r;
- node n; /* Node in list of all tables */
+typedef struct rtable_private {
+#define RTABLE_PUBLIC \
+ resource r; \
+ node n; /* Node in list of all tables */ \
+ struct birdloop *loop; /* This loop runs the table */ \
+ char *name; /* Name of this table */ \
+ uint addr_type; /* Type of address data stored in table (NET_*) */ \
+ struct rtable_config *config; /* Configuration of this table */ \
+ struct event *nhu_event; /* Event to update next hops */ \
+ _Atomic byte nhu_state; /* Next Hop Update state */ \
+
+ RTABLE_PUBLIC;
pool *rp; /* Resource pool to allocate everything from, including itself */
+ struct slab *rte_slab; /* Slab to allocate route objects */
struct fib fib;
- char *name; /* Name of this table */
- list channels; /* List of attached channels (struct channel) */
- uint addr_type; /* Type of address data stored in table (NET_*) */
- int pipe_busy; /* Pipe loop detection */
int use_count; /* Number of protocols using this table */
u32 rt_count; /* Number of routes in the table */
+ u32 rr_count; /* Number of running route refresh requests */
+ u32 imports_up; /* Number of imports in TIS_UP state */
- byte internal; /* Internal table of a protocol */
+ list imports; /* Registered route importers */
+ list exports; /* Registered route exporters */
struct hmap id_map;
struct hostcache *hostcache;
- struct rtable_config *config; /* Configuration of this table */
- struct config *deleted; /* Table doesn't exist in current configuration,
- * delete as soon as use_count becomes 0 and remove
- * obstacle from this routing table.
- */
- struct event *rt_event; /* Routing table event */
+ struct event *prune_event; /* Event to prune abandoned routes */
+ struct event *announce_event; /* Event to announce pending exports */
+ struct event *ec_event; /* Event to prune finished exports */
+ struct event *hcu_event; /* Event to update host cache */
+ void (*delete)(void *); /* Delete callback (in parent loop context) */
btime last_rt_change; /* Last time when route changed */
btime base_settle_time; /* Start time of rtable settling interval */
btime gc_time; /* Time of last GC */
int gc_counter; /* Number of operations since last GC */
byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */
- byte hcu_scheduled; /* Hostcache update is scheduled */
- byte nhu_state; /* Next Hop Update state */
+
+ byte cork_active; /* Congestion control activated */
+
struct fib_iterator prune_fit; /* Rtable prune FIB iterator */
struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */
+ struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */
+
+ linpool *nhu_lp; /* Linpool used for NHU */
list subscribers; /* Subscribers for notifications */
struct timer *settle_timer; /* Settle time for notifications */
+
+ list pending_exports; /* List of packed struct rt_pending_export */
+
+ struct rt_pending_export *first_export; /* First export to announce */
+ u64 next_export_seq; /* The next export will have this ID */
+} rtable_private;
+
+typedef union {
+ struct { RTABLE_PUBLIC };
+ rtable_private priv;
} rtable;
+#define RT_LOCK(tab) ({ birdloop_enter((tab)->loop); &(tab)->priv; })
+#define RT_UNLOCK(tab) birdloop_leave((tab)->loop)
+#define RT_PRIV(tab) ({ ASSERT_DIE(birdloop_inside((tab)->loop)); &(tab)->priv; })
+
+#define RT_LOCKED(tpub, tpriv) for (rtable_private *tpriv = RT_LOCK(tpub); tpriv; RT_UNLOCK(tpriv), (tpriv = NULL))
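+
+/* Example (a sketch): private rtable fields must only be touched with the
+ * table's loop entered, either via explicit RT_LOCK()/RT_UNLOCK() or the
+ * block form:
+ *
+ *   RT_LOCKED(c->table, tab)
+ *     tab->gc_counter++;
+ *
+ * which enters the loop before the body and leaves it afterwards. */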
+
+struct rtable_config {
+ node n;
+ char *name;
+ void *owner; /* Main config if global table, channel_aux_table if channel table */
+ rtable *table;
+ struct proto_config *krt_attached; /* Kernel syncer attached to this table */
+ uint addr_type; /* Type of address data stored in table (NET_*) */
+ int gc_max_ops; /* Maximum number of operations before GC is run */
+ int gc_min_time; /* Minimum time between two consecutive GC runs */
+ byte sorted; /* Routes of network are sorted according to rte_better() */
+ btime min_settle_time; /* Minimum settle time for notifications */
+ btime max_settle_time; /* Maximum settle time for notifications */
+ btime min_rr_settle_time; /* Minimum settle time for notifications when route refresh is running */
+ btime max_rr_settle_time; /* Maximum settle time for notifications when route refresh is running */
+ uint cork_limit; /* Number of routes pending on export at which imports are corked */
+};
+
struct rt_subscription {
node n;
rtable *tab;
- void (*hook)(struct rt_subscription *b);
- void *data;
+ event *event;
};
#define NHU_CLEAN 0
@@ -202,7 +239,8 @@ struct rt_subscription {
#define NHU_DIRTY 3
typedef struct network {
- struct rte *routes; /* Available routes for this network */
+ struct rte_storage *routes; /* Available routes for this network */
+ struct rt_pending_export *last, *first; /* Routes with unfinished exports */
struct fib_node n; /* FIB flags reserved for kernel syncer */
} net;
@@ -223,7 +261,7 @@ struct hostentry {
ip_addr addr; /* IP address of host, part of key */
ip_addr link; /* (link-local) IP address of host, used as gw
if host is directly attached */
- struct rtable *tab; /* Dependent table, part of key */
+ rtable *tab; /* Dependent table, part of key */
struct hostentry *next; /* Next in hash chain */
unsigned hash_key; /* Hash key */
unsigned uc; /* Use count */
@@ -234,64 +272,178 @@ struct hostentry {
};
typedef struct rte {
- struct rte *next;
- net *net; /* Network this RTE belongs to */
- struct channel *sender; /* Channel used to send the route to the routing table */
struct rta *attrs; /* Attributes of this route */
+ const net_addr *net; /* Network this RTE belongs to */
+ struct rte_src *src; /* Route source that created the route */
+ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
+ btime lastmod; /* Last modified (set by table) */
u32 id; /* Table specific route id */
- byte flags; /* Flags (REF_...) */
+ byte flags; /* Table-specific flags */
byte pflags; /* Protocol-specific flags */
- word pref; /* Route preference */
- btime lastmod; /* Last modified */
- union { /* Protocol-dependent data (metrics etc.) */
-#ifdef CONFIG_RIP
- struct {
- struct iface *from; /* Incoming iface */
- u8 metric; /* RIP metric */
- u16 tag; /* External route tag */
- } rip;
-#endif
-#ifdef CONFIG_OSPF
- struct {
- u32 metric1, metric2; /* OSPF Type 1 and Type 2 metrics */
- u32 tag; /* External route tag */
- u32 router_id; /* Router that originated this route */
- } ospf;
-#endif
-#ifdef CONFIG_BGP
- struct {
- u8 suppressed; /* Used for deterministic MED comparison */
- s8 stale; /* Route is LLGR_STALE, -1 if unknown */
- } bgp;
-#endif
-#ifdef CONFIG_BABEL
- struct {
- u16 seqno; /* Babel seqno */
- u16 metric; /* Babel metric */
- u64 router_id; /* Babel router id */
- } babel;
-#endif
- struct { /* Routes generated by krt sync (both temporary and inherited ones) */
- s8 src; /* Alleged route source (see krt.h) */
- u8 proto; /* Kernel source protocol ID */
- u8 seen; /* Seen during last scan */
- u8 best; /* Best route in network, propagated to core */
- u32 metric; /* Kernel metric */
- } krt;
- } u;
+ u8 generation; /* If this route import is based on another previously exported route,
+ this value should be 1 + MAX(generation of the parent routes).
+ Otherwise the route is independent and this value is zero. */
+ u8 stale_cycle; /* Auxiliary value for route refresh */
} rte;
-#define REF_COW 1 /* Copy this rte on write */
+struct rte_storage {
+ struct rte_storage *next; /* Next in chain */
+ struct rte rte; /* Route data */
+};
+
+#define RTES_CLONE(r, l) ((r) ? (((*(l)) = (r)->rte), (l)) : NULL)
+#define RTES_OR_NULL(r) ((r) ? &((r)->rte) : NULL)
+
#define REF_FILTERED 2 /* Route is rejected by import filter */
-#define REF_STALE 4 /* Route is stale in a refresh cycle */
-#define REF_DISCARD 8 /* Route is scheduled for discard */
-#define REF_MODIFY 16 /* Route is scheduled for modify */
+#define REF_USE_STALE 4 /* Do not reset route's stale_cycle to the actual value */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
-static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }
+static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
/* Route just has REF_FILTERED flag */
-static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }
+static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
+
+
+/* Table-channel connections */
+
+struct rt_import_request {
+ struct rt_import_hook *hook; /* The table part of importer */
+ char *name;
+ u8 trace_routes;
+
+ event_list *list; /* Where to schedule import events */
+
+ void (*dump_req)(struct rt_import_request *req);
+ void (*log_state_change)(struct rt_import_request *req, u8 state);
+ /* Preimport is called when the @new route is about to be inserted, replacing @old.
+ * Return a route (may be different or modified in-place) to continue or NULL to withdraw. */
+ struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
+};
+
+struct rt_import_hook {
+ node n;
+ rtable *table; /* The connected table */
+ struct rt_import_request *req; /* The requestor */
+
+ struct rt_import_stats {
+ /* Import - from protocol to core */
+ u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
+ u32 updates_ignored; /* Number of route updates rejected as already in route table */
+ u32 updates_accepted; /* Number of route updates accepted and imported */
+ u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
+ u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
+ } stats;
+
+ u64 flush_seq; /* Table export seq when the channel announced flushing */
+ btime last_state_change; /* Time of last state transition */
+
+ u8 import_state; /* IS_* */
+ u8 stale_set; /* Set this stale_cycle to imported routes */
+ u8 stale_valid; /* Routes with this stale_cycle and bigger are considered valid */
+ u8 stale_pruned; /* Last prune finished when this value was set at stale_valid */
+ u8 stale_pruning; /* Last prune started when this value was set at stale_valid */
+
+ struct event *export_announce_event; /* Event to run to announce new exports */
+ struct event *stopped; /* Event to run when import is stopped */
+};
+
+struct rt_pending_export {
+ struct rt_pending_export * _Atomic next; /* Next export for the same destination */
+ struct rte_storage *new, *new_best, *old, *old_best;
+ u64 seq; /* Sequential ID (table-local) of the pending export */
+};
+
+struct rt_export_request {
+ struct rt_export_hook *hook; /* Table part of the export */
+ char *name;
+ u8 trace_routes;
+
+ event_list *list; /* Where to schedule export events */
+
+ /* There are two methods of export. You can either request feeding every single change
+  * or feeding the whole route feed. In case of regular export, &export_one is preferred.
+  * When feeding, however, &export_bulk is preferred, falling back to &export_one.
+  * Thus, for RA_OPTIMAL, only &export_one is set,
+  * for RA_MERGED and RA_ACCEPTED, only &export_bulk is set,
+  * and for RA_ANY, both are set to accommodate feeding all routes while receiving single changes.
+  */
+ void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+ void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
+ void (*dump_req)(struct rt_export_request *req);
+ void (*log_state_change)(struct rt_export_request *req, u8);
+};
+
+struct rt_export_hook {
+ node n;
+ rtable *table; /* The connected table */
+
+ pool *pool;
+
+ struct rt_export_request *req; /* The requestor */
+
+ struct rt_export_stats {
+ /* Export - from core to protocol */
+ u32 updates_received; /* Number of route updates received */
+ u32 withdraws_received; /* Number of route withdraws received */
+ } stats;
+
+ struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
+
+ struct bmap seq_map; /* Keep track of which exports were already processed */
+
+ struct rt_pending_export * _Atomic last_export;/* Last export processed */
+ struct rt_pending_export *rpe_next; /* Next pending export to process */
+
+ btime last_state_change; /* Time of last state transition */
+
+ u8 refeed_pending; /* Refeeding and another refeed is scheduled */
+ _Atomic u8 export_state; /* Route export state (TES_*, see below) */
+
+ struct event *event; /* Event running all the export operations */
+
+ void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
+};
+
+extern struct event_cork rt_cork;
+
+#define TIS_DOWN 0
+#define TIS_UP 1
+#define TIS_STOP 2
+#define TIS_FLUSHING 3
+#define TIS_WAITING 4
+#define TIS_CLEARED 5
+#define TIS_MAX 6
+
+#define TES_DOWN 0
+#define TES_HUNGRY 1
+#define TES_FEEDING 2
+#define TES_READY 3
+#define TES_STOP 4
+#define TES_MAX 5
+
+void rt_request_import(rtable *tab, struct rt_import_request *req);
+void rt_request_export(rtable *tab, struct rt_export_request *req);
+
+void rt_stop_import(struct rt_import_request *, struct event *stopped);
+void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
+
+const char *rt_import_state_name(u8 state);
+const char *rt_export_state_name(u8 state);
+
+static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
+static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
+
+void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
+
+/* Get next rpe. If src is given, it must match. */
+struct rt_pending_export *rpe_next(struct rt_pending_export *rpe, struct rte_src *src);
+
+/* Mark the pending export processed */
+void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
+
+/* Get pending export seen status */
+int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
/* Types of route announcement, also used as flags */
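Taken together, the declarations above suggest the following consumer pattern for an &export_one hook: walk the chain of pending exports for the destination, skip entries already marked in the seq_map, deliver the rest, and mark them seen. A sketch under these assumptions; my_deliver() is hypothetical:

/* Hypothetical delivery callback, not part of this patch */
static void my_deliver(struct rt_export_request *, const net_addr *, rte *, rte *);

static void
example_export_one(struct rt_export_request *req, const net_addr *net,
                   struct rt_pending_export *rpe)
{
  struct rt_export_hook *hook = req->hook;

  for (; rpe; rpe = rpe_next(rpe, NULL))     /* NULL src: match any source */
  {
    if (rpe_get_seen(hook, rpe))             /* already processed earlier */
      continue;

    my_deliver(req, net, RTES_OR_NULL(rpe->new), RTES_OR_NULL(rpe->old));
    rpe_mark_seen(hook, rpe);                /* record rpe->seq in seq_map */
  }
}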
@@ -307,56 +459,74 @@ static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED);
#define RIC_REJECT -1 /* Rejected by protocol */
#define RIC_DROP -2 /* Silently dropped by protocol */
+#define rte_update channel_rte_import
+/**
+ * rte_update - enter a new update to a routing table
+ * @c: channel doing the update
+ * @net: network address
+ * @rte: a &rte representing the new route
+ * @src: old route source identifier
+ *
+ * This function imports a new route to the appropriate table (via the channel).
+ * Table keys are @net (obligatory) and @rte->src.
+ * Both the @net and @rte pointers can be local.
+ *
+ * The route attributes (@rte->attrs) are obligatory. They can also be allocated
+ * locally. However, if you use an already-cached attribute object, you must
+ * call rta_clone() on that object yourself. (These semantics may change in the future.)
+ *
+ * If the route attributes are local, you may set @rte->src to NULL, then
+ * the protocol's default route source will be supplied.
+ *
+ * When rte_update() gets a route, it automatically validates it. This includes
+ * checking for validity of the given network and next hop addresses and also
+ * checking for host-scope or link-scope routes. Then the import filters are
+ * processed and if accepted, the route is passed to route table recalculation.
+ *
+ * The accepted routes are then inserted into the table, replacing the old route
+ * for the same @net identified by @src. Then the route is announced
+ * to all the channels connected to the table using the standard export mechanism.
+ * Setting @rte to NULL makes this a withdraw, otherwise @rte->src must be the same
+ * as @src.
+ *
+ * All memory used for temporary allocations is taken from a special linpool
+ * @rte_update_pool and freed when rte_update() finishes.
+ */
+void rte_update(struct channel *c, const net_addr *net, struct rte *rte, struct rte_src *src);
+
extern list routing_tables;
struct config;
void rt_init(void);
void rt_preconfig(struct config *);
void rt_commit(struct config *new, struct config *old);
-void rt_lock_table(rtable *);
-void rt_unlock_table(rtable *);
+void rt_lock_table(rtable_private *);
+void rt_unlock_table(rtable_private *);
void rt_subscribe(rtable *tab, struct rt_subscription *s);
void rt_unsubscribe(struct rt_subscription *s);
rtable *rt_setup(pool *, struct rtable_config *);
-static inline void rt_shutdown(rtable *r) { rfree(r->rp); }
-static inline net *net_find(rtable *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
-static inline net *net_find_valid(rtable *tab, const net_addr *addr)
-{ net *n = net_find(tab, addr); return (n && rte_is_valid(n->routes)) ? n : NULL; }
-static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
-void *net_route(rtable *tab, const net_addr *n);
+static inline net *net_find(rtable_private *tab, const net_addr *addr) { return (net *) fib_find(&tab->fib, addr); }
+static inline net *net_find_valid(rtable_private *tab, const net_addr *addr)
+{ net *n = net_find(tab, addr); return (n && n->routes && rte_is_valid(&n->routes->rte)) ? n : NULL; }
+static inline net *net_get(rtable_private *tab, const net_addr *addr) { return (net *) fib_get(&tab->fib, addr); }
+void *net_route(rtable_private *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
-rte *rte_find(net *net, struct rte_src *src);
-rte *rte_get_temp(struct rta *);
-void rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
-/* rte_update() moved to protocol.h to avoid dependency conflicts */
-int rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter);
-rte *rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent);
-void rt_refresh_begin(rtable *t, struct channel *c);
-void rt_refresh_end(rtable *t, struct channel *c);
-void rt_modify_stale(rtable *t, struct channel *c);
-void rt_schedule_prune(rtable *t);
-void rte_dump(rte *);
-void rte_free(rte *);
-rte *rte_do_cow(rte *);
-static inline rte * rte_cow(rte *r) { return (r->flags & REF_COW) ? rte_do_cow(r) : r; }
-rte *rte_cow_rta(rte *r, linpool *lp);
-void rte_init_tmp_attrs(struct rte *r, linpool *lp, uint max);
-void rte_make_tmp_attr(struct rte *r, uint id, uint type, uintptr_t val);
-void rte_make_tmp_attrs(struct rte **r, struct linpool *pool, struct rta **old_attrs);
-uintptr_t rte_store_tmp_attr(struct rte *r, uint id);
+int rt_examine(rtable_private *t, net_addr *a, struct channel *c, const struct filter *filter);
+rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
+
+void rt_refresh_begin(struct rt_import_request *);
+void rt_refresh_end(struct rt_import_request *);
+void rt_schedule_prune(rtable_private *t);
+void rte_dump(struct rte_storage *);
+void rte_free(struct rte_storage *, rtable_private *);
void rt_dump(rtable *);
void rt_dump_all(void);
-int rt_feed_channel(struct channel *c);
-void rt_feed_channel_abort(struct channel *c);
-int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
-int rt_reload_channel(struct channel *c);
-void rt_reload_channel_abort(struct channel *c);
+void rt_dump_hooks(rtable *);
+void rt_dump_hooks_all(void);
void rt_prune_sync(rtable *t, int all);
-int rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
-
/* Default limit for ECMP next hops, defined in sysdep code */
extern const int rt_default_ecmp;
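A caller-side sketch of the rte_update() contract documented above (hypothetical protocol code; the nest/rt-dev.c hunk below follows the same pattern):

static void
example_announce(struct channel *c, struct proto *p, const net_addr *net)
{
  struct rte_src *src = rt_get_source(p, 0);  /* private ID 0, illustrative */

  rta a0 = {
    .pref = c->preference,
    .source = RTS_STATIC,                     /* any RTS_* fitting the protocol */
    .scope = SCOPE_UNIVERSE,
    .dest = RTD_BLACKHOLE,
  };

  rte e0 = { .attrs = rta_lookup(&a0), .src = src };

  rte_update(c, net, &e0, src);               /* NULL instead of &e0 withdraws */
  rt_unlock_source(src);                      /* drop the rt_get_source() ref */
}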
@@ -379,6 +549,7 @@ struct rt_show_data {
struct channel *export_channel;
struct config *running_on_config;
struct krt_proto *kernel;
+ struct rt_export_hook *kernel_export_hook;
int export_mode, primary_only, filtered, stats, show_for;
int table_open; /* Iteration (fit) is open */
@@ -430,30 +601,29 @@ struct nexthop {
struct rte_src {
struct rte_src *next; /* Hash chain */
- struct proto *proto; /* Protocol the source is based on */
+ struct rte_owner *owner; /* Route source owner */
u32 private_id; /* Private ID, assigned by the protocol */
u32 global_id; /* Globally unique ID of the source */
- unsigned uc; /* Use count */
+ _Atomic u64 uc; /* Use count */
};
typedef struct rta {
- struct rta *next, **pprev; /* Hash chain */
- u32 uc; /* Use count */
+ struct rta * _Atomic next, * _Atomic *pprev; /* Hash chain */
+ _Atomic u32 uc; /* Use count */
u32 hash_key; /* Hash over important fields */
struct ea_list *eattrs; /* Extended Attribute chain */
- struct rte_src *src; /* Route source that created the route */
struct hostentry *hostentry; /* Hostentry for recursive next-hops */
ip_addr from; /* Advertising router */
u32 igp_metric; /* IGP metric to next hop (for iBGP routes) */
- u8 source; /* Route source (RTS_...) */
- u8 scope; /* Route scope (SCOPE_... -- see ip.h) */
- u8 dest; /* Route destination type (RTD_...) */
- u8 aflags;
+ u16 cached:1; /* Are attributes cached? */
+ u16 source:7; /* Route source (RTS_...) */
+ u16 scope:4; /* Route scope (SCOPE_... -- see ip.h) */
+ u16 dest:4; /* Route destination type (RTD_...) */
+ word pref;
struct nexthop nh; /* Next hop */
} rta;
-#define RTS_DUMMY 0 /* Dummy route to be removed soon */
#define RTS_STATIC 1 /* Normal static route */
#define RTS_INHERIT 2 /* Route inherited from kernel */
#define RTS_DEVICE 3 /* Device route */
@@ -471,11 +641,6 @@ typedef struct rta {
#define RTS_PERF 15 /* Perf checker */
#define RTS_MAX 16
-#define RTC_UNICAST 0
-#define RTC_BROADCAST 1
-#define RTC_MULTICAST 2
-#define RTC_ANYCAST 3 /* IPv6 Anycast */
-
#define RTD_NONE 0 /* Undefined next hop */
#define RTD_UNICAST 1 /* Next hop is neighbor router */
#define RTD_BLACKHOLE 2 /* Silently drop packets */
@@ -483,8 +648,6 @@ typedef struct rta {
#define RTD_PROHIBIT 4 /* Administratively prohibited */
#define RTD_MAX 5
-#define RTAF_CACHED 1 /* This is a cached rta */
-
#define IGP_METRIC_UNKNOWN 0x80000000 /* Default igp_metric used when no other
   protocol-specific metric is available */
@@ -508,8 +671,8 @@ typedef struct eattr {
byte flags; /* Protocol-dependent flags */
byte type; /* Attribute type and several flags (EAF_...) */
union {
- u32 data;
- const struct adata *ptr; /* Attribute data elsewhere */
+ uintptr_t data;
+ const struct adata *ptr; /* Attribute data elsewhere */
} u;
} eattr;
@@ -517,7 +680,6 @@ typedef struct eattr {
#define EA_CODE(proto,id) (((proto) << 8) | (id))
#define EA_ID(ea) ((ea) & 0xff)
#define EA_PROTO(ea) ((ea) >> 8)
-#define EA_ID_FLAG(ea) (1 << EA_ID(ea))
#define EA_CUSTOM(id) ((id) | EA_CUSTOM_BIT)
#define EA_IS_CUSTOM(ea) ((ea) & EA_CUSTOM_BIT)
#define EA_CUSTOM_ID(ea) ((ea) & ~EA_CUSTOM_BIT)
@@ -540,6 +702,7 @@ const char *ea_custom_name(uint ea);
#define EAF_TYPE_AS_PATH 0x06 /* BGP AS path (encoding per RFC 1771:4.3) */
#define EAF_TYPE_BITFIELD 0x09 /* 32-bit embedded bitfield */
#define EAF_TYPE_INT_SET 0x0a /* Set of u32's (e.g., a community list) */
+#define EAF_TYPE_PTR 0x0d /* Pointer to an object */
#define EAF_TYPE_EC_SET 0x0e /* Set of pairs of u32's - ext. community list */
#define EAF_TYPE_LC_SET 0x12 /* Set of triplets of u32's - large community list */
#define EAF_TYPE_UNDEF 0x1f /* `force undefined' entry */
@@ -578,13 +741,52 @@ typedef struct ea_list {
#define EALF_SORTED 1 /* Attributes are sorted by code */
#define EALF_BISECT 2 /* Use interval bisection for searching */
#define EALF_CACHED 4 /* Attributes belonging to cached rta */
-#define EALF_TEMP 8 /* Temporary ea_list added by make_tmp_attrs hooks */
-struct rte_src *rt_find_source(struct proto *p, u32 id);
-struct rte_src *rt_get_source(struct proto *p, u32 id);
-static inline void rt_lock_source(struct rte_src *src) { src->uc++; }
-static inline void rt_unlock_source(struct rte_src *src) { src->uc--; }
-void rt_prune_sources(void);
+struct rte_owner_class {
+ void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
+ int (*rte_better)(struct rte *, struct rte *);
+ int (*rte_mergable)(struct rte *, struct rte *);
+ u32 (*rte_igp_metric)(struct rte *);
+};
+
+struct rte_owner {
+ struct rte_owner_class *class;
+ int (*rte_recalculate)(rtable_private *, struct network *, struct rte *, struct rte *, struct rte *);
+ HASH(struct rte_src) hash;
+ const char *name;
+ u32 hash_key;
+ u32 uc;
+ event_list *list;
+ event *prune;
+ event *stop;
+};
+
+#define RTE_SRC_PU_SHIFT 44
+#define RTE_SRC_IN_PROGRESS (1ULL << RTE_SRC_PU_SHIFT)
+
+struct rte_src *rt_get_source_o(struct rte_owner *o, u32 id);
+#define rt_get_source(p, id) rt_get_source_o(&(p)->sources, (id))
+static inline void rt_lock_source(struct rte_src *src)
+{
+ u64 uc = atomic_fetch_add_explicit(&src->uc, 1, memory_order_acq_rel);
+ ASSERT_DIE(uc > 0);
+}
+
+static inline void rt_unlock_source(struct rte_src *src)
+{
+ u64 uc = atomic_fetch_add_explicit(&src->uc, RTE_SRC_IN_PROGRESS, memory_order_acq_rel);
+ u64 pending = uc >> RTE_SRC_PU_SHIFT;
+ uc &= RTE_SRC_IN_PROGRESS - 1;
+
+ ASSERT_DIE(uc > pending);
+ if (uc == pending + 1)
+ ev_send(src->owner->list, src->owner->prune);
+
+ atomic_fetch_sub_explicit(&src->uc, RTE_SRC_IN_PROGRESS + 1, memory_order_acq_rel);
+}
+
+void rt_init_sources(struct rte_owner *, const char *name, event_list *list);
+void rt_destroy_sources(struct rte_owner *, event *);
struct ea_walk_state {
 ea_list *eattrs; /* Current ea_list, initially set by caller */
@@ -594,7 +796,7 @@ struct ea_walk_state {
eattr *ea_find(ea_list *, unsigned ea);
eattr *ea_walk(struct ea_walk_state *s, uint id, uint max);
-int ea_get_int(ea_list *, unsigned ea, int def);
+uintptr_t ea_get_int(ea_list *, unsigned ea, uintptr_t def);
void ea_dump(ea_list *);
void ea_sort(ea_list *); /* Sort entries in all sub-lists */
unsigned ea_scan(ea_list *); /* How many bytes do we need for merged ea_list */
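The locking pair above packs two counters into one atomic: bits above RTE_SRC_PU_SHIFT count unlock operations currently in progress, while the bits below hold the plain reference count. A worked illustration of the encoding (assumptions as stated, not part of this patch):

static void
example_src_uc_encoding(void)
{
  u64 uc = 3;                                  /* three live references */
  uc += RTE_SRC_IN_PROGRESS;                   /* one unlock in progress */

  u64 pending = uc >> RTE_SRC_PU_SHIFT;        /* == 1 unlock running */
  u64 refs = uc & (RTE_SRC_IN_PROGRESS - 1);   /* == 3 references */

  /* rt_unlock_source() schedules pruning only when it drops the last
   * reference not covered by a concurrent unlock: refs == pending + 1.
   * Here 3 > 1 + 1, so the source stays alive. */
  ASSERT_DIE(refs > pending);
}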
@@ -673,24 +875,47 @@ void rta_init(void);
static inline size_t rta_size(const rta *a) { return sizeof(rta) + sizeof(u32)*a->nh.labels; }
#define RTA_MAX_SIZE (sizeof(rta) + sizeof(u32)*MPLS_MAX_LABEL_STACK)
rta *rta_lookup(rta *); /* Get rta equivalent to this one, uc++ */
-static inline int rta_is_cached(rta *r) { return r->aflags & RTAF_CACHED; }
-static inline rta *rta_clone(rta *r) { r->uc++; return r; }
-void rta__free(rta *r);
-static inline void rta_free(rta *r) { if (r && !--r->uc) rta__free(r); }
+static inline int rta_is_cached(rta *r) { return r->cached; }
+
+static inline rta *rta_clone(rta *r) {
+ u32 uc = atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
+ ASSERT_DIE(uc > 0);
+ return r;
+}
+
+#define RTA_OBSOLETE_LIMIT 512
+
+extern _Atomic u32 rta_obsolete_count;
+extern event rta_cleanup_event;
+
+static inline void rta_free(rta *r) {
+ if (!r)
+ return;
+
+ u32 uc = atomic_fetch_sub_explicit(&r->uc, 1, memory_order_acq_rel);
+ if (uc > 1)
+ return;
+
+ u32 obs = atomic_fetch_add_explicit(&rta_obsolete_count, 1, memory_order_acq_rel);
+ if (obs == RTA_OBSOLETE_LIMIT)
+ ev_send(&global_work_list, &rta_cleanup_event);
+}
+
rta *rta_do_cow(rta *o, linpool *lp);
static inline rta * rta_cow(rta *r, linpool *lp) { return rta_is_cached(r) ? rta_do_cow(r, lp) : r; }
-void rta_dump(rta *);
+static inline void rta_uncache(rta *r) { r->cached = 0; r->uc = 0; }
+void rta_dump(const rta *);
void rta_dump_all(void);
-void rta_show(struct cli *, rta *);
+void rta_show(struct cli *, const rta *);
-u32 rt_get_igp_metric(rte *rt);
+u32 rt_get_igp_metric(rte *);
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
-void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls);
+void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls, linpool *lp);
static inline void
-rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, mpls_label_stack *mls)
+rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, mpls_label_stack *mls, linpool *lp)
{
- rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), mls);
+ rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), mls, lp);
}
/*
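With this change rta_free() never frees memory directly: it only drops the use count and, once RTA_OBSOLETE_LIMIT zero-use entries have accumulated, schedules rta_cleanup_event (defined in the nest/rt-attr.c hunk below). A lifecycle sketch under that assumption (illustration only):

static void
example_rta_lifecycle(rta *tmpl)
{
  rta *a = rta_lookup(tmpl);    /* intern; use count becomes >= 1 */
  rta *b = rta_clone(a);        /* atomic uc++, returns the same object */

  rta_free(b);                  /* uc-- */
  rta_free(a);                  /* uc reaches 0: the entry is now obsolete;
                                 * the memory is reclaimed later by the
                                 * cleanup event, after synchronize_rcu() */
}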
diff --git a/nest/rt-attr.c b/nest/rt-attr.c
index c630aa95..c3da4782 100644
--- a/nest/rt-attr.c
+++ b/nest/rt-attr.c
@@ -54,6 +54,7 @@
#include "lib/hash.h"
#include "lib/idm.h"
#include "lib/resource.h"
+#include "lib/rcu.h"
#include "lib/string.h"
#include <stddef.h>
@@ -61,7 +62,6 @@
const adata null_adata; /* adata of length 0 */
const char * const rta_src_names[RTS_MAX] = {
- [RTS_DUMMY] = "",
[RTS_STATIC] = "static",
[RTS_INHERIT] = "inherit",
[RTS_DEVICE] = "device",
@@ -86,7 +86,13 @@ const char * rta_dest_names[RTD_MAX] = {
[RTD_PROHIBIT] = "prohibited",
};
+static DOMAIN(attrs) src_domain;
+
+#define SRC_LOCK LOCK_DOMAIN(attrs, src_domain)
+#define SRC_UNLOCK UNLOCK_DOMAIN(attrs, src_domain)
+
pool *rta_pool;
+pool *src_pool;
static slab *rta_slab_[4];
static slab *nexthop_slab_[4];
@@ -97,72 +103,151 @@ static struct idm src_ids;
/* rte source hash */
-#define RSH_KEY(n) n->proto, n->private_id
+#define RSH_KEY(n) n->private_id
#define RSH_NEXT(n) n->next
-#define RSH_EQ(p1,n1,p2,n2) p1 == p2 && n1 == n2
-#define RSH_FN(p,n) p->hash_key ^ u32_hash(n)
+#define RSH_EQ(n1,n2) n1 == n2
+#define RSH_FN(n) u32_hash(n)
#define RSH_REHASH rte_src_rehash
#define RSH_PARAMS /2, *2, 1, 1, 8, 20
-#define RSH_INIT_ORDER 6
-
-static HASH(struct rte_src) src_hash;
+#define RSH_INIT_ORDER 2
static void
rte_src_init(void)
{
- rte_src_slab = sl_new(rta_pool, sizeof(struct rte_src));
-
- idm_init(&src_ids, rta_pool, SRC_ID_INIT_SIZE);
+ src_domain = DOMAIN_NEW(attrs, "Route sources");
+ src_pool = rp_new(&root_pool, &main_birdloop, "Route sources");
+ rte_src_slab = sl_new(src_pool, sizeof(struct rte_src));
- HASH_INIT(src_hash, rta_pool, RSH_INIT_ORDER);
+ idm_init(&src_ids, src_pool, SRC_ID_INIT_SIZE);
}
-
HASH_DEFINE_REHASH_FN(RSH, struct rte_src)
-struct rte_src *
-rt_find_source(struct proto *p, u32 id)
+static struct rte_src *
+rt_find_source(struct rte_owner *p, u32 id)
{
- return HASH_FIND(src_hash, RSH, p, id);
+ return HASH_FIND(p->hash, RSH, id);
}
struct rte_src *
-rt_get_source(struct proto *p, u32 id)
+rt_get_source_o(struct rte_owner *p, u32 id)
{
+ if (p->stop)
+ bug("Stopping route owner asked for another source.");
+
struct rte_src *src = rt_find_source(p, id);
if (src)
+ {
+ UNUSED u64 uc = atomic_fetch_add_explicit(&src->uc, 1, memory_order_acq_rel);
return src;
+ }
+ SRC_LOCK;
src = sl_allocz(rte_src_slab);
- src->proto = p;
+ src->owner = p;
src->private_id = id;
src->global_id = idm_alloc(&src_ids);
- src->uc = 0;
- HASH_INSERT2(src_hash, RSH, rta_pool, src);
+ atomic_store_explicit(&src->uc, 1, memory_order_release);
+ p->uc++;
+
+ HASH_INSERT2(p->hash, RSH, src_pool, src);
+ if (config->table_debug)
+ log(L_TRACE "Allocated new rte_src for %s, ID %uL %uG, have %u sources now",
+ p->name, src->private_id, src->global_id, p->uc);
+
+ SRC_UNLOCK;
return src;
}
+static inline void
+rt_done_sources(struct rte_owner *o)
+{
+ if (o->stop->list)
+ ev_send(o->stop->list, o->stop);
+ else
+ ev_send(o->list, o->stop);
+}
+
void
-rt_prune_sources(void)
+rt_prune_sources(void *data)
{
- HASH_WALK_FILTER(src_hash, next, src, sp)
+ struct rte_owner *o = data;
+
+ HASH_WALK_FILTER(o->hash, next, src, sp)
{
- if (src->uc == 0)
+ u64 uc;
+ while ((uc = atomic_load_explicit(&src->uc, memory_order_acquire)) >> RTE_SRC_PU_SHIFT)
+ ;
+
+ if (uc == 0)
{
- HASH_DO_REMOVE(src_hash, RSH, sp);
+ o->uc--;
+
+ HASH_DO_REMOVE(o->hash, RSH, sp);
+
+ SRC_LOCK;
idm_free(&src_ids, src->global_id);
sl_free(rte_src_slab, src);
+ SRC_UNLOCK;
}
}
HASH_WALK_FILTER_END;
- HASH_MAY_RESIZE_DOWN(src_hash, RSH, rta_pool);
+ SRC_LOCK;
+ HASH_MAY_RESIZE_DOWN(o->hash, RSH, src_pool);
+
+ if (o->stop && !o->uc)
+ {
+ rfree(o->prune);
+ SRC_UNLOCK;
+
+ if (config->table_debug)
+ log(L_TRACE "All rte_src's for %s pruned, scheduling stop event", o->name);
+
+ rt_done_sources(o);
+ }
+ else
+ SRC_UNLOCK;
}
+void
+rt_init_sources(struct rte_owner *o, const char *name, event_list *list)
+{
+ SRC_LOCK;
+ HASH_INIT(o->hash, src_pool, RSH_INIT_ORDER);
+ o->hash_key = random_u32();
+ o->uc = 0;
+ o->name = name;
+ o->prune = ev_new_init(src_pool, rt_prune_sources, o);
+ o->stop = NULL;
+ o->list = list;
+ SRC_UNLOCK;
+}
+
+void
+rt_destroy_sources(struct rte_owner *o, event *done)
+{
+ o->stop = done;
+
+ if (!o->uc)
+ {
+ if (config->table_debug)
+ log(L_TRACE "Source owner %s destroy requested. All rte_src's already pruned, scheduling stop event", o->name);
+
+ SRC_LOCK;
+ rfree(o->prune);
+ SRC_UNLOCK;
+
+ rt_done_sources(o);
+ }
+ else
+ if (config->table_debug)
+ log(L_TRACE "Source owner %s destroy requested. Remaining %u rte_src's to prune.", o->name, o->uc);
+}
/*
* Multipath Next Hop
@@ -541,8 +626,8 @@ ea_walk(struct ea_walk_state *s, uint id, uint max)
* by calling ea_find() to find the attribute, extracting its value or returning
* a provided default if no such attribute is present.
*/
-int
-ea_get_int(ea_list *e, unsigned id, int def)
+uintptr_t
+ea_get_int(ea_list *e, unsigned id, uintptr_t def)
{
eattr *a = ea_find(e, id);
if (!a)
@@ -1081,21 +1166,28 @@ ea_append(ea_list *to, ea_list *what)
* rta's
*/
-static uint rta_cache_count;
-static uint rta_cache_size = 32;
-static uint rta_cache_limit;
-static uint rta_cache_mask;
-static rta **rta_hash_table;
+static DOMAIN(attrs) attrs_domain;
-static void
-rta_alloc_hash(void)
+#define RTA_LOCK LOCK_DOMAIN(attrs, attrs_domain)
+#define RTA_UNLOCK UNLOCK_DOMAIN(attrs, attrs_domain)
+
+struct rta_cache {
+ u32 count;
+ u32 size;
+ u32 limit;
+ u32 mask;
+ rta * _Atomic table[0];
+} * _Atomic rta_cache;
+// rta_aux, rta_cache = { .size = ATOMIC_VAR_INIT(32), };
+
+static struct rta_cache *
+rta_alloc_hash(u32 size)
{
- rta_hash_table = mb_allocz(rta_pool, sizeof(rta *) * rta_cache_size);
- if (rta_cache_size < 32768)
- rta_cache_limit = rta_cache_size * 2;
- else
- rta_cache_limit = ~0;
- rta_cache_mask = rta_cache_size - 1;
+ struct rta_cache *c = mb_allocz(rta_pool, sizeof(struct rta_cache) + sizeof(rta * _Atomic) * size);
+ c->size = size;
+ c->limit = (size >> 20) ? (~0U) : (size * 2);
+ c->mask = size - 1;
+ return c;
}
static inline uint
@@ -1104,13 +1196,14 @@ rta_hash(rta *a)
u64 h;
mem_hash_init(&h);
#define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f));
- MIX(src);
+#define BMIX(f) mem_hash_mix_num(&h, a->f);
MIX(hostentry);
MIX(from);
MIX(igp_metric);
- MIX(source);
- MIX(scope);
- MIX(dest);
+ BMIX(source);
+ BMIX(scope);
+ BMIX(dest);
+ MIX(pref);
#undef MIX
return mem_hash_value(&h) ^ nexthop_hash(&(a->nh)) ^ ea_hash(a->eattrs);
@@ -1119,8 +1212,7 @@ rta_hash(rta *a)
static inline int
rta_same(rta *x, rta *y)
{
- return (x->src == y->src &&
- x->source == y->source &&
+ return (x->source == y->source &&
x->scope == y->scope &&
x->dest == y->dest &&
x->igp_metric == y->igp_metric &&
@@ -1149,34 +1241,88 @@ rta_copy(rta *o)
}
static inline void
-rta_insert(rta *r)
+rta_insert(rta *r, struct rta_cache *c)
{
- uint h = r->hash_key & rta_cache_mask;
- r->next = rta_hash_table[h];
- if (r->next)
- r->next->pprev = &r->next;
- r->pprev = &rta_hash_table[h];
- rta_hash_table[h] = r;
+ uint h = r->hash_key & c->mask;
+ rta *next = atomic_load_explicit(&c->table[h], memory_order_relaxed);
+
+ atomic_store_explicit(&r->next, next, memory_order_relaxed);
+ r->pprev = &c->table[h];
+
+ if (next)
+ next->pprev = &r->next;
+
+ /* This store MUST be the last and MUST have release order for thread-safety */
+ atomic_store_explicit(&c->table[h], r, memory_order_release);
}
static void
-rta_rehash(void)
+rta_rehash(struct rta_cache *c)
{
- uint ohs = rta_cache_size;
- uint h;
- rta *r, *n;
- rta **oht = rta_hash_table;
-
- rta_cache_size = 2*rta_cache_size;
- DBG("Rehashing rta cache from %d to %d entries.\n", ohs, rta_cache_size);
- rta_alloc_hash();
- for(h=0; h<ohs; h++)
- for(r=oht[h]; r; r=n)
+ u32 os = c->size;
+
+ struct rta_cache *nc = rta_alloc_hash(os * 2);
+ nc->count = c->count;
+
+ /* First we simply copy every chain to both new locations */
+ for (u32 h = 0; h < os; h++)
+ {
+ rta *r = atomic_load_explicit(&c->table[h], memory_order_relaxed);
+ atomic_store_explicit(&nc->table[h], r, memory_order_relaxed);
+ atomic_store_explicit(&nc->table[h + os], r, memory_order_relaxed);
+ }
+
+  /* Then we exchange the hashes; the release store ensures the copying above is visible first */
+ atomic_store_explicit(&rta_cache, nc, memory_order_release);
+
+ /* And now we pass through both chains and filter them */
+ for (u32 h = 0; h < c->size; h++)
+ {
+ rta * _Atomic * ap = &nc->table[h];
+ rta * _Atomic * bp = &nc->table[h + os];
+
+ rta *r = atomic_load_explicit(ap, memory_order_relaxed);
+ ASSERT_DIE(r == atomic_load_explicit(bp, memory_order_relaxed));
+
+ while (r)
+ {
+ if (r->hash_key & os)
{
- n = r->next;
- rta_insert(r);
+ r->pprev = bp;
+ atomic_store_explicit(bp, r, memory_order_release);
+ bp = &r->next;
}
- mb_free(oht);
+ else
+ {
+ r->pprev = ap;
+ atomic_store_explicit(ap, r, memory_order_release);
+ ap = &r->next;
+ }
+
+ r = atomic_load_explicit(&r->next, memory_order_acquire);
+ }
+
+ atomic_store_explicit(ap, NULL, memory_order_release);
+ atomic_store_explicit(bp, NULL, memory_order_release);
+ }
+
+ synchronize_rcu();
+ mb_free(c);
+}
+
+static rta *
+rta_find(rta *o, u32 h, struct rta_cache *c)
+{
+ rta *r = NULL;
+
+ for (r = atomic_load_explicit(&c->table[h & c->mask], memory_order_acquire); r; r = atomic_load_explicit(&r->next, memory_order_acquire))
+ if (r->hash_key == h && rta_same(r, o))
+ {
+ atomic_fetch_add_explicit(&r->uc, 1, memory_order_acq_rel);
+ return r;
+ }
+
+ return NULL;
}
/**
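For readers, the rehash above preserves one invariant: at every instant, both candidate slots of a key hold a complete chain containing every live entry. The lockless path of rta_lookup() below relies on exactly that; condensed into a standalone sketch (illustration only):

static rta *
example_lockless_lookup(rta *o, u32 h)
{
  rcu_read_lock();              /* delays synchronize_rcu() in rta_rehash() */
  struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
  rta *r = rta_find(o, h, c);   /* acquire-loads along a single chain */
  rcu_read_unlock();
  return r;                     /* use count already bumped by rta_find() */
}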
@@ -1198,45 +1344,117 @@ rta_lookup(rta *o)
rta *r;
uint h;
- ASSERT(!(o->aflags & RTAF_CACHED));
+ ASSERT(!o->cached);
if (o->eattrs)
ea_normalize(o->eattrs);
h = rta_hash(o);
- for(r=rta_hash_table[h & rta_cache_mask]; r; r=r->next)
- if (r->hash_key == h && rta_same(r, o))
- return rta_clone(r);
+ /* Lockless lookup */
+ rcu_read_lock();
+ r = rta_find(o, h, atomic_load_explicit(&rta_cache, memory_order_acquire));
+ rcu_read_unlock();
+
+ if (r)
+ return r;
+
+ RTA_LOCK;
+
+ /* Locked lookup to avoid duplicates if possible */
+ struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
+ r = rta_find(o, h, c);
+ if (r)
+ {
+ RTA_UNLOCK;
+ return r;
+ }
+
+ /* Store the rta */
r = rta_copy(o);
r->hash_key = h;
- r->aflags = RTAF_CACHED;
- rt_lock_source(r->src);
+ r->cached = 1;
rt_lock_hostentry(r->hostentry);
- rta_insert(r);
+ rta_insert(r, c);
- if (++rta_cache_count > rta_cache_limit)
- rta_rehash();
+ if (++c->count > c->limit)
+ rta_rehash(c);
+ RTA_UNLOCK;
return r;
}
-void
-rta__free(rta *a)
+static void
+rta_cleanup(void *data UNUSED)
{
- ASSERT(rta_cache_count && (a->aflags & RTAF_CACHED));
- rta_cache_count--;
- *a->pprev = a->next;
- if (a->next)
- a->next->pprev = a->pprev;
- rt_unlock_hostentry(a->hostentry);
- rt_unlock_source(a->src);
- if (a->nh.next)
- nexthop_free(a->nh.next);
- ea_free(a->eattrs);
- a->aflags = 0; /* Poison the entry */
- sl_free(rta_slab(a), a);
+ u32 count = 0;
+ rta *ax[RTA_OBSOLETE_LIMIT];
+
+ RTA_LOCK;
+ struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
+
+ for(u32 h=0; h<c->size; h++)
+ for(rta *a = atomic_load_explicit(&c->table[h], memory_order_acquire), *next;
+ a;
+ a = next)
+ {
+ next = atomic_load_explicit(&a->next, memory_order_acquire);
+ if (atomic_load_explicit(&a->uc, memory_order_acquire) > 0)
+ continue;
+
+ /* Check if the cleanup fits in the buffer */
+ if (count == RTA_OBSOLETE_LIMIT)
+ {
+ ev_send(&global_work_list, &rta_cleanup_event);
+ goto wait;
+ }
+
+ /* Relink the forward pointer */
+ atomic_store_explicit(a->pprev, next, memory_order_release);
+
+ /* Relink the backwards pointer */
+ if (next)
+ next->pprev = a->pprev;
+
+ /* Store for freeing and go to the next */
+ ax[count++] = a;
+ a = next;
+ }
+
+wait:
+ /* Wait until nobody knows about us */
+ synchronize_rcu();
+
+ u32 freed = 0;
+
+ for (u32 i=0; i<count; i++)
+ {
+ rta *a = ax[i];
+    /* Acquired in between, relink back */
+ if (atomic_load_explicit(&a->uc, memory_order_acquire))
+ {
+ rta_insert(a, c);
+ continue;
+ }
+
+ /* Cleared to free the memory */
+ rt_unlock_hostentry(a->hostentry);
+ if (a->nh.next)
+ nexthop_free(a->nh.next);
+ ea_free(a->eattrs);
+ a->cached = 0;
+ c->count--;
+ sl_free(rta_slab(a), a);
+ freed++;
+ }
+
+ atomic_fetch_sub_explicit(&rta_obsolete_count, freed, memory_order_release);
+
+ RTA_UNLOCK;
}
+_Atomic u32 rta_obsolete_count;
+event rta_cleanup_event = { .hook = rta_cleanup, .list = &global_work_list };
+
rta *
rta_do_cow(rta *o, linpool *lp)
{
@@ -1248,8 +1466,7 @@ rta_do_cow(rta *o, linpool *lp)
memcpy(*nhn, nho, nexthop_size(nho));
nhn = &((*nhn)->next);
}
- r->aflags = 0;
- r->uc = 0;
+ rta_uncache(r);
return r;
}
@@ -1260,22 +1477,22 @@ rta_do_cow(rta *o, linpool *lp)
* This function takes a &rta and dumps its contents to the debug output.
*/
void
-rta_dump(rta *a)
+rta_dump(const rta *a)
{
- static char *rts[] = { "RTS_DUMMY", "RTS_STATIC", "RTS_INHERIT", "RTS_DEVICE",
+ static char *rts[] = { "", "RTS_STATIC", "RTS_INHERIT", "RTS_DEVICE",
"RTS_STAT_DEV", "RTS_REDIR", "RTS_RIP",
"RTS_OSPF", "RTS_OSPF_IA", "RTS_OSPF_EXT1",
"RTS_OSPF_EXT2", "RTS_BGP", "RTS_PIPE", "RTS_BABEL" };
static char *rtd[] = { "", " DEV", " HOLE", " UNREACH", " PROHIBIT" };
- debug("p=%s uc=%d %s %s%s h=%04x",
- a->src->proto->name, a->uc, rts[a->source], ip_scope_text(a->scope),
+ debug("pref=%d uc=%d %s %s%s h=%04x",
+ a->pref, a->uc, rts[a->source], ip_scope_text(a->scope),
rtd[a->dest], a->hash_key);
- if (!(a->aflags & RTAF_CACHED))
+ if (!a->cached)
debug(" !CACHED");
debug(" <-%I", a->from);
if (a->dest == RTD_UNICAST)
- for (struct nexthop *nh = &(a->nh); nh; nh = nh->next)
+ for (const struct nexthop *nh = &(a->nh); nh; nh = nh->next)
{
if (ipa_nonzero(nh->gw)) debug(" ->%I", nh->gw);
if (nh->labels) debug(" L %d", nh->label[0]);
@@ -1302,19 +1519,27 @@ rta_dump_all(void)
rta *a;
uint h;
- debug("Route attribute cache (%d entries, rehash at %d):\n", rta_cache_count, rta_cache_limit);
- for(h=0; h<rta_cache_size; h++)
- for(a=rta_hash_table[h]; a; a=a->next)
+ RTA_LOCK;
+
+ struct rta_cache *c = atomic_load_explicit(&rta_cache, memory_order_acquire);
+
+ debug("Route attribute cache (%d entries, rehash at %d):\n", c->count, c->limit);
+ for(h=0; h<c->size; h++)
+ for(a = atomic_load_explicit(&c->table[h], memory_order_acquire);
+ a;
+ a = atomic_load_explicit(&a->next, memory_order_acquire))
{
debug("%p ", a);
rta_dump(a);
debug("\n");
}
debug("\n");
+
+ RTA_UNLOCK;
}
void
-rta_show(struct cli *c, rta *a)
+rta_show(struct cli *c, const rta *a)
{
cli_printf(c, -1008, "\tType: %s %s", rta_src_names[a->source], ip_scope_text(a->scope));
@@ -1332,7 +1557,9 @@ rta_show(struct cli *c, rta *a)
void
rta_init(void)
{
- rta_pool = rp_new(&root_pool, "Attributes");
+ attrs_domain = DOMAIN_NEW(attrs, "Attributes");
+
+ rta_pool = rp_new(&root_pool, &main_birdloop, "Attributes");
rta_slab_[0] = sl_new(rta_pool, sizeof(rta));
rta_slab_[1] = sl_new(rta_pool, sizeof(rta) + sizeof(u32));
@@ -1344,7 +1571,7 @@ rta_init(void)
nexthop_slab_[2] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*2);
nexthop_slab_[3] = sl_new(rta_pool, sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK);
- rta_alloc_hash();
+ atomic_store_explicit(&rta_cache, rta_alloc_hash(32), memory_order_relaxed);
rte_src_init();
}
diff --git a/nest/rt-dev.c b/nest/rt-dev.c
index 61f025ce..c1251675 100644
--- a/nest/rt-dev.c
+++ b/nest/rt-dev.c
@@ -67,13 +67,11 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
/* Use iface ID as local source ID */
struct rte_src *src = rt_get_source(P, ad->iface->index);
- rte_update2(c, net, NULL, src);
+ rte_update(c, net, NULL, src);
+ rt_unlock_source(src);
}
else if (flags & IF_CHANGE_UP)
{
- rta *a;
- rte *e;
-
DBG("dev_if_notify: %s:%I going up\n", ad->iface->name, ad->ip);
if (cf->check_link && !(ad->iface->flags & IF_LINK_UP))
@@ -83,17 +81,20 @@ dev_ifa_notify(struct proto *P, uint flags, struct ifa *ad)
struct rte_src *src = rt_get_source(P, ad->iface->index);
rta a0 = {
- .src = src,
+ .pref = c->preference,
.source = RTS_DEVICE,
.scope = SCOPE_UNIVERSE,
.dest = RTD_UNICAST,
.nh.iface = ad->iface,
};
- a = rta_lookup(&a0);
- e = rte_get_temp(a);
- e->pflags = 0;
- rte_update2(c, net, e, src);
+ rte e0 = {
+ .attrs = rta_lookup(&a0),
+ .src = src,
+ };
+
+ rte_update(c, net, &e0, src);
+ rt_unlock_source(src);
}
}
diff --git a/nest/rt-show.c b/nest/rt-show.c
index 7691878d..65b59af4 100644
--- a/nest/rt-show.c
+++ b/nest/rt-show.c
@@ -56,17 +56,17 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
if (d->verbose && !rta_is_cached(a) && a->eattrs)
ea_normalize(a->eattrs);
- get_route_info = a->src->proto->proto->get_route_info;
+ get_route_info = e->src->owner->class ? e->src->owner->class->get_route_info : NULL;
if (get_route_info)
get_route_info(e, info);
else
- bsprintf(info, " (%d)", e->pref);
+ bsprintf(info, " (%d)", a->pref);
if (d->last_table != d->tab)
rt_show_table(c, d);
cli_printf(c, -1007, "%-20s %s [%s %s%s]%s%s", ia, rta_dest_name(a->dest),
- a->src->proto->name, tm, from, primary ? (sync_error ? " !" : " *") : "", info);
+ e->src->owner->name, tm, from, primary ? (sync_error ? " !" : " *") : "", info);
if (a->dest == RTD_UNICAST)
for (nh = &(a->nh); nh; nh = nh->next)
@@ -95,13 +95,38 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
}
if (d->verbose)
+ {
+ cli_printf(c, -1008, "\tInternal route ID: %uL %uG %uS", e->src->private_id, e->src->global_id, e->stale_cycle);
rta_show(c, a);
+ }
+}
+
+static uint
+rte_feed_count(net *n)
+{
+ uint count = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTES_OR_NULL(e)))
+ count++;
+ return count;
+}
+
+static void
+rte_feed_obtain(net *n, rte **feed, uint count)
+{
+ uint i = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTES_OR_NULL(e)))
+ {
+ ASSERT_DIE(i < count);
+ feed[i++] = &e->rte;
+ }
+ ASSERT_DIE(i == count);
}
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
- rte *e, *ee;
byte ia[NET_MAX_TEXT_LENGTH+1];
struct channel *ec = d->tab->export_channel;
@@ -114,9 +139,9 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
bsnprintf(ia, sizeof(ia), "%N", n->n.addr);
- for (e = n->routes; e; e = e->next)
+ for (struct rte_storage *er = n->routes; er; er = er->next)
{
- if (rte_is_filtered(e) != d->filtered)
+ if (rte_is_filtered(&er->rte) != d->filtered)
continue;
d->rt_counter++;
@@ -126,16 +151,15 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
if (pass)
continue;
- ee = e;
- rte_make_tmp_attrs(&e, c->show_pool, NULL);
+ struct rte e = er->rte;
/* Export channel is down, do not try to export routes to it */
- if (ec && (ec->export_state == ES_DOWN))
+ if (ec && !ec->out_req.hook)
goto skip;
if (d->export_mode == RSEM_EXPORTED)
{
- if (!bmap_test(&ec->export_map, ee->id))
+ if (!bmap_test(&ec->export_map, e.id))
goto skip;
// if (ec->ra_mode != RA_ANY)
@@ -144,17 +168,24 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
else if ((d->export_mode == RSEM_EXPORT) && (ec->ra_mode == RA_MERGED))
{
/* Special case for merged export */
- rte *rt_free;
- e = rt_export_merged(ec, n, &rt_free, c->show_pool, 1);
pass = 1;
+ uint count = rte_feed_count(n);
+ if (!count)
+ goto skip;
- if (!e)
- { e = ee; goto skip; }
+ rte **feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(n, feed, count);
+ rte *em = rt_export_merged(ec, feed, count, c->show_pool, 1);
+
+ if (em)
+ e = *em;
+ else
+ goto skip;
}
else if (d->export_mode)
{
struct proto *ep = ec->proto;
- int ic = ep->preexport ? ep->preexport(ep, &e, c->show_pool) : 0;
+ int ic = ep->preexport ? ep->preexport(ec, &e) : 0;
if (ec->ra_mode == RA_OPTIMAL || ec->ra_mode == RA_MERGED)
pass = 1;
@@ -180,24 +211,19 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
}
}
- if (d->show_protocol && (d->show_protocol != e->attrs->src->proto))
+ if (d->show_protocol && (&d->show_protocol->sources != e.src->owner))
goto skip;
if (f_run(d->filter, &e, c->show_pool, 0) > F_ACCEPT)
goto skip;
if (d->stats < 2)
- rt_show_rte(c, ia, e, d, (e->net->routes == ee));
+ rt_show_rte(c, ia, &e, d, (n->routes == er));
d->show_counter++;
ia[0] = 0;
skip:
- if (e != ee)
- {
- rte_free(e);
- e = ee;
- }
lp_flush(c->show_pool);
if (d->primary_only)
@@ -213,11 +239,13 @@ rt_show_cleanup(struct cli *c)
/* Unlink the iterator */
if (d->table_open)
- fit_get(&d->tab->table->fib, &d->fit);
+ RT_LOCKED(d->tab->table, t)
+ fit_get(&t->fib, &d->fit);
/* Unlock referenced tables */
WALK_LIST(tab, d->tables)
- rt_unlock_table(tab->table);
+ RT_LOCKED(tab->table, t)
+ rt_unlock_table(t);
}
static void
@@ -229,8 +257,6 @@ rt_show_cont(struct cli *c)
#else
unsigned max = 64;
#endif
- struct fib *fib = &d->tab->table->fib;
- struct fib_iterator *it = &d->fit;
if (d->running_on_config && (d->running_on_config != config))
{
@@ -238,9 +264,14 @@ rt_show_cont(struct cli *c)
goto done;
}
+ rtable_private *t = RT_LOCK(d->tab->table);
+
+ struct fib *fib = &t->fib;
+ struct fib_iterator *it = &d->fit;
+
if (!d->table_open)
{
- FIB_ITERATE_INIT(&d->fit, &d->tab->table->fib);
+ FIB_ITERATE_INIT(&d->fit, fib);
d->table_open = 1;
d->table_counter++;
d->kernel = rt_show_get_kernel(d);
@@ -258,6 +289,7 @@ rt_show_cont(struct cli *c)
if (!max--)
{
FIB_ITERATE_PUT(it);
+ RT_UNLOCK(d->tab->table);
return;
}
rt_show_net(c, n, d);
@@ -274,6 +306,8 @@ rt_show_cont(struct cli *c)
d->net_counter - d->net_counter_last, d->tab->table->name);
}
+ RT_UNLOCK(d->tab->table);
+
d->kernel = NULL;
d->table_open = 0;
d->tab = NODE_NEXT(d->tab);
@@ -322,7 +356,7 @@ rt_show_get_default_tables(struct rt_show_data *d)
{
WALK_LIST(c, d->export_protocol->channels)
{
- if (c->export_state == ES_DOWN)
+ if (!c->out_req.hook)
continue;
tab = rt_show_add_table(d, c->table);
@@ -339,7 +373,7 @@ rt_show_get_default_tables(struct rt_show_data *d)
}
for (int i=1; i<NET_MAX; i++)
- if (config->def_tables[i])
+ if (config->def_tables[i] && config->def_tables[i]->table)
rt_show_add_table(d, config->def_tables[i]->table);
}
@@ -405,7 +439,8 @@ rt_show(struct rt_show_data *d)
if (!d->addr)
{
WALK_LIST(tab, d->tables)
- rt_lock_table(tab->table);
+ RT_LOCKED(tab->table, t)
+ rt_lock_table(t);
/* There is at least one table */
d->tab = HEAD(d->tables);
@@ -420,13 +455,17 @@ rt_show(struct rt_show_data *d)
d->tab = tab;
d->kernel = rt_show_get_kernel(d);
+ RT_LOCK(tab->table);
+
if (d->show_for)
- n = net_route(tab->table, d->addr);
+ n = net_route(RT_PRIV(tab->table), d->addr);
else
- n = net_find(tab->table, d->addr);
+ n = net_find(RT_PRIV(tab->table), d->addr);
if (n)
rt_show_net(this_cli, n, d);
+
+ RT_UNLOCK(tab->table);
}
if (d->rt_counter)
diff --git a/nest/rt-table.c b/nest/rt-table.c
index 390b3277..5f1e1679 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -45,28 +45,85 @@
#include "lib/string.h"
#include "lib/alloca.h"
-#ifdef CONFIG_BGP
-#include "proto/bgp/bgp.h"
-#endif
+#include <stdatomic.h>
pool *rt_table_pool;
-static slab *rte_slab;
-static linpool *rte_update_pool;
-
list routing_tables;
-static void rt_free_hostcache(rtable *tab);
-static void rt_notify_hostcache(rtable *tab, net *net);
-static void rt_update_hostcache(rtable *tab);
-static void rt_next_hop_update(rtable *tab);
-static inline void rt_prune_table(rtable *tab);
-static inline void rt_schedule_notify(rtable *tab);
+/* Data structures for export journal */
+#define RT_PENDING_EXPORT_ITEMS (page_size - sizeof(struct rt_export_block)) / sizeof(struct rt_pending_export)
+struct rt_export_block {
+ node n;
+ _Atomic u32 end;
+ _Atomic _Bool not_last;
+ struct rt_pending_export export[];
+};
+
+static void rt_free_hostcache(rtable_private *tab);
+static void rt_notify_hostcache(rtable_private *tab, net *net);
+static void rt_update_hostcache(void *tab);
+static void rt_next_hop_update(void *tab);
+static inline void rt_prune_table(void *tab);
+static void rt_fast_prune_check(rtable_private *tab);
+static inline void rt_schedule_notify(rtable_private *tab);
+static void rt_feed_channel(void *);
+
+static inline void rt_export_used(rtable_private *tab);
+static void rt_export_cleanup(void *tab);
+
+const char *rt_import_state_name_array[TIS_MAX] = {
+ [TIS_DOWN] = "DOWN",
+ [TIS_UP] = "UP",
+ [TIS_STOP] = "STOP",
+ [TIS_FLUSHING] = "FLUSHING",
+ [TIS_WAITING] = "WAITING",
+ [TIS_CLEARED] = "CLEARED",
+};
+
+const char *rt_export_state_name_array[TES_MAX] = {
+ [TES_DOWN] = "DOWN",
+ [TES_HUNGRY] = "HUNGRY",
+ [TES_FEEDING] = "FEEDING",
+ [TES_READY] = "READY",
+ [TES_STOP] = "STOP"
+};
+
+const char *rt_import_state_name(u8 state)
+{
+ if (state >= TIS_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_import_state_name_array[state];
+}
+
+const char *rt_export_state_name(u8 state)
+{
+ if (state >= TES_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_export_state_name_array[state];
+}
+
+struct event_cork rt_cork;
+
+static inline void
+rte_update_lock(struct channel *c)
+{
+ c->rte_update_nest_cnt++;
+}
+
+static inline void
+rte_update_unlock(struct channel *c)
+{
+ if (!--c->rte_update_nest_cnt)
+ lp_flush(c->rte_update_pool);
+}
/* Like fib_route(), but skips empty net entries */
static inline void *
-net_route_ip4(rtable *t, net_addr_ip4 *n)
+net_route_ip4(rtable_private *t, net_addr_ip4 *n)
{
net *r;
@@ -80,7 +137,7 @@ net_route_ip4(rtable *t, net_addr_ip4 *n)
}
static inline void *
-net_route_ip6(rtable *t, net_addr_ip6 *n)
+net_route_ip6(rtable_private *t, net_addr_ip6 *n)
{
net *r;
@@ -94,7 +151,7 @@ net_route_ip6(rtable *t, net_addr_ip6 *n)
}
static inline void *
-net_route_ip6_sadr(rtable *t, net_addr_ip6_sadr *n)
+net_route_ip6_sadr(rtable_private *t, net_addr_ip6_sadr *n)
{
struct fib_node *fn;
@@ -133,7 +190,7 @@ net_route_ip6_sadr(rtable *t, net_addr_ip6_sadr *n)
}
void *
-net_route(rtable *tab, const net_addr *n)
+net_route(rtable_private *tab, const net_addr *n)
{
ASSERT(tab->addr_type == n->type);
@@ -162,12 +219,15 @@ net_route(rtable *tab, const net_addr *n)
static int
-net_roa_check_ip4(rtable *tab, const net_addr_ip4 *px, u32 asn)
+net_roa_check_ip4(rtable *t, const net_addr_ip4 *px, u32 asn)
{
struct net_addr_roa4 n = NET_ADDR_ROA4(px->prefix, px->pxlen, 0, 0);
struct fib_node *fn;
int anything = 0;
+ RT_LOCK(t);
+ rtable_private *tab = RT_PRIV(t);
+
while (1)
{
for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
@@ -175,11 +235,14 @@ net_roa_check_ip4(rtable *tab, const net_addr_ip4 *px, u32 asn)
net_addr_roa4 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
- if (net_equal_prefix_roa4(roa, &n) && rte_is_valid(r->routes))
+ if (net_equal_prefix_roa4(roa, &n) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
+ {
+ RT_UNLOCK(tab);
return ROA_VALID;
+ }
}
}
@@ -190,16 +253,20 @@ net_roa_check_ip4(rtable *tab, const net_addr_ip4 *px, u32 asn)
ip4_clrbit(&n.prefix, n.pxlen);
}
+ RT_UNLOCK(tab);
return anything ? ROA_INVALID : ROA_UNKNOWN;
}
static int
-net_roa_check_ip6(rtable *tab, const net_addr_ip6 *px, u32 asn)
+net_roa_check_ip6(rtable *t, const net_addr_ip6 *px, u32 asn)
{
struct net_addr_roa6 n = NET_ADDR_ROA6(px->prefix, px->pxlen, 0, 0);
struct fib_node *fn;
int anything = 0;
+ RT_LOCK(t);
+ rtable_private *tab = RT_PRIV(t);
+
while (1)
{
for (fn = fib_get_chain(&tab->fib, (net_addr *) &n); fn; fn = fn->next)
@@ -207,11 +274,14 @@ net_roa_check_ip6(rtable *tab, const net_addr_ip6 *px, u32 asn)
net_addr_roa6 *roa = (void *) fn->addr;
net *r = fib_node_to_user(&tab->fib, fn);
- if (net_equal_prefix_roa6(roa, &n) && rte_is_valid(r->routes))
+ if (net_equal_prefix_roa6(roa, &n) && r->routes && rte_is_valid(&r->routes->rte))
{
anything = 1;
if (asn && (roa->asn == asn) && (roa->max_pxlen >= px->pxlen))
+ {
+ RT_UNLOCK(tab);
return ROA_VALID;
+ }
}
}
@@ -222,6 +292,7 @@ net_roa_check_ip6(rtable *tab, const net_addr_ip6 *px, u32 asn)
ip6_clrbit(&n.prefix, n.pxlen);
}
+ RT_UNLOCK(tab);
return anything ? ROA_INVALID : ROA_UNKNOWN;
}
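Since net_roa_check() now locks the table internally, callers need no locking of their own. A hypothetical caller translating the result (illustration only):

static const char *
example_roa_state(rtable *tab, const net_addr *n, u32 asn)
{
  switch (net_roa_check(tab, n, asn))
  {
  case ROA_VALID:   return "valid";    /* covered and ASN matches */
  case ROA_INVALID: return "invalid";  /* covered, but no matching ROA */
  default:          return "unknown";  /* ROA_UNKNOWN: no covering ROA */
  }
}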
@@ -256,253 +327,50 @@ net_roa_check(rtable *tab, const net_addr *n, u32 asn)
* @net: network node
* @src: route source
*
- * The rte_find() function returns a route for destination @net
- * which is from route source @src.
+ * The rte_find() function returns a pointer to a route for destination @net
+ * which is from route source @src. A pointer to the list end is returned if no route is found.
*/
-rte *
+static struct rte_storage **
rte_find(net *net, struct rte_src *src)
{
- rte *e = net->routes;
-
- while (e && e->attrs->src != src)
- e = e->next;
- return e;
-}
-
-/**
- * rte_get_temp - get a temporary &rte
- * @a: attributes to assign to the new route (a &rta; in case it's
- * un-cached, rte_update() will create a cached copy automatically)
- *
- * Create a temporary &rte and bind it with the attributes @a.
- * Also set route preference to the default preference set for
- * the protocol.
- */
-rte *
-rte_get_temp(rta *a)
-{
- rte *e = sl_alloc(rte_slab);
+ struct rte_storage **e = &net->routes;
- e->attrs = a;
- e->id = 0;
- e->flags = 0;
- e->pref = 0;
- return e;
-}
+ while ((*e) && (*e)->rte.src != src)
+ e = &(*e)->next;
-rte *
-rte_do_cow(rte *r)
-{
- rte *e = sl_alloc(rte_slab);
-
- memcpy(e, r, sizeof(rte));
- e->attrs = rta_clone(r->attrs);
- e->flags = 0;
return e;
}
-/**
- * rte_cow_rta - get a private writable copy of &rte with writable &rta
- * @r: a route entry to be copied
- * @lp: a linpool from which to allocate &rta
- *
- * rte_cow_rta() takes a &rte and prepares it and associated &rta for
- * modification. There are three possibilities: First, both &rte and &rta are
- * private copies, in that case they are returned unchanged. Second, &rte is
- * private copy, but &rta is cached, in that case &rta is duplicated using
- * rta_do_cow(). Third, both &rte is shared and &rta is cached, in that case
- * both structures are duplicated by rte_do_cow() and rta_do_cow().
- *
- * Note that in the second case, cached &rta loses one reference, while private
- * copy created by rta_do_cow() is a shallow copy sharing indirect data (eattrs,
- * nexthops, ...) with it. To work properly, original shared &rta should have
- * another reference during the life of created private copy.
- *
- * Result: a pointer to the new writable &rte with writable &rta.
- */
-rte *
-rte_cow_rta(rte *r, linpool *lp)
-{
- if (!rta_is_cached(r->attrs))
- return r;
-
- r = rte_cow(r);
- rta *a = rta_do_cow(r->attrs, lp);
- rta_free(r->attrs);
- r->attrs = a;
- return r;
-}
-
-/**
- * rte_init_tmp_attrs - initialize temporary ea_list for route
- * @r: route entry to be modified
- * @lp: linpool from which to allocate attributes
- * @max: maximum number of added temporary attribus
- *
- * This function is supposed to be called from make_tmp_attrs() and
- * store_tmp_attrs() hooks before rte_make_tmp_attr() / rte_store_tmp_attr()
- * functions. It allocates &ea_list with length for @max items for temporary
- * attributes and puts it on top of eattrs stack.
- */
-void
-rte_init_tmp_attrs(rte *r, linpool *lp, uint max)
+static struct rte_storage *
+rte_store(const rte *r, net *net, rtable_private *tab)
{
- struct ea_list *e = lp_alloc(lp, sizeof(struct ea_list) + max * sizeof(eattr));
+ struct rte_storage *e = sl_alloc(tab->rte_slab);
- e->next = r->attrs->eattrs;
- e->flags = EALF_SORTED | EALF_TEMP;
- e->count = 0;
+ e->rte = *r;
+ e->rte.net = net->n.addr;
- r->attrs->eattrs = e;
-}
+ rt_lock_source(e->rte.src);
-/**
- * rte_make_tmp_attr - make temporary eattr from private route fields
- * @r: route entry to be modified
- * @id: attribute ID
- * @type: attribute type
- * @val: attribute value (u32 or adata ptr)
- *
- * This function is supposed to be called from make_tmp_attrs() hook for
- * each temporary attribute, after temporary &ea_list was initialized by
- * rte_init_tmp_attrs(). It checks whether temporary attribute is supposed to
- * be defined (based on route pflags) and if so then it fills &eattr field in
- * preallocated temporary &ea_list on top of route @r eattrs stack.
- *
- * Note that it may require free &eattr in temporary &ea_list, so it must not be
- * called more times than @max argument of rte_init_tmp_attrs().
- */
-void
-rte_make_tmp_attr(rte *r, uint id, uint type, uintptr_t val)
-{
- if (r->pflags & EA_ID_FLAG(id))
- {
- ea_list *e = r->attrs->eattrs;
- eattr *a = &e->attrs[e->count++];
- a->id = id;
- a->type = type;
- a->flags = 0;
-
- if (type & EAF_EMBEDDED)
- a->u.data = (u32) val;
- else
- a->u.ptr = (struct adata *) val;
- }
+ return e;
}
/**
- * rte_store_tmp_attr - store temporary eattr to private route fields
- * @r: route entry to be modified
- * @id: attribute ID
- *
- * This function is supposed to be called from store_tmp_attrs() hook for
- * each temporary attribute, after temporary &ea_list was initialized by
- * rte_init_tmp_attrs(). It checks whether temporary attribute is defined in
- * route @r eattrs stack, updates route pflags accordingly, undefines it by
- * filling &eattr field in preallocated temporary &ea_list on top of the eattrs
- * stack, and returns the value. Caller is supposed to store it in the
- * appropriate private field.
+ * rte_free - delete a &rte
+ * @e: &struct rte_storage to be deleted
+ * @tab: the table which the rte belongs to
*
- * Note that it may require free &eattr in temporary &ea_list, so it must not be
- * called more times than @max argument of rte_init_tmp_attrs()
+ * rte_free() deletes the given &rte from the routing table it's linked to.
*/
-uintptr_t
-rte_store_tmp_attr(rte *r, uint id)
-{
- ea_list *e = r->attrs->eattrs;
- eattr *a = ea_find(e->next, id);
- if (a)
- {
- e->attrs[e->count++] = (struct eattr) { .id = id, .type = EAF_TYPE_UNDEF };
- r->pflags |= EA_ID_FLAG(id);
- return (a->type & EAF_EMBEDDED) ? a->u.data : (uintptr_t) a->u.ptr;
- }
- else
- {
- r->pflags &= ~EA_ID_FLAG(id);
- return 0;
- }
-}
-
-/**
- * rte_make_tmp_attrs - prepare route by adding all relevant temporary route attributes
- * @r: route entry to be modified (may be replaced if COW)
- * @lp: linpool from which to allocate attributes
- * @old_attrs: temporary ref to old &rta (may be NULL)
- *
- * This function expands privately stored protocol-dependent route attributes
- * to a uniform &eattr / &ea_list representation. It is essentially a wrapper
- * around protocol make_tmp_attrs() hook, which does some additional work like
- * ensuring that route @r is writable.
- *
- * The route @r may be read-only (with %REF_COW flag), in that case rw copy is
- * obtained by rte_cow() and @r is replaced. If @rte is originally rw, it may be
- * directly modified (and it is never copied).
- *
- * If the @old_attrs ptr is supplied, the function obtains another reference of
- * old cached &rta, that is necessary in some cases (see rte_cow_rta() for
- * details). It is freed by rte_store_tmp_attrs(), or manually by rta_free().
- *
- * Generally, if caller ensures that @r is read-only (e.g. in route export) then
- * it may ignore @old_attrs (and set it to NULL), but must handle replacement of
- * @r. If caller ensures that @r is writable (e.g. in route import) then it may
- * ignore replacement of @r, but it must handle @old_attrs.
- */
void
-rte_make_tmp_attrs(rte **r, linpool *lp, rta **old_attrs)
+rte_free(struct rte_storage *e, rtable_private *tab)
{
- void (*make_tmp_attrs)(rte *r, linpool *lp);
- make_tmp_attrs = (*r)->attrs->src->proto->make_tmp_attrs;
-
- if (!make_tmp_attrs)
- return;
-
- /* We may need to keep ref to old attributes, will be freed in rte_store_tmp_attrs() */
- if (old_attrs)
- *old_attrs = rta_is_cached((*r)->attrs) ? rta_clone((*r)->attrs) : NULL;
-
- *r = rte_cow_rta(*r, lp);
- make_tmp_attrs(*r, lp);
+ rt_unlock_source(e->rte.src);
+ rta_free(e->rte.attrs);
+ sl_free(tab->rte_slab, e);
}
-/**
- * rte_store_tmp_attrs - store temporary route attributes back to private route fields
- * @r: route entry to be modified
- * @lp: linpool from which to allocate attributes
- * @old_attrs: temporary ref to old &rta
- *
- * This function stores temporary route attributes that were expanded by
- * rte_make_tmp_attrs() back to private route fields and also undefines them.
- * It is essentially a wrapper around protocol store_tmp_attrs() hook, which
- * does some additional work like shortcut if there is no change and cleanup
- * of @old_attrs reference obtained by rte_make_tmp_attrs().
- */
-static void
-rte_store_tmp_attrs(rte *r, linpool *lp, rta *old_attrs)
-{
- void (*store_tmp_attrs)(rte *rt, linpool *lp);
- store_tmp_attrs = r->attrs->src->proto->store_tmp_attrs;
-
- if (!store_tmp_attrs)
- return;
-
- ASSERT(!rta_is_cached(r->attrs));
-
- /* If there is no new ea_list, we just skip the temporary ea_list */
- ea_list *ea = r->attrs->eattrs;
- if (ea && (ea->flags & EALF_TEMP))
- r->attrs->eattrs = ea->next;
- else
- store_tmp_attrs(r, lp);
-
- /* Free ref we got in rte_make_tmp_attrs(), have to do rta_lookup() first */
- r->attrs = rta_lookup(r->attrs);
- rta_free(old_attrs);
-}
-
-
static int /* Actually better or at least as good as */
rte_better(rte *new, rte *old)
{
@@ -513,20 +381,20 @@ rte_better(rte *new, rte *old)
if (!rte_is_valid(new))
return 0;
- if (new->pref > old->pref)
+ if (new->attrs->pref > old->attrs->pref)
return 1;
- if (new->pref < old->pref)
+ if (new->attrs->pref < old->attrs->pref)
return 0;
- if (new->attrs->src->proto->proto != old->attrs->src->proto->proto)
+ if (new->src->owner->class != old->src->owner->class)
{
/*
* If the user has configured protocol preferences so that two different protocols
* have the same preference, try to break the tie by comparing addresses. Not too
* useful, but keeps the ordering of routes unambiguous.
*/
- return new->attrs->src->proto->proto > old->attrs->src->proto->proto;
+ return new->src->owner->class > old->src->owner->class;
}
- if (better = new->attrs->src->proto->rte_better)
+ if (better = new->src->owner->class->rte_better)
return better(new, old);
return 0;
}
@@ -539,180 +407,209 @@ rte_mergable(rte *pri, rte *sec)
if (!rte_is_valid(pri) || !rte_is_valid(sec))
return 0;
- if (pri->pref != sec->pref)
+ if (pri->attrs->pref != sec->attrs->pref)
return 0;
- if (pri->attrs->src->proto->proto != sec->attrs->src->proto->proto)
+ if (pri->src->owner->class != sec->src->owner->class)
return 0;
- if (mergable = pri->attrs->src->proto->rte_mergable)
+ if (mergable = pri->src->owner->class->rte_mergable)
return mergable(pri, sec);
return 0;
}
static void
-rte_trace(struct channel *c, rte *e, int dir, char *msg)
+rte_trace(const char *name, const rte *e, int dir, const char *msg)
{
- log(L_TRACE "%s.%s %c %s %N %s",
- c->proto->name, c->name ?: "?", dir, msg, e->net->n.addr,
- rta_dest_name(e->attrs->dest));
+ log(L_TRACE "%s %c %s %N src %uL %uG %uS id %u %s%s",
+ name, dir, msg, e->net,
+ e->src->private_id, e->src->global_id, e->stale_cycle, e->id,
+ rta_dest_name(e->attrs->dest),
+ rte_is_filtered(e) ? " (filtered)" : "");
}
static inline void
-rte_trace_in(uint flag, struct channel *c, rte *e, char *msg)
+channel_rte_trace_in(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
- rte_trace(c, e, '>', msg);
+ rte_trace(c->in_req.name, e, '>', msg);
}
static inline void
-rte_trace_out(uint flag, struct channel *c, rte *e, char *msg)
+channel_rte_trace_out(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
- rte_trace(c, e, '<', msg);
+ rte_trace(c->out_req.name, e, '<', msg);
+}
+
+static inline void
+rt_rte_trace_in(uint flag, struct rt_import_request *req, const rte *e, const char *msg)
+{
+ if (req->trace_routes & flag)
+ rte_trace(req->name, e, '>', msg);
+}
+
+#if 0
+// Seems to be completely unused
+static inline void
+rt_rte_trace_out(uint flag, struct rt_export_request *req, const rte *e, const char *msg)
+{
+ if (req->trace_routes & flag)
+ rte_trace(req->name, e, '<', msg);
+}
+#endif
+
+static uint
+rte_feed_count(net *n)
+{
+ uint count = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTES_OR_NULL(e)))
+ count++;
+ return count;
+}
+
+static void
+rte_feed_obtain(net *n, struct rte **feed, uint count)
+{
+ uint i = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTES_OR_NULL(e)))
+ {
+ ASSERT_DIE(i < count);
+ feed[i++] = &e->rte;
+ }
+ ASSERT_DIE(i == count);
}
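
A minimal sketch of how these two helpers are meant to be paired (the same alloca pattern appears below in rte_export()); `n` is assumed to come from a locked table:

    static void
    feed_example(net *n)
    {
      /* Hypothetical caller: collect all valid routes of n into a feed array. */
      uint count = rte_feed_count(n);
      rte **feed = NULL;
      if (count)
      {
        feed = alloca(count * sizeof(rte *));
        rte_feed_obtain(n, feed, count);	/* fills exactly count entries */
      }
      /* feed[0..count-1] now point at the valid entries of n->routes */
    }
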
static rte *
-export_filter_(struct channel *c, rte *rt0, rte **rt_free, linpool *pool, int silent)
+export_filter_(struct channel *c, rte *rt, linpool *pool, int silent)
{
struct proto *p = c->proto;
const struct filter *filter = c->out_filter;
- struct proto_stats *stats = &c->stats;
- rte *rt;
- int v;
+ struct channel_export_stats *stats = &c->export_stats;
- rt = rt0;
- *rt_free = NULL;
+ /* Do nothing if we have already rejected the route */
+ if (silent && bmap_test(&c->export_reject_map, rt->id))
+ goto reject_noset;
- v = p->preexport ? p->preexport(p, &rt, pool) : 0;
+ int v = p->preexport ? p->preexport(c, rt) : 0;
if (v < 0)
{
if (silent)
- goto reject;
+ goto reject_noset;
- stats->exp_updates_rejected++;
+ stats->updates_rejected++;
if (v == RIC_REJECT)
- rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
+ channel_rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
goto reject;
+
}
if (v > 0)
{
if (!silent)
- rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
+ channel_rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
goto accept;
}
- rte_make_tmp_attrs(&rt, pool, NULL);
-
v = filter && ((filter == FILTER_REJECT) ||
- (f_run(filter, &rt, pool,
+ (f_run(filter, rt, pool,
(silent ? FF_SILENT : 0)) > F_ACCEPT));
if (v)
{
if (silent)
goto reject;
- stats->exp_updates_filtered++;
- rte_trace_out(D_FILTERS, c, rt, "filtered out");
+ stats->updates_filtered++;
+ channel_rte_trace_out(D_FILTERS, c, rt, "filtered out");
goto reject;
}
-#ifdef CONFIG_PIPE
- /* Pipes need rte with stored tmpattrs, remaining protocols need expanded tmpattrs */
- if (p->proto == &proto_pipe)
- rte_store_tmp_attrs(rt, pool, NULL);
-#endif
-
accept:
- if (rt != rt0)
- *rt_free = rt;
+ /* We have accepted the route */
+ bmap_clear(&c->export_reject_map, rt->id);
return rt;
reject:
+ /* We have rejected the route by filter */
+ bmap_set(&c->export_reject_map, rt->id);
+
+reject_noset:
/* Discard temporary rte */
- if (rt != rt0)
- rte_free(rt);
return NULL;
}
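
The export_reject_map consulted above is a per-channel verdict cache: one bit per route id, set on filter reject and cleared on accept, letting silent re-runs skip the filters entirely. In isolation (a hypothetical helper, bmap API as used in this file):

    static _Bool
    export_verdict_cached_reject(struct channel *c, u32 route_id)
    {
      /* Nonzero iff this route id was rejected the last time it was filtered. */
      return bmap_test(&c->export_reject_map, route_id);
    }
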
static inline rte *
-export_filter(struct channel *c, rte *rt0, rte **rt_free, int silent)
+export_filter(struct channel *c, rte *rt, int silent)
{
- return export_filter_(c, rt0, rt_free, rte_update_pool, silent);
+ return export_filter_(c, rt, c->rte_update_pool, silent);
}
+void do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old);
+
static void
-do_rt_notify(struct channel *c, net *net, rte *new, rte *old, int refeed)
+do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
{
- struct proto *p = c->proto;
- struct proto_stats *stats = &c->stats;
+ struct channel_export_stats *stats = &c->export_stats;
- if (refeed && new)
+ if (c->refeeding && new)
c->refeed_count++;
- /* Apply export limit */
- struct channel_limit *l = &c->out_limit;
- if (l->action && !old && new)
- {
- if (stats->exp_routes >= l->limit)
- channel_notify_limit(c, l, PLD_OUT, stats->exp_routes);
-
- if (l->state == PLS_BLOCKED)
+ if (!old && new)
+ if (CHANNEL_LIMIT_PUSH(c, OUT))
{
- stats->exp_updates_rejected++;
- rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
+ stats->updates_rejected++;
+ channel_rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
return;
}
- }
- /* Apply export table */
- if (c->out_table && !rte_update_out(c, net->n.addr, new, old, refeed))
- return;
-
- if (new)
- stats->exp_updates_accepted++;
- else
- stats->exp_withdraws_accepted++;
+ if (!new && old)
+ CHANNEL_LIMIT_POP(c, OUT);
+ /* Store route export state */
if (old)
- {
bmap_clear(&c->export_map, old->id);
- stats->exp_routes--;
- }
if (new)
- {
bmap_set(&c->export_map, new->id);
- stats->exp_routes++;
- }
+
+ /* Apply export table */
+ if (c->out_table)
+ rte_import(&c->out_table->push, net, new, old ? old->src : new->src);
+ else
+ do_rt_notify_direct(c, net, new, old);
+}
+
+void
+do_rt_notify_direct(struct channel *c, const net_addr *net, rte *new, const rte *old)
+{
+ struct proto *p = c->proto;
+ struct channel_export_stats *stats = &c->export_stats;
+
+ if (new)
+ stats->updates_accepted++;
+ else
+ stats->withdraws_accepted++;
if (p->debug & D_ROUTES)
{
if (new && old)
- rte_trace_out(D_ROUTES, c, new, "replaced");
+ channel_rte_trace_out(D_ROUTES, c, new, "replaced");
else if (new)
- rte_trace_out(D_ROUTES, c, new, "added");
+ channel_rte_trace_out(D_ROUTES, c, new, "added");
else if (old)
- rte_trace_out(D_ROUTES, c, old, "removed");
+ channel_rte_trace_out(D_ROUTES, c, old, "removed");
}
p->rt_notify(p, c, net, new, old);
}
static void
-rt_notify_basic(struct channel *c, net *net, rte *new, rte *old, int refeed)
+rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old)
{
- // struct proto *p = c->proto;
- rte *new_free = NULL;
-
- if (new)
- c->stats.exp_updates_received++;
- else
- c->stats.exp_withdraws_received++;
-
if (new)
- new = export_filter(c, new, &new_free, 0);
+ new = export_filter(c, new, 0);
if (old && !bmap_test(&c->export_map, old->id))
old = NULL;
@@ -720,87 +617,87 @@ rt_notify_basic(struct channel *c, net *net, rte *new, rte *old, int refeed)
if (!new && !old)
return;
- do_rt_notify(c, net, new, old, refeed);
-
- /* Discard temporary rte */
- if (new_free)
- rte_free(new_free);
+ do_rt_notify(c, net, new, old);
}
static void
-rt_notify_accepted(struct channel *c, net *net, rte *new_changed, rte *old_changed, int refeed)
+channel_rpe_mark_seen(struct rt_export_request *req, struct rt_pending_export *rpe)
{
- // struct proto *p = c->proto;
- rte *new_best = NULL;
- rte *old_best = NULL;
- rte *new_free = NULL;
- int new_first = 0;
-
- /*
- * We assume that there are no changes in net route order except (added)
- * new_changed and (removed) old_changed. Therefore, the function is not
- * compatible with deterministic_med (where nontrivial reordering can happen
- * as a result of a route change) and with recomputation of recursive routes
- * due to next hop update (where many routes can be changed in one step).
- *
- * Note that we need this assumption just for optimizations, we could just
- * run full new_best recomputation otherwise.
- *
- * There are three cases:
- * feed or old_best is old_changed -> we need to recompute new_best
- * old_best is before new_changed -> new_best is old_best, ignore
- * old_best is after new_changed -> try new_changed, otherwise old_best
- */
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
- if (net->routes)
- c->stats.exp_updates_received++;
- else
- c->stats.exp_withdraws_received++;
+ rpe_mark_seen(req->hook, rpe);
+ if (rpe->old)
+ bmap_clear(&c->export_reject_map, rpe->old->rte.id);
+}
- /* Find old_best - either old_changed, or route for net->routes */
- if (old_changed && bmap_test(&c->export_map, old_changed->id))
- old_best = old_changed;
- else
+void
+rt_notify_accepted(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe,
+ struct rte **feed, uint count)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+
+ rte_update_lock(c);
+
+ rte nb0, *new_best = NULL;
+ const rte *old_best = NULL;
+
+ for (uint i = 0; i < count; i++)
{
- for (rte *r = net->routes; rte_is_valid(r); r = r->next)
+ if (!rte_is_valid(feed[i]))
+ continue;
+
+ /* Has been already rejected, won't bother with it */
+ if (!c->refeeding && bmap_test(&c->export_reject_map, feed[i]->id))
+ continue;
+
+ /* Previously exported */
+ if (!old_best && bmap_test(&c->export_map, feed[i]->id))
{
- if (bmap_test(&c->export_map, r->id))
+ /* is still best */
+ if (!new_best)
{
- old_best = r;
- break;
+ DBG("rt_notify_accepted: idempotent\n");
+ goto done;
}
- /* Note if new_changed found before old_best */
- if (r == new_changed)
- new_first = 1;
+ /* is superseded */
+ old_best = feed[i];
+ break;
}
- }
- /* Find new_best */
- if ((new_changed == old_changed) || (old_best == old_changed))
- {
- /* Feed or old_best changed -> find first accepted by filters */
- for (rte *r = net->routes; rte_is_valid(r); r = r->next)
- if (new_best = export_filter(c, r, &new_free, 0))
- break;
+ /* Have no new best route yet */
+ if (!new_best)
+ {
+ /* Try this route not seen before */
+ nb0 = *feed[i];
+ new_best = export_filter(c, &nb0, 0);
+ DBG("rt_notify_accepted: checking route id %u: %s\n", feed[i]->id, new_best ? "ok" : "no");
+ }
}
- else
+
+done:
+ /* Check obsolete routes for previously exported */
+ while (rpe)
{
- /* Other cases -> either new_changed, or old_best (and nothing changed) */
- if (new_first && (new_changed = export_filter(c, new_changed, &new_free, 0)))
- new_best = new_changed;
- else
- return;
+ channel_rpe_mark_seen(req, rpe);
+ if (rpe->old)
+ {
+ if (bmap_test(&c->export_map, rpe->old->rte.id))
+ {
+ ASSERT_DIE(old_best == NULL);
+ old_best = &rpe->old->rte;
+ }
+ }
+ rpe = rpe_next(rpe, NULL);
}
- if (!new_best && !old_best)
- return;
-
- do_rt_notify(c, net, new_best, old_best, refeed);
+ /* Export the change if there is anything to export */
+ if (new_best || old_best)
+ do_rt_notify(c, n, new_best, old_best);
+ else
+ DBG("rt_notify_accepted: nothing to export\n");
- /* Discard temporary rte */
- if (new_free)
- rte_free(new_free);
+ rte_update_unlock(c);
}
@@ -811,38 +708,45 @@ nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
}
rte *
-rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int silent)
+rt_export_merged(struct channel *c, struct rte **feed, uint count, linpool *pool, int silent)
{
+ _Thread_local static rte rloc;
+
// struct proto *p = c->proto;
struct nexthop *nhs = NULL;
- rte *best0, *best, *rt0, *rt, *tmp;
-
- best0 = net->routes;
- *rt_free = NULL;
+ rte *best0 = feed[0];
+ rte *best = NULL;
if (!rte_is_valid(best0))
return NULL;
- best = export_filter_(c, best0, rt_free, pool, silent);
+ /* Already rejected, no need to re-run the filter */
+ if (!c->refeeding && bmap_test(&c->export_reject_map, best0->id))
+ return NULL;
+
+ rloc = *best0;
+ best = export_filter_(c, &rloc, pool, silent);
+
+ if (!best)
+ /* Best route doesn't pass the filter */
+ return NULL;
- if (!best || !rte_is_reachable(best))
+ if (!rte_is_reachable(best))
+ /* Unreachable routes can't be merged */
return best;
- for (rt0 = best0->next; rt0; rt0 = rt0->next)
+ for (uint i = 1; i < count; i++)
{
- if (!rte_mergable(best0, rt0))
+ if (!rte_mergable(best0, feed[i]))
continue;
- rt = export_filter_(c, rt0, &tmp, pool, 1);
+ rte tmp0 = *feed[i];
+ rte *tmp = export_filter_(c, &tmp0, pool, 1);
- if (!rt)
+ if (!tmp || !rte_is_reachable(tmp))
continue;
- if (rte_is_reachable(rt))
- nhs = nexthop_merge_rta(nhs, rt->attrs, pool, c->merge_limit);
-
- if (tmp)
- rte_free(tmp);
+ nhs = nexthop_merge_rta(nhs, tmp->attrs, pool, c->merge_limit);
}
if (nhs)
@@ -851,66 +755,208 @@ rt_export_merged(struct channel *c, net *net, rte **rt_free, linpool *pool, int
if (nhs->next)
{
- best = rte_cow_rta(best, pool);
+ best->attrs = rta_cow(best->attrs, pool);
nexthop_link(best->attrs, nhs);
}
}
- if (best != best0)
- *rt_free = best;
-
return best;
}
-
-static void
-rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed,
- rte *new_best, rte *old_best, int refeed)
+void
+rt_notify_merged(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe,
+ struct rte **feed, uint count)
{
- // struct proto *p = c->proto;
- rte *new_free = NULL;
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
- /* We assume that all rte arguments are either NULL or rte_is_valid() */
-
- /* This check should be done by the caller */
- if (!new_best && !old_best)
- return;
+ rte_update_lock(c);
+ // struct proto *p = c->proto;
+#if 0 /* TODO: Find whether this check is possible when processing multiple changes at once. */
/* Check whether the change is relevant to the merged route */
if ((new_best == old_best) &&
(new_changed != old_changed) &&
!rte_mergable(new_best, new_changed) &&
!rte_mergable(old_best, old_changed))
return;
+#endif
- if (new_best)
- c->stats.exp_updates_received++;
- else
- c->stats.exp_withdraws_received++;
+ rte *old_best = NULL;
+ /* Find old best route */
+ for (uint i = 0; i < count; i++)
+ if (bmap_test(&c->export_map, feed[i]->id))
+ {
+ old_best = feed[i];
+ break;
+ }
+
+ /* Check obsolete routes for previously exported */
+ while (rpe)
+ {
+ channel_rpe_mark_seen(req, rpe);
+ if (rpe->old)
+ {
+ if (bmap_test(&c->export_map, rpe->old->rte.id))
+ {
+ ASSERT_DIE(old_best == NULL);
+ old_best = &rpe->old->rte;
+ }
+ }
+ rpe = rpe_next(rpe, NULL);
+ }
/* Prepare new merged route */
- if (new_best)
- new_best = rt_export_merged(c, net, &new_free, rte_update_pool, 0);
+ rte *new_merged = count ? rt_export_merged(c, feed, count, c->rte_update_pool, 0) : NULL;
- /* Check old merged route */
- if (old_best && !bmap_test(&c->export_map, old_best->id))
- old_best = NULL;
+ if (new_merged || old_best)
+ do_rt_notify(c, n, new_merged, old_best);
- if (!new_best && !old_best)
- return;
+ rte_update_unlock(c);
+}
- do_rt_notify(c, net, new_best, old_best, refeed);
+void
+rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ rte_update_lock(c);
+ rte *old = RTES_OR_NULL(rpe->old_best);
+ struct rte_storage *new_best = rpe->new_best;
- /* Discard temporary rte */
- if (new_free)
- rte_free(new_free);
+ while (rpe)
+ {
+ channel_rpe_mark_seen(req, rpe);
+ new_best = rpe->new_best;
+ rpe = rpe_next(rpe, NULL);
+ }
+
+ if (&new_best->rte != old)
+ {
+ rte n0, *new = RTES_CLONE(new_best, &n0);
+ rt_notify_basic(c, net, new, old);
+ }
+
+ rte_update_unlock(c);
+}
+
+void
+rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ rte_update_lock(c);
+ struct rte_src *src = rpe->new ? rpe->new->rte.src : rpe->old->rte.src;
+ rte *old = RTES_OR_NULL(rpe->old);
+ struct rte_storage *new_any = rpe->new;
+
+ while (rpe)
+ {
+ channel_rpe_mark_seen(req, rpe);
+ new_any = rpe->new;
+ rpe = rpe_next(rpe, src);
+ }
+
+ if (&new_any->rte != old)
+ {
+ rte n0, *new = RTES_CLONE(new_any, &n0);
+ rt_notify_basic(c, net, new, old);
+ }
+
+ rte_update_unlock(c);
+}
+
+void
+rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ rte_update_lock(c);
+
+ for (uint i=0; i<count; i++)
+ {
+ rte n0 = *feed[i];
+ rt_notify_basic(c, net, &n0, NULL);
+ }
+
+ rte_update_unlock(c);
}
+void
+rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe)
+{
+ bmap_set(&hook->seq_map, rpe->seq);
+}
+
+struct rt_pending_export *
+rpe_next(struct rt_pending_export *rpe, struct rte_src *src)
+{
+ struct rt_pending_export *next = atomic_load_explicit(&rpe->next, memory_order_acquire);
+
+ if (!next)
+ return NULL;
+
+ if (!src)
+ return next;
+
+ while (rpe = next)
+ if (src == (rpe->new ? rpe->new->rte.src : rpe->old->rte.src))
+ return rpe;
+ else
+ next = atomic_load_explicit(&rpe->next, memory_order_acquire);
+
+ return NULL;
+}
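
A sketch of the iteration contract of the two helpers above, as used by the receivers earlier in this file; `hook` and `first` are the example's assumptions, and `first` is assumed to concern `src`:

    static void
    walk_pending_example(struct rt_export_hook *hook,
    		     struct rt_pending_export *first, struct rte_src *src)
    {
      /* Visit this and every later pending export concerning src,
       * marking each one as seen so it won't be processed twice. */
      for (struct rt_pending_export *rpe = first; rpe; rpe = rpe_next(rpe, src))
      {
        rpe_mark_seen(hook, rpe);
        /* rpe->new / rpe->old describe one change of src's route here */
      }
    }
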
+
+static struct rt_pending_export * rt_next_export_fast(struct rt_pending_export *last);
+static void
+rte_export(struct rt_export_hook *hook, struct rt_pending_export *rpe)
+{
+ if (bmap_test(&hook->seq_map, rpe->seq))
+ goto seen;
+
+ const net_addr *n = rpe->new_best ? rpe->new_best->rte.net : rpe->old_best->rte.net;
+
+ if (rpe->new)
+ hook->stats.updates_received++;
+ else
+ hook->stats.withdraws_received++;
+
+ if (hook->req->export_one)
+ hook->req->export_one(hook->req, n, rpe);
+ else if (hook->req->export_bulk)
+ {
+ RT_LOCK(hook->table);
+ net *net = SKIP_BACK(struct network, n.addr, (net_addr (*)[0]) n);
+ uint count = rte_feed_count(net);
+ rte **feed = NULL;
+ if (count)
+ {
+ feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(net, feed, count);
+ }
+ RT_UNLOCK(hook->table);
+ hook->req->export_bulk(hook->req, n, rpe, feed, count);
+ }
+ else
+ bug("Export request must always provide an export method");
+
+seen:
+ /* Get the next export if exists */
+ hook->rpe_next = rt_next_export_fast(rpe);
+
+ /* The last block may be available to free */
+ if (PAGE_HEAD(hook->rpe_next) != PAGE_HEAD(rpe))
+ {
+ RT_LOCK(hook->table);
+ rt_export_used(RT_PRIV(hook->table));
+ RT_UNLOCK(hook->table);
+ }
+
+ /* Releasing this export for cleanup routine */
+ DBG("store hook=%p last_export=%p seq=%lu\n", hook, rpe, rpe->seq);
+ atomic_store_explicit(&hook->last_export, rpe, memory_order_release);
+}
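
rte_export() above insists on export_one or export_bulk being set. A hypothetical minimal export_one receiver, following the squashing pattern of rt_notify_optimal() earlier in this file:

    static void
    my_export_one(struct rt_export_request *req, const net_addr *net,
    	      struct rt_pending_export *rpe)
    {
      struct rte_storage *new_best = rpe->new_best;

      /* Squash all pending exports of this net, keeping the newest best route. */
      while (rpe)
      {
        rpe_mark_seen(req->hook, rpe);
        new_best = rpe->new_best;
        rpe = rpe_next(rpe, NULL);
      }

      log(L_TRACE "%s: %N -> %s", req->name, net,
          new_best ? "has a best route" : "no route");
    }
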
/**
* rte_announce - announce a routing table change
* @tab: table the route has been added to
- * @type: type of route announcement (RA_UNDEF or RA_ANY)
* @net: network in question
* @new: the new or changed route
* @old: the previous route replaced by the new one
@@ -926,13 +972,6 @@ rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed
 * and @new_best and @old_best describe the best routes. Other routes are not
 * affected, but in a sorted table the order of other routes might change.
*
- * Second, There is a bulk change of multiple routes in @net, with shared best
- * route selection. In such case separate route changes are described using
- * @type of %RA_ANY, with @new and @old specifying the changed route, while
- * @new_best and @old_best are NULL. After that, another notification is done
- * where @new_best and @old_best are filled (may be the same), but @new and @old
- * are NULL.
- *
* The function announces the change to all associated channels. For each
* channel, an appropriate preprocessing is done according to channel &ra_mode.
* For example, %RA_OPTIMAL channels receive just changes of best routes.
@@ -947,306 +986,416 @@ rt_notify_merged(struct channel *c, net *net, rte *new_changed, rte *old_changed
* done outside of scope of rte_announce().
*/
static void
-rte_announce(rtable *tab, uint type, net *net, rte *new, rte *old,
- rte *new_best, rte *old_best)
+rte_announce(rtable_private *tab, net *net, struct rte_storage *new, struct rte_storage *old,
+ struct rte_storage *new_best, struct rte_storage *old_best)
{
- if (!rte_is_valid(new))
- new = NULL;
-
- if (!rte_is_valid(old))
- old = NULL;
-
- if (!rte_is_valid(new_best))
+ if (!new_best || !rte_is_valid(&new_best->rte))
new_best = NULL;
- if (!rte_is_valid(old_best))
+ if (!old_best || !rte_is_valid(&old_best->rte))
old_best = NULL;
- if (!new && !old && !new_best && !old_best)
+ if (!new || !rte_is_valid(&new->rte))
+ new = NULL;
+
+ if (old && !rte_is_valid(&old->rte))
+ {
+ /* Filtered old route isn't announced, should be freed immediately. */
+ rte_free(old, tab);
+ old = NULL;
+ }
+
+ if ((new == old) && (new_best == old_best))
return;
if (new_best != old_best)
{
if (new_best)
- new_best->sender->stats.pref_routes++;
+ new_best->rte.sender->stats.pref++;
if (old_best)
- old_best->sender->stats.pref_routes--;
+ old_best->rte.sender->stats.pref--;
if (tab->hostcache)
rt_notify_hostcache(tab, net);
}
+ if (EMPTY_LIST(tab->exports) && EMPTY_LIST(tab->pending_exports))
+ {
+ /* No export hook and no pending exports to clean up. We may free the route immediately. */
+ if (!old)
+ return;
+
+ hmap_clear(&tab->id_map, old->rte.id);
+ rte_free(old, tab);
+ return;
+ }
+
+ /* Get the pending export structure */
+ struct rt_export_block *rpeb = NULL, *rpebsnl = NULL;
+ u32 end = 0;
+
+ if (!EMPTY_LIST(tab->pending_exports))
+ {
+ rpeb = TAIL(tab->pending_exports);
+ end = atomic_load_explicit(&rpeb->end, memory_order_relaxed);
+ if (end >= RT_PENDING_EXPORT_ITEMS)
+ {
+ ASSERT_DIE(end == RT_PENDING_EXPORT_ITEMS);
+ rpebsnl = rpeb;
+
+ rpeb = NULL;
+ end = 0;
+ }
+ }
+
+ if (!rpeb)
+ {
+ rpeb = alloc_page();
+ *rpeb = (struct rt_export_block) {};
+ add_tail(&tab->pending_exports, &rpeb->n);
+ }
+
+ /* Fill the pending export */
+ struct rt_pending_export *rpe = &rpeb->export[rpeb->end];
+ *rpe = (struct rt_pending_export) {
+ .new = new,
+ .new_best = new_best,
+ .old = old,
+ .old_best = old_best,
+ .seq = tab->next_export_seq++,
+ };
+
+ DBG("rte_announce: table=%s net=%N new=%p from %p old=%p from %p new_best=%p old_best=%p seq=%lu\n", tab->name, net->n.addr, new, new ? new->sender : NULL, old, old ? old->sender : NULL, new_best, old_best, rpe->seq);
+
+ ASSERT_DIE(atomic_fetch_add_explicit(&rpeb->end, 1, memory_order_release) == end);
+
+ if (rpebsnl)
+ {
+ _Bool f = 0;
+ ASSERT_DIE(atomic_compare_exchange_strong_explicit(&rpebsnl->not_last, &f, 1,
+ memory_order_release, memory_order_relaxed));
+ }
+
+ /* Append to the same-network squasher list */
+ if (net->last)
+ {
+ struct rt_pending_export *rpenull = NULL;
+ ASSERT_DIE(atomic_compare_exchange_strong_explicit(
+ &net->last->next, &rpenull, rpe,
+ memory_order_relaxed,
+ memory_order_relaxed));
+
+ }
+
+ net->last = rpe;
+
+ if (!net->first)
+ net->first = rpe;
+
+ if (tab->first_export == NULL)
+ tab->first_export = rpe;
+
+ if (!EMPTY_LIST(tab->exports) &&
+ (tab->first_export->seq + tab->config->cork_limit <= tab->next_export_seq) &&
+ !tab->cork_active)
+ {
+ if (config->table_debug)
+ log(L_TRACE "%s: cork activated", tab->name);
+
+ ev_cork(&rt_cork);
+ tab->cork_active = 1;
+ }
+}
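
The cork condition at the end of rte_announce(), restated in isolation (sequence numbers assumed 64-bit, values illustrative):

    static inline _Bool
    cork_needed(u64 first_seq, u64 next_seq, u64 cork_limit)
    {
      /* The oldest unprocessed export lags at least cork_limit sequence
       * numbers behind the next one to be allocated: pause updates via
       * rt_cork until the exporters catch up. */
      return first_seq + cork_limit <= next_seq;
    }
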
+
+static struct rt_pending_export *
+rt_next_export_fast(struct rt_pending_export *last)
+{
+ /* Get the whole export block and find our position in there. */
+ struct rt_export_block *rpeb = PAGE_HEAD(last);
+ u32 pos = (last - &rpeb->export[0]);
+ u32 end = atomic_load_explicit(&rpeb->end, memory_order_acquire);
+ ASSERT_DIE(pos < end);
+
+ /* Next is in the same block. */
+ if (++pos < end)
+ return &rpeb->export[pos];
+
+ /* There is another block. */
+ if (atomic_load_explicit(&rpeb->not_last, memory_order_acquire))
+ {
+ /* This is OK to do non-atomically because of the not_last flag. */
+ rpeb = NODE_NEXT(rpeb);
+ return &rpeb->export[0];
+ }
+
+ /* There is nothing more. */
+ return NULL;
+}
+
+static struct rt_pending_export *
+rt_next_export(struct rt_export_hook *hook, rtable_private *tab)
+{
+ /* As the table is locked, it is safe to reload the last export pointer */
+ struct rt_pending_export *last = atomic_load_explicit(&hook->last_export, memory_order_acquire);
+
+ /* It is still valid, let's reuse it */
+ if (last)
+ return rt_next_export_fast(last);
+
+ /* No, therefore we must process the table's first pending export */
+ else
+ return tab->first_export;
+}
+
+static inline void
+rt_send_export_event(struct rt_export_hook *hook)
+{
+ ev_send(hook->req->list, hook->event);
+}
+
+static void
+rt_announce_exports(void *data)
+{
+ rtable_private *tab = data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
+
rt_schedule_notify(tab);
- struct channel *c; node *n;
- WALK_LIST2(c, n, tab->channels, table_node)
+ struct rt_export_hook *c; node *n;
+ WALK_LIST2(c, n, tab->exports, n)
{
- if (c->export_state == ES_DOWN)
+ if (atomic_load_explicit(&c->export_state, memory_order_acquire) != TES_READY)
continue;
- if (type && (type != c->ra_mode))
- continue;
+ rt_send_export_event(c);
+ }
+}
- switch (c->ra_mode)
+static void
+rt_import_announce_exports(void *data)
+{
+ struct rt_import_hook *hook = data;
+ RT_LOCKED(hook->table, tab)
+ {
+ if (hook->import_state == TIS_CLEARED)
{
- case RA_OPTIMAL:
- if (new_best != old_best)
- rt_notify_basic(c, net, new_best, old_best, 0);
- break;
+ rfree(hook->export_announce_event);
- case RA_ANY:
- if (new != old)
- rt_notify_basic(c, net, new, old, 0);
- break;
+ ev_send(hook->stopped->list, hook->stopped);
+ rem_node(&hook->n);
+ mb_free(hook);
+ rt_unlock_table(tab);
+ }
+ else
+ ev_send_loop(tab->loop, tab->announce_event);
+ }
+}
- case RA_ACCEPTED:
- rt_notify_accepted(c, net, new, old, 0);
- break;
+static struct rt_pending_export *
+rt_last_export(rtable_private *tab)
+{
+ struct rt_pending_export *rpe = NULL;
- case RA_MERGED:
- rt_notify_merged(c, net, new, old, new_best, old_best, 0);
- break;
+ if (!EMPTY_LIST(tab->pending_exports))
+ {
+ /* We'll continue processing exports from this one onward */
+ struct rt_export_block *reb = TAIL(tab->pending_exports);
+ ASSERT_DIE(reb->end);
+ rpe = &reb->export[reb->end - 1];
+ }
+
+ return rpe;
+}
+
+#define RT_EXPORT_BULK 1024
+
+static void
+rt_export_hook(void *_data)
+{
+ struct rt_export_hook *c = _data;
+
+ ASSERT_DIE(atomic_load_explicit(&c->export_state, memory_order_relaxed) == TES_READY);
+
+ if (!c->rpe_next)
+ {
+ RT_LOCK(c->table);
+ c->rpe_next = rt_next_export(c, RT_PRIV(c->table));
+
+ if (!c->rpe_next)
+ {
+ rt_export_used(RT_PRIV(c->table));
+ RT_UNLOCK(c->table);
+ return;
}
+
+ RT_UNLOCK(c->table);
+ }
+
+ /* Process the export */
+ for (uint i=0; i<RT_EXPORT_BULK; i++)
+ {
+ rte_export(c, c->rpe_next);
+
+ if (!c->rpe_next)
+ break;
}
+
+ rt_send_export_event(c);
}
+
static inline int
-rte_validate(rte *e)
+rte_validate(struct channel *ch, rte *e)
{
int c;
- net *n = e->net;
+ const net_addr *n = e->net;
- if (!net_validate(n->n.addr))
+ if (!net_validate(n))
{
log(L_WARN "Ignoring bogus prefix %N received via %s",
- n->n.addr, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
/* FIXME: better handling different nettypes */
- c = !net_is_flow(n->n.addr) ?
- net_classify(n->n.addr): (IADDR_HOST | SCOPE_UNIVERSE);
+ c = !net_is_flow(n) ?
+ net_classify(n): (IADDR_HOST | SCOPE_UNIVERSE);
if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
{
log(L_WARN "Ignoring bogus route %N received via %s",
- n->n.addr, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
- if (net_type_match(n->n.addr, NB_DEST) == !e->attrs->dest)
+ if (net_type_match(n, NB_DEST) == !e->attrs->dest)
{
log(L_WARN "Ignoring route %N with invalid dest %d received via %s",
- n->n.addr, e->attrs->dest, e->sender->proto->name);
+ n, e->attrs->dest, ch->proto->name);
return 0;
}
if ((e->attrs->dest == RTD_UNICAST) && !nexthop_is_sorted(&(e->attrs->nh)))
{
log(L_WARN "Ignoring unsorted multipath route %N received via %s",
- n->n.addr, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
return 1;
}
-/**
- * rte_free - delete a &rte
- * @e: &rte to be deleted
- *
- * rte_free() deletes the given &rte from the routing table it's linked to.
- */
-void
-rte_free(rte *e)
-{
- if (rta_is_cached(e->attrs))
- rta_free(e->attrs);
- sl_free(rte_slab, e);
-}
-
-static inline void
-rte_free_quick(rte *e)
-{
- rta_free(e->attrs);
- sl_free(rte_slab, e);
-}
-
static int
rte_same(rte *x, rte *y)
{
+ ASSERT_DIE(x->attrs->cached && y->attrs->cached);
+
/* rte.flags are not checked, as they are mostly internal to rtable */
return
x->attrs == y->attrs &&
- x->pflags == y->pflags &&
- x->pref == y->pref &&
- (!x->attrs->src->proto->rte_same || x->attrs->src->proto->rte_same(x, y)) &&
+ x->src == y->src &&
rte_is_filtered(x) == rte_is_filtered(y);
}
static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
static void
-rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
+rte_recalculate(rtable_private *table, struct rt_import_hook *c, net *net, rte *new, struct rte_src *src)
{
- struct proto *p = c->proto;
- struct rtable *table = c->table;
- struct proto_stats *stats = &c->stats;
- static struct tbf rl_pipe = TBF_DEFAULT_LOG_LIMITS;
- rte *before_old = NULL;
- rte *old_best = net->routes;
+ struct rt_import_request *req = c->req;
+ struct rt_import_stats *stats = &c->stats;
+ struct rte_storage *old_best_stored = net->routes, *old_stored = NULL;
+ rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
rte *old = NULL;
- rte **k;
-
- k = &net->routes; /* Find and remove original route from the same protocol */
- while (old = *k)
- {
- if (old->attrs->src == src)
- {
- /* If there is the same route in the routing table but from
- * a different sender, then there are two paths from the
- * source protocol to this routing table through transparent
- * pipes, which is not allowed.
- *
- * We log that and ignore the route. If it is withdraw, we
- * ignore it completely (there might be 'spurious withdraws',
- * see FIXME in do_rte_announce())
- */
- if (old->sender->proto != p)
- {
- if (new)
- {
- log_rl(&rl_pipe, L_ERR "Pipe collision detected when sending %N to table %s",
- net->n.addr, table->name);
- rte_free_quick(new);
- }
- return;
- }
- if (new && rte_same(old, new))
- {
- /* No changes, ignore the new route and refresh the old one */
+ /* Set the stale cycle unless already set */
+ if (new && !(new->flags & REF_USE_STALE))
+ new->stale_cycle = c->stale_set;
- old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
+ /* Find and remove original route from the same protocol */
+ struct rte_storage **before_old = rte_find(net, src);
- if (!rte_is_filtered(new))
- {
- stats->imp_updates_ignored++;
- rte_trace_in(D_ROUTES, c, new, "ignored");
- }
-
- rte_free_quick(new);
- return;
- }
- *k = old->next;
- table->rt_count--;
- break;
- }
- k = &old->next;
- before_old = old;
- }
-
- /* Save the last accessed position */
- rte **pos = k;
-
- if (!old)
- before_old = NULL;
-
- if (!old && !new)
+ if (!*before_old && !new)
{
- stats->imp_withdraws_ignored++;
+ stats->withdraws_ignored++;
return;
}
- int new_ok = rte_is_ok(new);
- int old_ok = rte_is_ok(old);
+ if (new)
+ new->attrs = rta_is_cached(new->attrs) ? rta_clone(new->attrs) : rta_lookup(new->attrs);
- struct channel_limit *l = &c->rx_limit;
- if (l->action && !old && new && !c->in_table)
+ if (*before_old)
{
- u32 all_routes = stats->imp_routes + stats->filt_routes;
-
- if (all_routes >= l->limit)
- channel_notify_limit(c, l, PLD_RX, all_routes);
-
- if (l->state == PLS_BLOCKED)
+ old = &(old_stored = (*before_old))->rte;
+
+ /* If there is the same route in the routing table but from
+ * a different sender, then there are two paths from the
+ * source protocol to this routing table through transparent
+ * pipes, which is not allowed.
+ * We log that and ignore the route. */
+ if (old->sender != c)
{
- /* In receive limit the situation is simple, old is NULL so
- we just free new and exit like nothing happened */
+ if (!old->generation && !new->generation)
+ bug("Two protocols claim to author a route with the same rte_src in table %s: %N %s/%u:%u",
+ c->table->name, net->n.addr, old->src->owner->name, old->src->private_id, old->src->global_id);
- stats->imp_updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
- rte_free_quick(new);
- return;
+ log_rl(&table->rl_pipe, L_ERR "Route source collision in table %s: %N %s/%u:%u",
+ c->table->name, net->n.addr, old->src->owner->name, old->src->private_id, old->src->global_id);
}
- }
- l = &c->in_limit;
- if (l->action && !old_ok && new_ok)
- {
- if (stats->imp_routes >= l->limit)
- channel_notify_limit(c, l, PLD_IN, stats->imp_routes);
-
- if (l->state == PLS_BLOCKED)
- {
- /* In import limit the situation is more complicated. We
- shouldn't just drop the route, we should handle it like
- it was filtered. We also have to continue the route
- processing if old or new is non-NULL, but we should exit
- if both are NULL as this case is probably assumed to be
- already handled. */
+ if (new && rte_same(old, new))
+ {
+ /* No changes, ignore the new route and refresh the old one */
+ old->stale_cycle = new->stale_cycle;
- stats->imp_updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
+ if (!rte_is_filtered(new))
+ {
+ stats->updates_ignored++;
+ rt_rte_trace_in(D_ROUTES, req, new, "ignored");
+ }
- if (c->in_keep_filtered)
- new->flags |= REF_FILTERED;
- else
- { rte_free_quick(new); new = NULL; }
+ rta_free(new->attrs);
+ return;
+ }
- /* Note that old && !new could be possible when
- c->in_keep_filtered changed in the recent past. */
+ *before_old = (*before_old)->next;
+ table->rt_count--;
+ }
- if (!old && !new)
- return;
+ if (req->preimport)
+ new = req->preimport(req, new, old);
- new_ok = 0;
- goto skip_stats1;
- }
- }
+ int new_ok = rte_is_ok(new);
+ int old_ok = rte_is_ok(old);
if (new_ok)
- stats->imp_updates_accepted++;
+ stats->updates_accepted++;
else if (old_ok)
- stats->imp_withdraws_accepted++;
+ stats->withdraws_accepted++;
else
- stats->imp_withdraws_ignored++;
+ stats->withdraws_ignored++;
if (old_ok || new_ok)
table->last_rt_change = current_time();
- skip_stats1:
-
- if (new)
- rte_is_filtered(new) ? stats->filt_routes++ : stats->imp_routes++;
- if (old)
- rte_is_filtered(old) ? stats->filt_routes-- : stats->imp_routes--;
+ struct rte_storage *new_stored = new ? rte_store(new, net, table) : NULL;
if (table->config->sorted)
{
/* If routes are sorted, just insert new route to appropriate position */
- if (new)
+ if (new_stored)
{
- if (before_old && !rte_better(new, before_old))
- k = &before_old->next;
+ struct rte_storage **k;
+ if ((before_old != &net->routes) && !rte_better(new, &SKIP_BACK(struct rte_storage, next, before_old)->rte))
+ k = before_old;
else
k = &net->routes;
for (; *k; k=&(*k)->next)
- if (rte_better(new, *k))
+ if (rte_better(new, &(*k)->rte))
break;
- new->next = *k;
- *k = new;
+ new_stored->next = *k;
+ *k = new_stored;
table->rt_count++;
}
@@ -1256,16 +1405,17 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
/* If routes are not sorted, find the best route and move it on
the first position. There are several optimized cases. */
- if (src->proto->rte_recalculate && src->proto->rte_recalculate(table, net, new, old, old_best))
+ if (src->owner->rte_recalculate &&
+ src->owner->rte_recalculate(table, net, new_stored ? &new_stored->rte : NULL, old, old_best))
goto do_recalculate;
- if (new && rte_better(new, old_best))
+ if (new_stored && rte_better(&new_stored->rte, old_best))
{
/* The first case - the new route is clearly optimal,
we link it at the first position */
- new->next = net->routes;
- net->routes = new;
+ new_stored->next = net->routes;
+ net->routes = new_stored;
table->rt_count++;
}
@@ -1279,10 +1429,10 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
do_recalculate:
/* Add the new route to the list */
- if (new)
+ if (new_stored)
{
- new->next = *pos;
- *pos = new;
+ new_stored->next = *before_old;
+ *before_old = new_stored;
table->rt_count++;
}
@@ -1290,335 +1440,408 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
/* Find a new optimal route (if there is any) */
if (net->routes)
{
- rte **bp = &net->routes;
- for (k=&(*bp)->next; *k; k=&(*k)->next)
- if (rte_better(*k, *bp))
+ struct rte_storage **bp = &net->routes;
+ for (struct rte_storage **k=&(*bp)->next; *k; k=&(*k)->next)
+ if (rte_better(&(*k)->rte, &(*bp)->rte))
bp = k;
/* And relink it */
- rte *best = *bp;
+ struct rte_storage *best = *bp;
*bp = best->next;
best->next = net->routes;
net->routes = best;
}
}
- else if (new)
+ else if (new_stored)
{
/* The third case - the new route is not better than the old
best route (therefore old_best != NULL) and the old best
route was not removed (therefore old_best == net->routes).
We just link the new route to the old/last position. */
- new->next = *pos;
- *pos = new;
+ new_stored->next = *before_old;
+ *before_old = new_stored;
table->rt_count++;
}
/* The fourth (empty) case - suboptimal route was removed, nothing to do */
}
- if (new)
+ if (new_stored)
{
- new->lastmod = current_time();
-
- if (!old)
- {
- new->id = hmap_first_zero(&table->id_map);
- hmap_set(&table->id_map, new->id);
- }
- else
- new->id = old->id;
+ new_stored->rte.lastmod = current_time();
+ new_stored->rte.id = hmap_first_zero(&table->id_map);
+ hmap_set(&table->id_map, new_stored->rte.id);
}
+ _Bool nb = (new_stored == net->routes);
+ _Bool ob = (old_best == old);
+
/* Log the route change */
- if ((c->debug & D_ROUTES) || (p->debug & D_ROUTES))
+ if (new_ok && old_ok)
{
- if (new_ok)
- rte_trace(c, new, '>', new == net->routes ? "added [best]" : "added");
- else if (old_ok)
- {
- if (old != old_best)
- rte_trace(c, old, '>', "removed");
- else if (rte_is_ok(net->routes))
- rte_trace(c, old, '>', "removed [replaced]");
- else
- rte_trace(c, old, '>', "removed [sole]");
- }
+ const char *best_indicator[2][2] = { { "updated", "updated [-best]" }, { "updated [+best]", "updated [best]" } };
+ rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, best_indicator[nb][ob]);
}
+ else if (new_ok)
+ rt_rte_trace_in(D_ROUTES, req, &new_stored->rte,
+ (!net->routes->next || !rte_is_ok(&net->routes->next->rte)) ? "added [sole]" :
+ nb ? "added [best]" : "added");
+ else if (old_ok)
+ rt_rte_trace_in(D_ROUTES, req, old,
+ (!net->routes || !rte_is_ok(&net->routes->rte)) ? "removed [sole]" :
+ ob ? "removed [best]" : "removed");
/* Propagate the route change */
- rte_announce(table, RA_UNDEF, net, new, old, net->routes, old_best);
+ rte_announce(table, net, new_stored, old_stored,
+ net->routes, old_best_stored);
if (!net->routes &&
(table->gc_counter++ >= table->config->gc_max_ops) &&
(table->gc_time + table->config->gc_min_time <= current_time()))
rt_schedule_prune(table);
+#if 0
+ /* Enable and reimplement these callbacks if anybody wants to use them */
if (old_ok && p->rte_remove)
p->rte_remove(net, old);
if (new_ok && p->rte_insert)
- p->rte_insert(net, new);
-
- if (old)
- {
- if (!new)
- hmap_clear(&table->id_map, old->id);
-
- rte_free_quick(old);
- }
-}
-
-static int rte_update_nest_cnt; /* Nesting counter to allow recursive updates */
+ p->rte_insert(net, &new_stored->rte);
+#endif
-static inline void
-rte_update_lock(void)
-{
- rte_update_nest_cnt++;
}
-static inline void
-rte_update_unlock(void)
+rte *
+channel_preimport(struct rt_import_request *req, rte *new, rte *old)
{
- if (!--rte_update_nest_cnt)
- lp_flush(rte_update_pool);
-}
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
-static inline void
-rte_hide_dummy_routes(net *net, rte **dummy)
-{
- if (net->routes && net->routes->attrs->source == RTS_DUMMY)
+ if (!c->in_table)
{
- *dummy = net->routes;
- net->routes = (*dummy)->next;
+ if (new && !old)
+ if (CHANNEL_LIMIT_PUSH(c, RX))
+ {
+ rta_free(new->attrs);
+ return NULL;
+ }
+
+ if (!new && old)
+ CHANNEL_LIMIT_POP(c, RX);
}
+
+ int new_in = new && !rte_is_filtered(new);
+ int old_in = old && !rte_is_filtered(old);
+
+ if (new_in && !old_in)
+ if (CHANNEL_LIMIT_PUSH(c, IN))
+ if (c->in_keep_filtered)
+ {
+ new->flags |= REF_FILTERED;
+ return new;
+ }
+ else
+ {
+ rta_free(new->attrs);
+ return NULL;
+ }
+
+ if (!new_in && old_in)
+ CHANNEL_LIMIT_POP(c, IN);
+
+ return new;
}
-static inline void
-rte_unhide_dummy_routes(net *net, rte **dummy)
+rte *
+channel_in_preimport(struct rt_import_request *req, rte *new, rte *old)
{
- if (*dummy)
- {
- (*dummy)->next = net->routes;
- net->routes = *dummy;
- }
+ struct channel_aux_table *cat = SKIP_BACK(struct channel_aux_table, push, req);
+
+ if (new && !old)
+ if (CHANNEL_LIMIT_PUSH(cat->c, RX))
+ return NULL;
+
+ if (!new && old)
+ CHANNEL_LIMIT_POP(cat->c, RX);
+
+ return new;
}
-/**
- * rte_update - enter a new update to a routing table
- * @table: table to be updated
- * @c: channel doing the update
- * @net: network node
- * @p: protocol submitting the update
- * @src: protocol originating the update
- * @new: a &rte representing the new route or %NULL for route removal.
- *
- * This function is called by the routing protocols whenever they discover
- * a new route or wish to update/remove an existing route. The right announcement
- * sequence is to build route attributes first (either un-cached with @aflags set
- * to zero or a cached one using rta_lookup(); in this case please note that
- * you need to increase the use count of the attributes yourself by calling
- * rta_clone()), call rte_get_temp() to obtain a temporary &rte, fill in all
- * the appropriate data and finally submit the new &rte by calling rte_update().
- *
- * @src specifies the protocol that originally created the route and the meaning
- * of protocol-dependent data of @new. If @new is not %NULL, @src have to be the
- * same value as @new->attrs->proto. @p specifies the protocol that called
- * rte_update(). In most cases it is the same protocol as @src. rte_update()
- * stores @p in @new->sender;
- *
- * When rte_update() gets any route, it automatically validates it (checks,
- * whether the network and next hop address are valid IP addresses and also
- * whether a normal routing protocol doesn't try to smuggle a host or link
- * scope route to the table), converts all protocol dependent attributes stored
- * in the &rte to temporary extended attributes, consults import filters of the
- * protocol to see if the route should be accepted and/or its attributes modified,
- * stores the temporary attributes back to the &rte.
- *
- * Now, having a "public" version of the route, we
- * automatically find any old route defined by the protocol @src
- * for network @n, replace it by the new one (or removing it if @new is %NULL),
- * recalculate the optimal route for this destination and finally broadcast
- * the change (if any) to all routing protocols by calling rte_announce().
- *
- * All memory used for attribute lists and other temporary allocations is taken
- * from a special linear pool @rte_update_pool and freed when rte_update()
- * finishes.
- */
+void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
void
-rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
+rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
- // struct proto *p = c->proto;
- struct proto_stats *stats = &c->stats;
- const struct filter *filter = c->in_filter;
- rte *dummy = NULL;
- net *nn;
+ if (!c->in_req.hook)
+ return;
ASSERT(c->channel_state == CS_UP);
- rte_update_lock();
+ if (c->in_table)
+ rte_import(&c->in_table->push, n, new, src);
+ else
+ rte_update_direct(c, n, new, src);
+}
+
+void
+rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
+{
+ const struct filter *filter = c->in_filter;
+ struct channel_import_stats *stats = &c->import_stats;
+
+ rte_update_lock(c);
if (new)
{
- /* Create a temporary table node */
- nn = alloca(sizeof(net) + n->length);
- memset(nn, 0, sizeof(net) + n->length);
- net_copy(nn->n.addr, n);
-
- new->net = nn;
- new->sender = c;
+ new->net = n;
- if (!new->pref)
- new->pref = c->preference;
+ int fr;
- stats->imp_updates_received++;
- if (!rte_validate(new))
+ stats->updates_received++;
+ if (!rte_validate(c, new))
{
- rte_trace_in(D_FILTERS, c, new, "invalid");
- stats->imp_updates_invalid++;
- goto drop;
+ channel_rte_trace_in(D_FILTERS, c, new, "invalid");
+ stats->updates_invalid++;
+ new = NULL;
}
-
- if (filter == FILTER_REJECT)
+ else if ((filter == FILTER_REJECT) ||
+ ((fr = f_run(filter, new, c->rte_update_pool, 0)) > F_ACCEPT))
{
- stats->imp_updates_filtered++;
- rte_trace_in(D_FILTERS, c, new, "filtered out");
-
- if (! c->in_keep_filtered)
- goto drop;
+ stats->updates_filtered++;
+ channel_rte_trace_in(D_FILTERS, c, new, "filtered out");
- /* new is a private copy, i could modify it */
- new->flags |= REF_FILTERED;
+ if (c->in_keep_filtered)
+ new->flags |= REF_FILTERED;
+ else
+ new = NULL;
}
- else if (filter)
- {
- rta *old_attrs = NULL;
- rte_make_tmp_attrs(&new, rte_update_pool, &old_attrs);
+ }
+ else
+ stats->withdraws_received++;
- int fr = f_run(filter, &new, rte_update_pool, 0);
- if (fr > F_ACCEPT)
- {
- stats->imp_updates_filtered++;
- rte_trace_in(D_FILTERS, c, new, "filtered out");
+ rte_import(&c->in_req, n, new, src);
- if (! c->in_keep_filtered)
- {
- rta_free(old_attrs);
- goto drop;
- }
+ rte_update_unlock(c);
+}
- new->flags |= REF_FILTERED;
- }
+void
+rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rte_src *src)
+{
+ struct rt_import_hook *hook = req->hook;
+ if (!hook)
+ return;
- rte_store_tmp_attrs(new, rte_update_pool, old_attrs);
- }
- if (!rta_is_cached(new->attrs)) /* Need to copy attributes */
- new->attrs = rta_lookup(new->attrs);
- new->flags |= REF_COW;
+ RT_LOCK(hook->table);
+ rtable_private *tab = RT_PRIV(hook->table);
+ net *nn;
+ if (new)
+ {
/* Use the actual struct network, not the dummy one */
- nn = net_get(c->table, n);
- new->net = nn;
+ nn = net_get(tab, n);
+ new->net = nn->n.addr;
+ new->sender = hook;
}
- else
+ else if (!(nn = net_find(tab, n)))
{
- stats->imp_withdraws_received++;
-
- if (!(nn = net_find(c->table, n)) || !src)
- {
- stats->imp_withdraws_ignored++;
- rte_update_unlock();
- return;
- }
+ req->hook->stats.withdraws_ignored++;
+ RT_UNLOCK(tab);
+ return;
}
- recalc:
/* And recalculate the best route */
- rte_hide_dummy_routes(nn, &dummy);
- rte_recalculate(c, nn, new, src);
- rte_unhide_dummy_routes(nn, &dummy);
+ rte_recalculate(tab, hook, nn, new, src);
- rte_update_unlock();
- return;
+ /* Schedule export announcement */
+ ev_send(req->list, hook->export_announce_event);
- drop:
- rte_free(new);
- new = NULL;
- if (nn = net_find(c->table, n))
- goto recalc;
+ /* Done! */
+ RT_UNLOCK(tab);
+}
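
A hypothetical protocol-side call into the import path above; `a` and `src` stand in for the caller's attribute set and route source, and a withdrawal would pass NULL in place of &e0:

    static void
    announce_example(struct channel *c, const net_addr *n, rta *a, struct rte_src *src)
    {
      /* rte_update() / rte_import() fill in net, sender and stale_cycle;
       * attrs are cached by the import path if not cached already. */
      rte e0 = { .attrs = a, .src = src };
      rte_update(c, n, &e0, src);
    }
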
- rte_update_unlock();
+/* Check rtable for the best route to a given net and whether it would be exported to p */
+int
+rt_examine(rtable_private *t, net_addr *a, struct channel *c, const struct filter *filter)
+{
+ net *n = net_find(t, a);
+
+ if (!n || !n->routes)
+ return 0;
+
+ rte rt = n->routes->rte;
+
+ if (!rte_is_valid(&rt))
+ return 0;
+
+ rte_update_lock(c);
+
+ /* The rest is a stripped-down export_filter() */
+ int v = c->proto->preexport ? c->proto->preexport(c, &rt) : 0;
+ if (v == RIC_PROCESS)
+ v = (f_run(filter, &rt, c->rte_update_pool, FF_SILENT) <= F_ACCEPT);
+
+ rte_update_unlock(c);
+
+ return v > 0;
}
-/* Independent call to rte_announce(), used from next hop
- recalculation, outside of rte_update(). new must be non-NULL */
-static inline void
-rte_announce_i(rtable *tab, uint type, net *net, rte *new, rte *old,
- rte *new_best, rte *old_best)
+static void
+rt_export_stopped(void *data)
{
- rte_update_lock();
- rte_announce(tab, type, net, new, old, new_best, old_best);
- rte_update_unlock();
+ struct rt_export_hook *hook = data;
+
+ RT_LOCKED(hook->table, tab)
+ {
+ /* Drop pending exports */
+ rt_export_used(tab);
+
+ /* Unlist */
+ rem_node(&hook->n);
+ }
+
+ /* Report the channel as stopped. */
+ hook->stopped(hook->req);
+
+ RT_LOCKED(hook->table, tab)
+ {
+ /* Free the hook together with its pool. */
+ rp_free(hook->pool, tab->rp);
+ rt_unlock_table(tab);
+
+ rt_fast_prune_check(tab);
+
+ DBG("Export hook %p in table %s finished uc=%u\n", hook, tab->name, tab->use_count);
+ }
}
+
static inline void
-rte_discard(rte *old) /* Non-filtered route deletion, used during garbage collection */
+rt_set_import_state(struct rt_import_hook *hook, u8 state)
{
- rte_update_lock();
- rte_recalculate(old->sender, old->net, NULL, old->attrs->src);
- rte_update_unlock();
+ hook->last_state_change = current_time();
+ hook->import_state = state;
+
+ if (hook->req->log_state_change)
+ hook->req->log_state_change(hook->req, state);
}
-/* Modify existing route by protocol hook, used for long-lived graceful restart */
static inline void
-rte_modify(rte *old)
+rt_set_export_state(struct rt_export_hook *hook, u8 state)
{
- rte_update_lock();
+ hook->last_state_change = current_time();
+ atomic_store_explicit(&hook->export_state, state, memory_order_release);
- rte *new = old->sender->proto->rte_modify(old, rte_update_pool);
- if (new != old)
- {
- if (new)
- {
- if (!rta_is_cached(new->attrs))
- new->attrs = rta_lookup(new->attrs);
- new->flags = (old->flags & ~REF_MODIFY) | REF_COW;
- }
+ if (hook->req->log_state_change)
+ hook->req->log_state_change(hook->req, state);
+}
- rte_recalculate(old->sender, old->net, new, old->attrs->src);
- }
+void
+rt_request_import(rtable *t, struct rt_import_request *req)
+{
+ RT_LOCK(t);
+ rtable_private *tab = RT_PRIV(t);
+ rt_lock_table(tab);
+
+ ASSERT_DIE(!tab->delete);
+
+ struct rt_import_hook *hook = req->hook = mb_allocz(tab->rp, sizeof(struct rt_import_hook));
+
+ DBG("Lock table %s for import %p req=%p uc=%u\n", tab->name, hook, req, tab->use_count);
- rte_update_unlock();
+ hook->req = req;
+ hook->table = t;
+
+ hook->export_announce_event = ev_new_init(tab->rp, rt_import_announce_exports, hook);
+
+ if (!hook->stale_set)
+ hook->stale_set = hook->stale_valid = hook->stale_pruning = hook->stale_pruned = 1;
+
+ rt_set_import_state(hook, TIS_UP);
+
+ hook->n = (node) {};
+ add_tail(&tab->imports, &hook->n);
+ tab->imports_up++;
+
+ RT_UNLOCK(t);
}
-/* Check rtable for best route to given net whether it would be exported do p */
-int
-rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter)
+void
+rt_stop_import(struct rt_import_request *req, event *stopped)
{
- net *n = net_find(t, a);
- rte *rt = n ? n->routes : NULL;
+ ASSERT_DIE(req->hook);
+ struct rt_import_hook *hook = req->hook;
- if (!rte_is_valid(rt))
- return 0;
+ rtable_private *tab = RT_LOCK(hook->table);
- rte_update_lock();
+ rt_schedule_prune(tab);
- /* Rest is stripped down export_filter() */
- int v = p->preexport ? p->preexport(p, &rt, rte_update_pool) : 0;
- if (v == RIC_PROCESS)
- {
- rte_make_tmp_attrs(&rt, rte_update_pool, NULL);
- v = (f_run(filter, &rt, rte_update_pool, FF_SILENT) <= F_ACCEPT);
- }
+ tab->imports_up--;
+ rt_fast_prune_check(tab);
- /* Discard temporary rte */
- if (rt != n->routes)
- rte_free(rt);
+ rt_set_import_state(hook, TIS_STOP);
- rte_update_unlock();
+ hook->stopped = stopped;
- return v > 0;
+ if (hook->stale_set < hook->stale_valid)
+ if (!--tab->rr_count)
+ rt_schedule_notify(tab);
+
+ RT_UNLOCK(tab);
}
+void
+rt_request_export(rtable *t, struct rt_export_request *req)
+{
+ RT_LOCK(t);
+ rtable_private *tab = RT_PRIV(t);
+ rt_lock_table(tab);
+
+ pool *p = rp_new(tab->rp, tab->loop, "Export hook");
+ struct rt_export_hook *hook = req->hook = mb_allocz(p, sizeof(struct rt_export_hook));
+ hook->pool = p;
+
+ hook->req = req;
+ hook->table = t;
+
+ /* stats zeroed by mb_allocz */
+
+ bmap_init(&hook->seq_map, p, 1024);
+
+ rt_set_export_state(hook, TES_HUNGRY);
+
+ hook->n = (node) {};
+ add_tail(&tab->exports, &hook->n);
+
+ DBG("New export hook %p req %p in table %s uc=%u\n", hook, req, tab->name, tab->use_count);
+
+ hook->event = ev_new_init(p, rt_feed_channel, hook);
+ RT_UNLOCK(t);
+
+ rt_send_export_event(hook);
+}
+
+void
+rt_stop_export(struct rt_export_request *req, void (*stopped)(struct rt_export_request *))
+{
+ ASSERT_DIE(req->hook);
+ struct rt_export_hook *hook = req->hook;
+
+ RT_LOCK(hook->table);
+ rtable_private *tab = RT_PRIV(hook->table);
+
+ /* Stop feeding */
+ ev_postpone(hook->event);
+
+ if (atomic_load_explicit(&hook->export_state, memory_order_relaxed) == TES_FEEDING)
+ fit_get(&tab->fib, &hook->feed_fit);
+
+ hook->event->hook = rt_export_stopped;
+ hook->stopped = stopped;
+
+ rt_send_export_event(hook);
+
+ RT_UNLOCK(hook->table);
+
+ rt_set_export_state(hook, TES_STOP);
+}
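
A sketch of the export hook lifecycle built from the pair above; the event list type, the stopped callback and my_export_one() (from the earlier sketch) are this example's assumptions:

    static void
    my_export_done(struct rt_export_request *req UNUSED)
    {
      /* The hook and its pool are already freed; only req remains valid. */
    }

    static void
    export_lifecycle_example(rtable *t, struct rt_export_request *req, event_list *l)
    {
      req->name = "example-export";
      req->list = l;			/* where export events get queued */
      req->export_one = my_export_one;	/* or export_bulk; one is mandatory */

      rt_request_export(t, req);	/* feeds existing routes, then updates */
      /* ... exports are now delivered asynchronously via l ... */
      rt_stop_export(req, my_export_done);	/* stops feeding, then calls back */
    }
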
/**
* rt_refresh_begin - start a refresh cycle
@@ -1630,21 +1853,48 @@ rt_examine(rtable *t, net_addr *a, struct proto *p, const struct filter *filter)
* routes to the routing table (by rte_update()). After that, all protocol
* routes (more precisely routes with @c as @sender) not sent during the
* refresh cycle but still in the table from the past are pruned. This is
- * implemented by marking all related routes as stale by REF_STALE flag in
- * rt_refresh_begin(), then marking all related stale routes with REF_DISCARD
- * flag in rt_refresh_end() and then removing such routes in the prune loop.
- */
+ * implemented by setting rte->stale_cycle to req->stale_set in rte_update()
+ * and then dropping all routes with old stale_cycle values in the table prune loop. */
void
-rt_refresh_begin(rtable *t, struct channel *c)
+rt_refresh_begin(struct rt_import_request *req)
{
- FIB_WALK(&t->fib, net, n)
- {
- rte *e;
- for (e = n->routes; e; e = e->next)
- if (e->sender == c)
- e->flags |= REF_STALE;
- }
- FIB_WALK_END;
+ struct rt_import_hook *hook = req->hook;
+ ASSERT_DIE(hook);
+
+ RT_LOCK(hook->table);
+ rtable_private *tab = RT_PRIV(hook->table);
+
+ ASSERT_DIE(hook->stale_set == hook->stale_valid);
+
+  /* If the pruning routine is too slow to keep up with route refreshes
+   * and the stale counters are about to wrap around, reset all stale marks */
+  if (((hook->stale_pruned < hook->stale_valid) && (hook->stale_pruned + 128 < hook->stale_valid))
+      || ((hook->stale_pruned > hook->stale_valid) && (hook->stale_pruned > hook->stale_valid + 128)))
+ {
+ log(L_WARN "Route refresh flood in table %s", tab->name);
+ FIB_WALK(&tab->fib, net, n)
+ {
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (e->rte.sender == req->hook)
+ e->rte.stale_cycle = 0;
+ }
+ FIB_WALK_END;
+ hook->stale_set = 1;
+ hook->stale_valid = 0;
+ hook->stale_pruned = 0;
+ }
+ else if (!++hook->stale_set)
+ {
+ /* Let's reserve the stale_cycle zero value for always-invalid routes */
+ hook->stale_set = 1;
+ hook->stale_valid = 0;
+ }
+
+ tab->rr_count++;
+
+ if (req->trace_routes & D_STATES)
+ log(L_TRACE "%s: route refresh begin [%u]", req->name, hook->stale_set);
+
+ RT_UNLOCK(tab);
}
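The stale_cycle window arithmetic above can be restated compactly: a route survives pruning iff stale_valid <= rte.stale_cycle <= stale_set. A hypothetical helper (not in the patch) inverting the exact condition used by rt_prune_table() below:

/* Sketch: a route is fresh iff its stale_cycle lies in [stale_valid, stale_set].
 * Zero serves as the always-invalid mark: once a refresh ends and stale_valid
 * reaches at least 1, any route still carrying cycle 0 falls out of the window. */
static inline int rte_is_fresh(const struct rt_import_hook *s, const rte *e)
{
  return (e->stale_cycle >= s->stale_valid) && (e->stale_cycle <= s->stale_set);
}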
/**
@@ -1656,45 +1906,24 @@ rt_refresh_begin(rtable *t, struct channel *c)
* hook. See rt_refresh_begin() for description of refresh cycles.
*/
void
-rt_refresh_end(rtable *t, struct channel *c)
+rt_refresh_end(struct rt_import_request *req)
{
- int prune = 0;
+ struct rt_import_hook *hook = req->hook;
+ ASSERT_DIE(hook);
- FIB_WALK(&t->fib, net, n)
- {
- rte *e;
- for (e = n->routes; e; e = e->next)
- if ((e->sender == c) && (e->flags & REF_STALE))
- {
- e->flags |= REF_DISCARD;
- prune = 1;
- }
- }
- FIB_WALK_END;
+ rtable_private *tab = RT_LOCK(hook->table);
+ hook->stale_valid++;
+ ASSERT_DIE(hook->stale_set == hook->stale_valid);
- if (prune)
- rt_schedule_prune(t);
-}
+ rt_schedule_prune(tab);
-void
-rt_modify_stale(rtable *t, struct channel *c)
-{
- int prune = 0;
+ if (req->trace_routes & D_STATES)
+ log(L_TRACE "%s: route refresh end [%u]", req->name, hook->stale_valid);
- FIB_WALK(&t->fib, net, n)
- {
- rte *e;
- for (e = n->routes; e; e = e->next)
- if ((e->sender == c) && (e->flags & REF_STALE) && !(e->flags & REF_FILTERED))
- {
- e->flags |= REF_MODIFY;
- prune = 1;
- }
- }
- FIB_WALK_END;
+ if (!--tab->rr_count)
+ rt_schedule_notify(tab);
- if (prune)
- rt_schedule_prune(t);
+ RT_UNLOCK(tab);
}
/**
@@ -1704,14 +1933,11 @@ rt_modify_stale(rtable *t, struct channel *c)
* This functions dumps contents of a &rte to debug output.
*/
void
-rte_dump(rte *e)
-{
- net *n = e->net;
- debug("%-1N ", n->n.addr);
- debug("PF=%02x pref=%d ", e->pflags, e->pref);
- rta_dump(e->attrs);
- if (e->attrs->src->proto->proto->dump_attrs)
- e->attrs->src->proto->proto->dump_attrs(e);
+rte_dump(struct rte_storage *e)
+{
+ debug("%-1N ", e->rte.net);
+ debug("PF=%02x ", e->rte.pflags);
+ rta_dump(e->rte.attrs);
debug("\n");
}
@@ -1722,20 +1948,22 @@ rte_dump(rte *e)
* This function dumps contents of a given routing table to debug output.
*/
void
-rt_dump(rtable *t)
+rt_dump(rtable *tab)
{
- debug("Dump of routing table <%s>\n", t->name);
+ RT_LOCK(tab);
+ rtable_private *t = RT_PRIV(tab);
+ debug("Dump of routing table <%s>%s\n", t->name, t->delete ? " (deleted)" : "");
#ifdef DEBUGGING
fib_check(&t->fib);
#endif
FIB_WALK(&t->fib, net, n)
{
- rte *e;
- for(e=n->routes; e; e=e->next)
+ for(struct rte_storage *e=n->routes; e; e=e->next)
rte_dump(e);
}
FIB_WALK_END;
debug("\n");
+ RT_UNLOCK(tab);
}
/**
@@ -1753,73 +1981,121 @@ rt_dump_all(void)
rt_dump(t);
}
-static inline void
-rt_schedule_hcu(rtable *tab)
+void
+rt_dump_hooks(rtable *t)
+{
+ RT_LOCK(t);
+ rtable_private *tab = RT_PRIV(t);
+ debug("Dump of hooks in routing table <%s>%s\n", tab->name, tab->delete ? " (deleted)" : "");
+ debug(" nhu_state=%u hcu_scheduled=%u use_count=%d rt_count=%u\n",
+ atomic_load(&tab->nhu_state), ev_active(tab->hcu_event), tab->use_count, tab->rt_count);
+ debug(" last_rt_change=%t gc_time=%t gc_counter=%d prune_state=%u\n",
+ tab->last_rt_change, tab->gc_time, tab->gc_counter, tab->prune_state);
+
+ struct rt_import_hook *ih;
+ WALK_LIST(ih, tab->imports)
+ {
+ ih->req->dump_req(ih->req);
+ debug(" Import hook %p requested by %p: pref=%u"
+ " last_state_change=%t import_state=%u stopped=%p\n",
+ ih, ih->req, ih->stats.pref,
+ ih->last_state_change, ih->import_state, ih->stopped);
+ }
+
+ struct rt_export_hook *eh;
+ WALK_LIST(eh, tab->exports)
+ {
+ eh->req->dump_req(eh->req);
+ debug(" Export hook %p requested by %p:"
+ " refeed_pending=%u last_state_change=%t export_state=%u\n",
+ eh, eh->req, eh->refeed_pending, eh->last_state_change, atomic_load_explicit(&eh->export_state, memory_order_relaxed));
+ }
+ debug("\n");
+ RT_UNLOCK(t);
+}
+
+void
+rt_dump_hooks_all(void)
{
- if (tab->hcu_scheduled)
- return;
+ rtable *t;
+ node *n;
- tab->hcu_scheduled = 1;
- ev_schedule(tab->rt_event);
+ debug("Dump of all table hooks\n");
+
+ WALK_LIST2(t, n, routing_tables, n)
+ rt_dump_hooks(t);
}
static inline void
rt_schedule_nhu(rtable *tab)
{
- if (tab->nhu_state == NHU_CLEAN)
- ev_schedule(tab->rt_event);
+ atomic_fetch_or_explicit(&tab->nhu_state, NHU_SCHEDULED, memory_order_acq_rel);
+ ev_send_loop(tab->loop, tab->nhu_event);
/* state change:
* NHU_CLEAN -> NHU_SCHEDULED
* NHU_RUNNING -> NHU_DIRTY
*/
- tab->nhu_state |= NHU_SCHEDULED;
}
void
-rt_schedule_prune(rtable *tab)
+rt_schedule_prune(rtable_private *tab)
{
if (tab->prune_state == 0)
- ev_schedule(tab->rt_event);
+ ev_send_loop(tab->loop, tab->prune_event);
/* state change 0->1, 2->3 */
tab->prune_state |= 1;
}
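prune_state stays a two-bit value, now serviced by the dedicated prune_event: bit 0 requests a run, bit 1 marks a walk in progress, which is what the "0->1, 2->3" note above and the "prune_state &= 1" at the end of rt_prune_table() encode. Restated as a hypothetical enum for clarity (not in the patch):

/* Sketch of the prune_state bits as used by rt_schedule_prune() / rt_prune_table(). */
enum {
  RPS_REQUESTED = 1,  /* bit 0: a prune run is pending */
  RPS_RUNNING   = 2,  /* bit 1: the fib walk has started */
};
/* schedule:  state |= RPS_REQUESTED   (0->1, 2->3)
 * walk end:  state &= RPS_REQUESTED   keeps a request made during the walk */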
+static int
+rt_fast_prune_ready(rtable_private *tab)
+{
+ return EMPTY_LIST(tab->pending_exports) && EMPTY_LIST(tab->exports) && !tab->imports_up;
+}
static void
-rt_event(void *ptr)
+rt_fast_prune_check(rtable_private *tab)
{
- rtable *tab = ptr;
-
- rt_lock_table(tab);
-
- if (tab->hcu_scheduled)
- rt_update_hostcache(tab);
-
- if (tab->nhu_state)
- rt_next_hop_update(tab);
+ if (tab->delete && rt_fast_prune_ready(tab))
+ {
+ tab->prune_state |= 1;
+ ev_send_loop(tab->loop, tab->prune_event);
+ }
+}
- if (tab->prune_state)
- rt_prune_table(tab);
+void
+rt_export_used(rtable_private *tab)
+{
+ if (config->table_debug)
+ log(L_TRACE "%s: Export cleanup requested", tab->name);
- rt_unlock_table(tab);
+ ev_send_loop(tab->loop, tab->ec_event);
}
-
static inline btime
-rt_settled_time(rtable *tab)
+rt_settled_time(rtable_private *tab)
{
ASSUME(tab->base_settle_time != 0);
- return MIN(tab->last_rt_change + tab->config->min_settle_time,
- tab->base_settle_time + tab->config->max_settle_time);
+ btime min_settle_time = tab->rr_count ? tab->config->min_rr_settle_time : tab->config->min_settle_time;
+ btime max_settle_time = tab->rr_count ? tab->config->max_rr_settle_time : tab->config->max_settle_time;
+
+  DBG("%s: settled time computed from %t %t %t %t as %t / %t, now is %t\n",
+ tab->name, tab->last_rt_change, min_settle_time,
+ tab->base_settle_time, max_settle_time,
+ tab->last_rt_change + min_settle_time,
+ tab->base_settle_time + max_settle_time, current_time());
+
+ return MIN(tab->last_rt_change + min_settle_time,
+ tab->base_settle_time + max_settle_time);
}
static void
rt_settle_timer(timer *t)
{
- rtable *tab = t->data;
+ rtable_private *tab = t->data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
if (!tab->base_settle_time)
return;
@@ -1827,7 +2103,7 @@ rt_settle_timer(timer *t)
btime settled_time = rt_settled_time(tab);
if (current_time() < settled_time)
{
- tm_set(tab->settle_timer, settled_time);
+ tm_set_in(tab->settle_timer, settled_time, tab->loop);
return;
}
@@ -1836,11 +2112,11 @@ rt_settle_timer(timer *t)
struct rt_subscription *s;
WALK_LIST(s, tab->subscribers)
- s->hook(s);
+ ev_send(s->event->list, s->event);
}
static void
-rt_kick_settle_timer(rtable *tab)
+rt_kick_settle_timer(rtable_private *tab)
{
tab->base_settle_time = current_time();
@@ -1848,11 +2124,11 @@ rt_kick_settle_timer(rtable *tab)
tab->settle_timer = tm_new_init(tab->rp, rt_settle_timer, tab, 0, 0);
if (!tm_active(tab->settle_timer))
- tm_set(tab->settle_timer, rt_settled_time(tab));
+ tm_set_in(tab->settle_timer, rt_settled_time(tab), tab->loop);
}
static inline void
-rt_schedule_notify(rtable *tab)
+rt_schedule_notify(rtable_private *tab)
{
if (EMPTY_LIST(tab->subscribers))
return;
@@ -1864,33 +2140,40 @@ rt_schedule_notify(rtable *tab)
}
void
-rt_subscribe(rtable *tab, struct rt_subscription *s)
+rt_subscribe(rtable *t, struct rt_subscription *s)
{
- s->tab = tab;
- rt_lock_table(tab);
- add_tail(&tab->subscribers, &s->n);
+ s->tab = t;
+ RT_LOCKED(t, tab)
+ {
+ rt_lock_table(tab);
+ DBG("rt_subscribe(%s)\n", tab->name);
+ add_tail(&tab->subscribers, &s->n);
+ }
}
void
rt_unsubscribe(struct rt_subscription *s)
{
- rem_node(&s->n);
- rt_unlock_table(s->tab);
+ RT_LOCKED(s->tab, tab)
+ {
+ rem_node(&s->n);
+ if (EMPTY_LIST(tab->subscribers) && tm_active(tab->settle_timer))
+ tm_stop(tab->settle_timer);
+ rt_unlock_table(tab);
+ }
}
static void
rt_free(resource *_r)
{
- rtable *r = (rtable *) _r;
+ rtable_private *r = (rtable_private *) _r;
DBG("Deleting routing table %s\n", r->name);
ASSERT_DIE(r->use_count == 0);
-
- if (r->internal)
- return;
-
- r->config->table = NULL;
- rem_node(&r->n);
+ ASSERT_DIE(r->rt_count == 0);
+ ASSERT_DIE(!r->cork_active);
+ ASSERT_DIE(EMPTY_LIST(r->imports));
+ ASSERT_DIE(EMPTY_LIST(r->exports));
if (r->hostcache)
rt_free_hostcache(r);
@@ -1907,14 +2190,14 @@ rt_free(resource *_r)
static void
rt_res_dump(resource *_r)
{
- rtable *r = (rtable *) _r;
+ rtable_private *r = RT_PRIV((rtable *) _r);
debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
r->name, net_label[r->addr_type], r->rt_count, r->use_count);
}
static struct resclass rt_class = {
.name = "Routing table",
- .size = sizeof(struct rtable),
+ .size = sizeof(rtable_private),
.free = rt_free,
.dump = rt_res_dump,
.lookup = NULL,
@@ -1928,11 +2211,16 @@ rt_setup(pool *pp, struct rtable_config *cf)
void *nb = mb_alloc(pp, ns);
ASSERT_DIE(ns - 1 == bsnprintf(nb, ns, "Routing table %s", cf->name));
- pool *p = rp_new(pp, nb);
- mb_move(nb, p);
+ struct birdloop *l = birdloop_new(pp, DOMAIN_ORDER(rtable), nb);
+ pool *p = birdloop_pool(l);
+
+ birdloop_enter(l);
- rtable *t = ralloc(p, &rt_class);
+ rtable_private *t = ralloc(p, &rt_class);
t->rp = p;
+ t->loop = l;
+
+ t->rte_slab = sl_new(p, sizeof(struct rte_storage));
t->name = cf->name;
t->config = cf;
@@ -1940,19 +2228,35 @@ rt_setup(pool *pp, struct rtable_config *cf)
fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);
- if (!(t->internal = cf->internal))
- {
- init_list(&t->channels);
- hmap_init(&t->id_map, p, 1024);
- hmap_set(&t->id_map, 0);
+ init_list(&t->imports);
+ init_list(&t->exports);
- init_list(&t->subscribers);
+ hmap_init(&t->id_map, p, 1024);
+ hmap_set(&t->id_map, 0);
- t->rt_event = ev_new_init(p, rt_event, t);
- t->last_rt_change = t->gc_time = current_time();
- }
+ init_list(&t->pending_exports);
+ init_list(&t->subscribers);
+
+ t->announce_event = ev_new_init(p, rt_announce_exports, t);
+ t->ec_event = ev_new_init(p, rt_export_cleanup, t);
+ t->prune_event = ev_new_init(p, rt_prune_table, t);
+ t->hcu_event = ev_new_init(p, rt_update_hostcache, t);
+ t->nhu_event = ev_new_init(p, rt_next_hop_update, t);
+
+ t->nhu_event->cork = &rt_cork;
+ t->prune_event->cork = &rt_cork;
+
+ t->last_rt_change = t->gc_time = current_time();
+ t->next_export_seq = 1;
- return t;
+ t->rl_pipe = (struct tbf) TBF_DEFAULT_LOG_LIMITS;
+
+ t->nhu_lp = lp_new_default(p);
+
+ mb_move(nb, p);
+ birdloop_leave(l);
+
+ return (rtable *) t;
}
/**
@@ -1965,13 +2269,11 @@ void
rt_init(void)
{
rta_init();
- rt_table_pool = rp_new(&root_pool, "Routing tables");
- rte_update_pool = lp_new_default(rt_table_pool);
- rte_slab = sl_new(rt_table_pool, sizeof(rte));
+ rt_table_pool = rp_new(&root_pool, &main_birdloop, "Routing tables");
init_list(&routing_tables);
+ ev_init_cork(&rt_cork, "Route Table Cork");
}
-
/**
* rt_prune_table - prune a routing table
*
@@ -1987,12 +2289,15 @@ rt_init(void)
* iteration.
*/
static void
-rt_prune_table(rtable *tab)
+rt_prune_table(void *data)
{
+ rtable_private *tab = data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
+
struct fib_iterator *fit = &tab->prune_fit;
- int limit = 512;
+ int limit = tab->delete ? 16384 : 512;
- struct channel *c;
+ struct rt_import_hook *ih;
node *n, *x;
DBG("Pruning route table %s\n", tab->name);
@@ -2003,12 +2308,24 @@ rt_prune_table(rtable *tab)
if (tab->prune_state == 0)
return;
+ if (tab->delete && !rt_fast_prune_ready(tab))
+ return;
+
+ rt_lock_table(tab);
+
if (tab->prune_state == 1)
{
/* Mark channels to flush */
- WALK_LIST2(c, n, tab->channels, table_node)
- if (c->channel_state == CS_FLUSHING)
- c->flush_active = 1;
+ WALK_LIST2(ih, n, tab->imports, n)
+ if (ih->import_state == TIS_STOP)
+ rt_set_import_state(ih, TIS_FLUSHING);
+ else if ((ih->stale_valid != ih->stale_pruning) && (ih->stale_pruning == ih->stale_pruned))
+ {
+ ih->stale_pruning = ih->stale_valid;
+
+ if (ih->req->trace_routes & D_STATES)
+ log(L_TRACE "%s: table prune after refresh begin [%u]", ih->req->name, ih->stale_pruning);
+ }
FIB_ITERATE_INIT(fit, &tab->fib);
tab->prune_state = 2;
@@ -2017,43 +2334,53 @@ rt_prune_table(rtable *tab)
again:
FIB_ITERATE_START(&tab->fib, fit, net, n)
{
- rte *e;
-
- rescan:
- for (e=n->routes; e; e=e->next)
+ if (tab->delete)
{
- if (e->sender->flush_active || (e->flags & REF_DISCARD))
- {
- if (limit <= 0)
- {
- FIB_ITERATE_PUT(fit);
- ev_schedule(tab->rt_event);
- return;
- }
+ ASSERT_DIE(!n->first);
- rte_discard(e);
- limit--;
+ for (struct rte_storage *e = n->routes, *next; e; e = next)
+ {
+ next = e->next;
- goto rescan;
- }
+ struct rt_import_request *req = e->rte.sender->req;
+ if (req->preimport)
+ req->preimport(req, NULL, &e->rte);
- if (e->flags & REF_MODIFY)
+ tab->rt_count--;
+ hmap_clear(&tab->id_map, e->rte.id);
+ rte_free(e, tab);
+ limit--;
+ }
+
+ n->routes = NULL;
+ }
+ else
+ rescan:
+ for (struct rte_storage *e=n->routes; e; e=e->next)
+ {
+ struct rt_import_hook *s = e->rte.sender;
+
+ if ((s->import_state == TIS_FLUSHING) ||
+ (e->rte.stale_cycle < s->stale_valid) ||
+ (e->rte.stale_cycle > s->stale_set))
{
if (limit <= 0)
{
FIB_ITERATE_PUT(fit);
- ev_schedule(tab->rt_event);
+ ev_send_loop(tab->loop, tab->prune_event);
+ ev_send_loop(tab->loop, tab->announce_event);
+ rt_unlock_table(tab);
return;
}
- rte_modify(e);
+ rte_recalculate(tab, e->rte.sender, n, NULL, e->rte.src);
limit--;
goto rescan;
}
}
- if (!n->routes) /* Orphaned FIB entry */
+ if (!n->routes && !n->first) /* Orphaned FIB entry */
{
FIB_ITERATE_PUT(fit);
fib_delete(&tab->fib, n);
@@ -2070,23 +2397,205 @@ again:
tab->gc_time = current_time();
/* state change 2->0, 3->1 */
- tab->prune_state &= 1;
-
- if (tab->prune_state > 0)
- ev_schedule(tab->rt_event);
+ if (tab->prune_state &= 1)
+ ev_send_loop(tab->loop, tab->prune_event);
- /* FIXME: This should be handled in a better way */
- rt_prune_sources();
+ uint flushed_channels = 0;
/* Close flushed channels */
- WALK_LIST2_DELSAFE(c, n, x, tab->channels, table_node)
- if (c->flush_active)
+ WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
+ if (ih->import_state == TIS_FLUSHING)
+ {
+ ih->flush_seq = tab->next_export_seq;
+ rt_set_import_state(ih, TIS_WAITING);
+ flushed_channels++;
+ }
+ else if (ih->stale_pruning != ih->stale_pruned)
+ {
+ ih->stale_pruned = ih->stale_pruning;
+
+ if (ih->req->trace_routes & D_STATES)
+ log(L_TRACE "%s: table prune after refresh end [%u]", ih->req->name, ih->stale_pruned);
+ }
+
+  /* If no exports are attached, nothing would ever trigger export cleanup
+   * for the flushed channels, so proceed to it directly */
+ if (EMPTY_LIST(tab->exports) && flushed_channels)
+ rt_export_cleanup(tab);
+
+ ev_send_loop(tab->loop, tab->announce_event);
+ rt_unlock_table(tab);
+}
+
+static void
+rt_export_cleanup(void *data)
+{
+ rtable_private *tab = data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
+
+ u64 min_seq = ~((u64) 0);
+ struct rt_pending_export *last_export_to_free = NULL;
+ struct rt_pending_export *first_export = tab->first_export;
+
+ struct rt_export_hook *eh;
+ node *n;
+ WALK_LIST2(eh, n, tab->exports, n)
+ {
+ switch (atomic_load_explicit(&eh->export_state, memory_order_acquire))
+ {
+ case TES_DOWN:
+ case TES_HUNGRY:
+ continue;
+
+ case TES_READY:
+ {
+ struct rt_pending_export *last = atomic_load_explicit(&eh->last_export, memory_order_acquire);
+ if (!last)
+ /* No last export means that the channel has exported nothing since last cleanup */
+ goto done;
+
+ else if (min_seq > last->seq)
+ {
+ min_seq = last->seq;
+ last_export_to_free = last;
+ }
+ continue;
+ }
+
+ default:
+      /* It's only safe to clean up when the export state is idle or regular. No feeding or stopping allowed. */
+ goto done;
+ }
+ }
+
+ tab->first_export = last_export_to_free ? rt_next_export_fast(last_export_to_free) : NULL;
+
+ if (config->table_debug)
+    log(L_TRACE "%s: Export cleanup, old first_export seq %lu, new %lu, min_seq %lu",
+ tab->name,
+ first_export ? first_export->seq : 0,
+ tab->first_export ? tab->first_export->seq : 0,
+ min_seq);
+
+ WALK_LIST2(eh, n, tab->exports, n)
+ {
+ if (atomic_load_explicit(&eh->export_state, memory_order_acquire) != TES_READY)
+ continue;
+
+ struct rt_pending_export *last = atomic_load_explicit(&eh->last_export, memory_order_acquire);
+ if (last == last_export_to_free)
+ {
+      /* This may fail when the channel managed to export more in between. This is OK. */
+ atomic_compare_exchange_strong_explicit(
+ &eh->last_export, &last, NULL,
+ memory_order_release,
+ memory_order_relaxed);
+
+ DBG("store hook=%p last_export=NULL\n", eh);
+ }
+ }
+
+ while (first_export && (first_export->seq <= min_seq))
+ {
+ ASSERT_DIE(first_export->new || first_export->old);
+
+ const net_addr *n = first_export->new ?
+ first_export->new->rte.net :
+ first_export->old->rte.net;
+ net *net = SKIP_BACK(struct network, n.addr, (net_addr (*)[0]) n);
+
+ ASSERT_DIE(net->first == first_export);
+
+ if (first_export == net->last)
+ /* The only export here */
+ net->last = net->first = NULL;
+ else
+ /* First is now the next one */
+ net->first = atomic_load_explicit(&first_export->next, memory_order_relaxed);
+
+    /* Now the old route can finally be freed */
+ if (first_export->old)
+ {
+ rt_rte_trace_in(D_ROUTES, first_export->old->rte.sender->req, &first_export->old->rte, "freed");
+ hmap_clear(&tab->id_map, first_export->old->rte.id);
+ rte_free(first_export->old, tab);
+ }
+
+#ifdef LOCAL_DEBUG
+ memset(first_export, 0xbd, sizeof(struct rt_pending_export));
+#endif
+
+ struct rt_export_block *reb = HEAD(tab->pending_exports);
+ ASSERT_DIE(reb == PAGE_HEAD(first_export));
+
+ u32 pos = (first_export - &reb->export[0]);
+ u32 end = atomic_load_explicit(&reb->end, memory_order_relaxed);
+ ASSERT_DIE(pos < end);
+
+ struct rt_pending_export *next = NULL;
+
+ if (++pos < end)
+ next = &reb->export[pos];
+ else
+ {
+ rem_node(&reb->n);
+
+#ifdef LOCAL_DEBUG
+ memset(reb, 0xbe, page_size);
+#endif
+
+ free_page(reb);
+
+ if (EMPTY_LIST(tab->pending_exports))
+ {
+ if (config->table_debug)
+ log(L_TRACE "%s: Resetting export seq", tab->name);
+
+ node *n;
+ WALK_LIST2(eh, n, tab->exports, n)
+ {
+ if (atomic_load_explicit(&eh->export_state, memory_order_acquire) != TES_READY)
+ continue;
+
+ ASSERT_DIE(atomic_load_explicit(&eh->last_export, memory_order_acquire) == NULL);
+ bmap_reset(&eh->seq_map, 1024);
+ }
+
+ tab->next_export_seq = 1;
+ }
+ else
{
- c->flush_active = 0;
- channel_set_state(c, CS_DOWN);
+ reb = HEAD(tab->pending_exports);
+ next = &reb->export[0];
}
+ }
- return;
+ first_export = next;
+ }
+
+done:;
+ struct rt_import_hook *ih; node *x;
+ WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
+ if (ih->import_state == TIS_WAITING)
+ if (!first_export || (first_export->seq >= ih->flush_seq))
+ {
+ ih->import_state = TIS_CLEARED;
+ ev_send(ih->req->list, ih->export_announce_event);
+ }
+
+ if (EMPTY_LIST(tab->pending_exports) && ev_active(tab->announce_event))
+ ev_postpone(tab->announce_event);
+
+ /* If reduced to at most one export block pending */
+ if (tab->cork_active &&
+ ((!tab->first_export) || (tab->first_export->seq + 128 > tab->next_export_seq)))
+ {
+ tab->cork_active = 0;
+ ev_uncork(&rt_cork);
+ if (config->table_debug)
+ log(L_TRACE "%s: cork released", tab->name);
+ }
+
+ rt_fast_prune_check(tab);
}
void
@@ -2120,7 +2629,7 @@ rta_next_hop_outdated(rta *a)
}
void
-rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls)
+rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls, linpool *lp)
{
a->hostentry = he;
a->dest = he->dest;
@@ -2155,7 +2664,7 @@ no_nexthop:
else
{
nhr = nhp;
- nhp = (nhp ? (nhp->next = lp_alloc(rte_update_pool, NEXTHOP_MAX_SIZE)) : &(a->nh));
+ nhp = (nhp ? (nhp->next = lp_alloc(lp, NEXTHOP_MAX_SIZE)) : &(a->nh));
}
memset(nhp, 0, NEXTHOP_MAX_SIZE);
@@ -2210,8 +2719,8 @@ no_nexthop:
}
}
-static inline rte *
-rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
+static inline struct rte_storage *
+rt_next_hop_update_rte(rtable_private *tab, net *n, rte *old)
{
rta *a = alloca(RTA_MAX_SIZE);
memcpy(a, old->attrs, rta_size(old->attrs));
@@ -2219,58 +2728,71 @@ rt_next_hop_update_rte(rtable *tab UNUSED, rte *old)
mpls_label_stack mls = { .len = a->nh.labels_orig };
memcpy(mls.stack, &a->nh.label[a->nh.labels - mls.len], mls.len * sizeof(u32));
- rta_apply_hostentry(a, old->attrs->hostentry, &mls);
- a->aflags = 0;
+ rta_apply_hostentry(a, old->attrs->hostentry, &mls, tab->nhu_lp);
+ a->cached = 0;
- rte *e = sl_alloc(rte_slab);
- memcpy(e, old, sizeof(rte));
- e->attrs = rta_lookup(a);
+ rte e0 = *old;
+ e0.attrs = rta_lookup(a);
- return e;
+ return rte_store(&e0, n, tab);
}
static inline int
-rt_next_hop_update_net(rtable *tab, net *n)
+rt_next_hop_update_net(rtable_private *tab, net *n)
{
- rte **k, *e, *new, *old_best, **new_best;
+ struct rte_storage *new;
int count = 0;
- int free_old_best = 0;
- old_best = n->routes;
+ struct rte_storage *old_best = n->routes;
if (!old_best)
return 0;
- for (k = &n->routes; e = *k; k = &e->next)
- if (rta_next_hop_outdated(e->attrs))
- {
- new = rt_next_hop_update_rte(tab, e);
- *k = new;
+ for (struct rte_storage *e, **k = &n->routes; e = *k; k = &e->next)
+ if (rta_next_hop_outdated(e->rte.attrs))
+ count++;
+
+ if (!count)
+ return 0;
- rte_trace_in(D_ROUTES, new->sender, new, "updated");
- rte_announce_i(tab, RA_ANY, n, new, e, NULL, NULL);
+ struct rte_multiupdate {
+ struct rte_storage *old, *new;
+ } *updates = alloca(sizeof(struct rte_multiupdate) * count);
+
+ int pos = 0;
+ for (struct rte_storage *e, **k = &n->routes; e = *k; k = &e->next)
+ if (rta_next_hop_outdated(e->rte.attrs))
+ {
+ struct rte_storage *new = rt_next_hop_update_rte(tab, n, &e->rte);
/* Call a pre-comparison hook */
/* Not really an efficient way to compute this */
- if (e->attrs->src->proto->rte_recalculate)
- e->attrs->src->proto->rte_recalculate(tab, n, new, e, NULL);
+ if (e->rte.src->owner->rte_recalculate)
+ e->rte.src->owner->rte_recalculate(tab, n, &new->rte, &e->rte, &old_best->rte);
- if (e != old_best)
- rte_free_quick(e);
- else /* Freeing of the old best rte is postponed */
- free_old_best = 1;
+ updates[pos++] = (struct rte_multiupdate) {
+ .old = e,
+ .new = new,
+ };
- e = new;
- count++;
+ /* Replace the route in the list */
+ new->next = e->next;
+ *k = e = new;
+
+ /* Get a new ID for the route */
+ new->rte.lastmod = current_time();
+ new->rte.id = hmap_first_zero(&tab->id_map);
+ hmap_set(&tab->id_map, new->rte.id);
+
+ lp_flush(tab->nhu_lp);
}
- if (!count)
- return 0;
+ ASSERT_DIE(pos == count);
/* Find the new best route */
- new_best = NULL;
- for (k = &n->routes; e = *k; k = &e->next)
+ struct rte_storage **new_best = NULL;
+ for (struct rte_storage *e, **k = &n->routes; e = *k; k = &e->next)
{
- if (!new_best || rte_better(e, *new_best))
+ if (!new_best || rte_better(&e->rte, &(*new_best)->rte))
new_best = k;
}
@@ -2283,32 +2805,39 @@ rt_next_hop_update_net(rtable *tab, net *n)
n->routes = new;
}
- /* Announce the new best route */
- if (new != old_best)
- rte_trace_in(D_ROUTES, new->sender, new, "updated [best]");
-
- /* Propagate changes */
- rte_announce_i(tab, RA_UNDEF, n, NULL, NULL, n->routes, old_best);
-
- if (free_old_best)
- rte_free_quick(old_best);
+ /* Announce the changes */
+ for (int i=0; i<count; i++)
+ {
+ _Bool nb = (new == updates[i].new), ob = (old_best == updates[i].old);
+ const char *best_indicator[2][2] = {
+ { "autoupdated", "autoupdated [-best]" },
+ { "autoupdated [+best]", "autoupdated [best]" }
+ };
+ rt_rte_trace_in(D_ROUTES, updates[i].new->rte.sender->req, &updates[i].new->rte, best_indicator[nb][ob]);
+ rte_announce(tab, n, updates[i].new, updates[i].old, new, old_best);
+ }
return count;
}
static void
-rt_next_hop_update(rtable *tab)
+rt_next_hop_update(void *data)
{
+ rtable_private *tab = data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
+
struct fib_iterator *fit = &tab->nhu_fit;
int max_feed = 32;
- if (tab->nhu_state == NHU_CLEAN)
+ if (atomic_load_explicit(&tab->nhu_state, memory_order_acquire) == NHU_CLEAN)
return;
- if (tab->nhu_state == NHU_SCHEDULED)
+ rt_lock_table(tab);
+
+ if (atomic_load_explicit(&tab->nhu_state, memory_order_acquire) == NHU_SCHEDULED)
{
FIB_ITERATE_INIT(fit, &tab->fib);
- tab->nhu_state = NHU_RUNNING;
+ ASSERT_DIE(atomic_exchange_explicit(&tab->nhu_state, NHU_RUNNING, memory_order_acq_rel) == NHU_SCHEDULED);
}
FIB_ITERATE_START(&tab->fib, fit, net, n)
@@ -2316,7 +2845,8 @@ rt_next_hop_update(rtable *tab)
if (max_feed <= 0)
{
FIB_ITERATE_PUT(fit);
- ev_schedule(tab->rt_event);
+ ev_send_loop(tab->loop, tab->nhu_event);
+ rt_unlock_table(tab);
return;
}
max_feed -= rt_next_hop_update_net(tab, n);
@@ -2327,10 +2857,12 @@ rt_next_hop_update(rtable *tab)
* NHU_DIRTY -> NHU_SCHEDULED
* NHU_RUNNING -> NHU_CLEAN
*/
- tab->nhu_state &= 1;
+ if (atomic_fetch_and_explicit(&tab->nhu_state, NHU_SCHEDULED, memory_order_acq_rel) != NHU_RUNNING)
+ ev_send_loop(tab->loop, tab->nhu_event);
+
+ ev_send_loop(tab->loop, tab->announce_event);
- if (tab->nhu_state != NHU_CLEAN)
- ev_schedule(tab->rt_event);
+ rt_unlock_table(tab);
}
@@ -2352,6 +2884,10 @@ rt_new_table(struct symbol *s, uint addr_type)
c->gc_min_time = 5;
c->min_settle_time = 1 S;
c->max_settle_time = 20 S;
+ c->min_rr_settle_time = 30 S;
+ c->max_rr_settle_time = 90 S;
+ c->cork_limit = 4 * page_size / sizeof(struct rt_pending_export);
+ c->owner = new_config;
add_tail(&new_config->tables, &c->n);
@@ -2371,7 +2907,7 @@ rt_new_table(struct symbol *s, uint addr_type)
* configuration.
*/
void
-rt_lock_table(rtable *r)
+rt_lock_table(rtable_private *r)
{
r->use_count++;
}
@@ -2385,16 +2921,11 @@ rt_lock_table(rtable *r)
* for deletion by configuration changes.
*/
void
-rt_unlock_table(rtable *r)
+rt_unlock_table(rtable_private *r)
{
- if (!--r->use_count && r->deleted)
- {
- struct config *conf = r->deleted;
-
- /* Delete the routing table by freeing its pool */
- rt_shutdown(r);
- config_del_obstacle(conf);
- }
+ if (!--r->use_count && r->delete &&
+ !r->prune_state && !atomic_load_explicit(&r->nhu_state, memory_order_acquire))
+ birdloop_stop_self(r->loop, r->delete, r);
}
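Since rt_lock_table()/rt_unlock_table() now take the private view, callers outside the table loop first have to enter it. A sketch of the access pattern using the RT_LOCKED block form seen in rt_subscribe() and rt_done() (the body is illustrative):

/* Sketch: touching private table state from outside its loop. */
void my_touch_table(rtable *t)
{
  RT_LOCKED(t, tab)     /* tab: the rtable_private view, valid inside the block */
  {
    rt_lock_table(tab);   /* pin the table against deletion */
    /* ... safely read or modify tab->... here ... */
    rt_unlock_table(tab);
  }
}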
static struct rtable_config *
@@ -2404,6 +2935,21 @@ rt_find_table_config(struct config *cf, char *name)
return (sym && (sym->class == SYM_TABLE)) ? sym->table : NULL;
}
+static void
+rt_done(void *data)
+{
+ RT_LOCKED((rtable *) data, t)
+ {
+ struct rtable_config *tc = t->config;
+ struct config *c = tc->owner;
+
+ tc->table = NULL;
+ rem_node(&t->n);
+
+ config_del_obstacle(c);
+ }
+}
+
/**
* rt_commit - commit new routing table configuration
* @new: new configuration
@@ -2426,14 +2972,15 @@ rt_commit(struct config *new, struct config *old)
{
WALK_LIST(o, old->tables)
{
- rtable *ot = o->table;
- if (!ot->deleted)
+ RT_LOCK(o->table);
+ rtable_private *ot = RT_PRIV(o->table);
+ if (!ot->delete)
{
r = rt_find_table_config(new, o->name);
if (r && (r->addr_type == o->addr_type) && !new->shutdown)
{
DBG("\t%s: same\n", o->name);
- r->table = ot;
+ r->table = (rtable *) ot;
ot->name = r->name;
ot->config = r;
if (o->sorted != r->sorted)
@@ -2442,12 +2989,13 @@ rt_commit(struct config *new, struct config *old)
else
{
DBG("\t%s: deleted\n", o->name);
- ot->deleted = old;
- config_add_obstacle(old);
rt_lock_table(ot);
+ ot->delete = rt_done;
+ config_add_obstacle(old);
rt_unlock_table(ot);
}
}
+ RT_UNLOCK(o->table);
}
}
@@ -2461,19 +3009,6 @@ rt_commit(struct config *new, struct config *old)
DBG("\tdone\n");
}
-static inline void
-do_feed_channel(struct channel *c, net *n, rte *e)
-{
- rte_update_lock();
- if (c->ra_mode == RA_ACCEPTED)
- rt_notify_accepted(c, n, NULL, NULL, c->refeeding);
- else if (c->ra_mode == RA_MERGED)
- rt_notify_merged(c, n, NULL, NULL, e, e, c->refeeding);
- else /* RA_BASIC */
- rt_notify_basic(c, n, e, e, c->refeeding);
- rte_update_unlock();
-}
-
/**
* rt_feed_channel - advertise all routes to a channel
* @c: channel to be fed
@@ -2483,370 +3018,120 @@ do_feed_channel(struct channel *c, net *n, rte *e)
* has something to do. (We avoid transferring all the routes in single pass in
* order not to monopolize CPU time.)
*/
-int
-rt_feed_channel(struct channel *c)
+static void
+rt_feed_channel(void *data)
{
+ struct rt_export_hook *c = data;
+
struct fib_iterator *fit = &c->feed_fit;
int max_feed = 256;
- ASSERT(c->export_state == ES_FEEDING);
-
- if (!c->feed_active)
- {
- FIB_ITERATE_INIT(fit, &c->table->fib);
- c->feed_active = 1;
- }
-
- FIB_ITERATE_START(&c->table->fib, fit, net, n)
- {
- rte *e = n->routes;
- if (max_feed <= 0)
- {
- FIB_ITERATE_PUT(fit);
- return 0;
- }
-
- if ((c->ra_mode == RA_OPTIMAL) ||
- (c->ra_mode == RA_ACCEPTED) ||
- (c->ra_mode == RA_MERGED))
- if (rte_is_valid(e))
- {
- /* In the meantime, the protocol may fell down */
- if (c->export_state != ES_FEEDING)
- goto done;
-
- do_feed_channel(c, n, e);
- max_feed--;
- }
-
- if (c->ra_mode == RA_ANY)
- for(e = n->routes; e; e = e->next)
- {
- /* In the meantime, the protocol may fell down */
- if (c->export_state != ES_FEEDING)
- goto done;
-
- if (!rte_is_valid(e))
- continue;
-
- do_feed_channel(c, n, e);
- max_feed--;
- }
- }
- FIB_ITERATE_END;
-
-done:
- c->feed_active = 0;
- return 1;
-}
-
-/**
- * rt_feed_baby_abort - abort protocol feeding
- * @c: channel
- *
- * This function is called by the protocol code when the protocol stops or
- * ceases to exist during the feeding.
- */
-void
-rt_feed_channel_abort(struct channel *c)
-{
- if (c->feed_active)
- {
- /* Unlink the iterator */
- fit_get(&c->table->fib, &c->feed_fit);
- c->feed_active = 0;
- }
-}
-
-
-/*
- * Import table
- */
-
-int
-rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
-{
- struct rtable *tab = c->in_table;
- rte *old, **pos;
- net *net;
-
- if (new)
+ rtable_private *tab;
+ if (c->export_state == TES_HUNGRY)
{
- net = net_get(tab, n);
+ rt_set_export_state(c, TES_FEEDING);
- if (!new->pref)
- new->pref = c->preference;
+ tab = RT_LOCK(c->table);
- if (!rta_is_cached(new->attrs))
- new->attrs = rta_lookup(new->attrs);
+ struct rt_pending_export *rpe = rt_last_export(tab);
+ DBG("store hook=%p last_export=%p seq=%lu\n", c, rpe, rpe ? rpe->seq : 0);
+ atomic_store_explicit(&c->last_export, rpe, memory_order_relaxed);
+
+ FIB_ITERATE_INIT(&c->feed_fit, &tab->fib);
}
else
- {
- net = net_find(tab, n);
+ tab = RT_LOCK(c->table);
- if (!net)
- goto drop_withdraw;
- }
+ ASSERT_DIE(c->export_state == TES_FEEDING);
- /* Find the old rte */
- for (pos = &net->routes; old = *pos; pos = &old->next)
- if (old->attrs->src == src)
+redo:
+ FIB_ITERATE_START(&tab->fib, fit, net, n)
{
- if (new && rte_same(old, new))
- {
- /* Refresh the old rte, continue with update to main rtable */
- if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
+ if (max_feed <= 0)
{
- old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
- return 1;
- }
-
- goto drop_update;
- }
-
- /* Move iterator if needed */
- if (old == c->reload_next_rte)
- c->reload_next_rte = old->next;
-
- /* Remove the old rte */
- *pos = old->next;
- rte_free_quick(old);
- tab->rt_count--;
-
- break;
- }
-
- if (!new)
- {
- if (!old)
- goto drop_withdraw;
-
- if (!net->routes)
- fib_delete(&tab->fib, net);
-
- return 1;
- }
-
- struct channel_limit *l = &c->rx_limit;
- if (l->action && !old)
- {
- if (tab->rt_count >= l->limit)
- channel_notify_limit(c, l, PLD_RX, tab->rt_count);
-
- if (l->state == PLS_BLOCKED)
- {
- /* Required by rte_trace_in() */
- new->net = net;
-
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
- goto drop_update;
- }
- }
-
- /* Insert the new rte */
- rte *e = rte_do_cow(new);
- e->flags |= REF_COW;
- e->net = net;
- e->sender = c;
- e->lastmod = current_time();
- e->next = *pos;
- *pos = e;
- tab->rt_count++;
- return 1;
-
-drop_update:
- c->stats.imp_updates_received++;
- c->stats.imp_updates_ignored++;
- rte_free(new);
-
- if (!net->routes)
- fib_delete(&tab->fib, net);
-
- return 0;
-
-drop_withdraw:
- c->stats.imp_withdraws_received++;
- c->stats.imp_withdraws_ignored++;
- return 0;
-}
-
-int
-rt_reload_channel(struct channel *c)
-{
- struct rtable *tab = c->in_table;
- struct fib_iterator *fit = &c->reload_fit;
- int max_feed = 64;
-
- ASSERT(c->channel_state == CS_UP);
+ FIB_ITERATE_PUT(fit);
+ rt_send_export_event(c);
- if (!c->reload_active)
- {
- FIB_ITERATE_INIT(fit, &tab->fib);
- c->reload_active = 1;
- }
+ RT_UNLOCK(c->table);
+ return;
+ }
- do {
- for (rte *e = c->reload_next_rte; e; e = e->next)
- {
- if (max_feed-- <= 0)
+ if (atomic_load_explicit(&c->export_state, memory_order_acquire) != TES_FEEDING)
{
- c->reload_next_rte = e;
- debug("%s channel reload burst split (max_feed=%d)", c->proto->name, max_feed);
- return 0;
+ RT_UNLOCK(c->table);
+ return;
}
- rte_update2(c, e->net->n.addr, rte_do_cow(e), e->attrs->src);
- }
-
- c->reload_next_rte = NULL;
-
- FIB_ITERATE_START(&tab->fib, fit, net, n)
- {
- if (c->reload_next_rte = n->routes)
+ if (!n->routes || !rte_is_valid(&n->routes->rte))
+ ; /* if no route, do nothing */
+ else if (c->req->export_bulk)
{
- FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
- break;
- }
- }
- FIB_ITERATE_END;
- }
- while (c->reload_next_rte);
-
- c->reload_active = 0;
- return 1;
-}
-
-void
-rt_reload_channel_abort(struct channel *c)
-{
- if (c->reload_active)
- {
- /* Unlink the iterator */
- fit_get(&c->in_table->fib, &c->reload_fit);
- c->reload_next_rte = NULL;
- c->reload_active = 0;
- }
-}
-
-void
-rt_prune_sync(rtable *t, int all)
-{
- struct fib_iterator fit;
-
- FIB_ITERATE_INIT(&fit, &t->fib);
+ uint count = rte_feed_count(n);
+ if (count)
+ {
+ rte **feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(n, feed, count);
-again:
- FIB_ITERATE_START(&t->fib, &fit, net, n)
- {
- rte *e, **ee = &n->routes;
+ struct rt_pending_export *rpe_last, *rpe_first = n->first;
+ for (struct rt_pending_export *rpe = rpe_first; rpe; rpe = rpe_next(rpe, NULL))
+ rpe_last = rpe;
- while (e = *ee)
- {
- if (all || (e->flags & (REF_STALE | REF_DISCARD)))
- {
- *ee = e->next;
- rte_free_quick(e);
- t->rt_count--;
- }
- else
- ee = &e->next;
- }
+ FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
+ RT_UNLOCK(c->table);
- if (all || !n->routes)
- {
- FIB_ITERATE_PUT(&fit);
- fib_delete(&t->fib, n);
- goto again;
- }
- }
- FIB_ITERATE_END;
-}
+ c->req->export_bulk(c->req, n->n.addr, NULL, feed, count);
+ RT_LOCK(c->table);
-/*
- * Export table
- */
+ for (struct rt_pending_export *rpe = rpe_first; rpe; rpe = rpe_next(rpe, NULL))
+ {
+ rpe_mark_seen(c, rpe);
+ if (rpe == rpe_last)
+ break;
+ ASSERT_DIE(rpe->seq < rpe_last->seq);
+ }
-int
-rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, int refeed)
-{
- struct rtable *tab = c->out_table;
- struct rte_src *src;
- rte *old, **pos;
- net *net;
+ max_feed -= count;
- if (new)
- {
- net = net_get(tab, n);
- src = new->attrs->src;
+ goto redo;
+ }
+ }
+ else if (c->req->export_one)
+ {
+ struct rt_pending_export rpe = { .new = n->routes, .new_best = n->routes };
- rte_store_tmp_attrs(new, rte_update_pool, NULL);
+ struct rt_pending_export *rpe_last, *rpe_first = n->first;
+ for (struct rt_pending_export *rpe = rpe_first; rpe; rpe = rpe_next(rpe, NULL))
+ rpe_last = rpe;
- if (!rta_is_cached(new->attrs))
- new->attrs = rta_lookup(new->attrs);
- }
- else
- {
- net = net_find(tab, n);
- src = old0->attrs->src;
+ FIB_ITERATE_PUT_NEXT(fit, &tab->fib);
+ RT_UNLOCK(c->table);
- if (!net)
- goto drop_withdraw;
- }
+ c->req->export_one(c->req, n->n.addr, &rpe);
- /* Find the old rte */
- for (pos = &net->routes; old = *pos; pos = &old->next)
- if ((c->ra_mode != RA_ANY) || (old->attrs->src == src))
- {
- if (new && rte_same(old, new))
- {
- /* REF_STALE / REF_DISCARD not used in export table */
- /*
- if (old->flags & (REF_STALE | REF_DISCARD | REF_MODIFY))
+ RT_LOCK(c->table);
+ for (struct rt_pending_export *rpe = rpe_first; rpe; rpe = rpe_next(rpe, NULL))
{
- old->flags &= ~(REF_STALE | REF_DISCARD | REF_MODIFY);
- return 1;
+ rpe_mark_seen(c, rpe);
+ if (rpe == rpe_last)
+ break;
+ ASSERT_DIE(rpe->seq < rpe_last->seq);
}
- */
- goto drop_update;
+ max_feed--;
+ goto redo;
}
-
- /* Remove the old rte */
- *pos = old->next;
- rte_free_quick(old);
- tab->rt_count--;
-
- break;
+ else
+ bug("Export request must always provide an export method");
}
+ FIB_ITERATE_END;
- if (!new)
- {
- if (!old)
- goto drop_withdraw;
-
- if (!net->routes)
- fib_delete(&tab->fib, net);
-
- return 1;
- }
-
- /* Insert the new rte */
- rte *e = rte_do_cow(new);
- e->flags |= REF_COW;
- e->net = net;
- e->sender = c;
- e->lastmod = current_time();
- e->next = *pos;
- *pos = e;
- tab->rt_count++;
- return 1;
+ c->event->hook = rt_export_hook;
+ rt_send_export_event(c);
-drop_update:
- return refeed;
+ RT_UNLOCK(c->table);
-drop_withdraw:
- return 0;
+ rt_set_export_state(c, TES_READY);
}
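Note how the bulk branch above snapshots the pending-export journal (rpe_first..rpe_last) before releasing the table lock, and marks exactly that span as seen afterwards, so rt_export_hook will not replay updates already covered by the feed. The same walk as a hypothetical helper:

/* Sketch: mark the journal span [n->first, last] as seen by hook @c. */
static void mark_seen_upto(struct rt_export_hook *c, net *n,
                           struct rt_pending_export *last)
{
  for (struct rt_pending_export *rpe = n->first; rpe; rpe = rpe_next(rpe, NULL))
  {
    rpe_mark_seen(c, rpe);
    if (rpe == last)
      break;
  }
}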
@@ -2953,7 +3238,7 @@ hc_delete_hostentry(struct hostcache *hc, pool *p, struct hostentry *he)
}
static void
-rt_init_hostcache(rtable *tab)
+rt_init_hostcache(rtable_private *tab)
{
struct hostcache *hc = mb_allocz(tab->rp, sizeof(struct hostcache));
init_list(&hc->hostentries);
@@ -2969,7 +3254,7 @@ rt_init_hostcache(rtable *tab)
}
static void
-rt_free_hostcache(rtable *tab)
+rt_free_hostcache(rtable_private *tab)
{
struct hostcache *hc = tab->hostcache;
@@ -2992,13 +3277,13 @@ rt_free_hostcache(rtable *tab)
}
static void
-rt_notify_hostcache(rtable *tab, net *net)
+rt_notify_hostcache(rtable_private *tab, net *net)
{
- if (tab->hcu_scheduled)
+ if (ev_active(tab->hcu_event))
return;
if (trie_match_net(tab->hostcache->trie, net->n.addr))
- rt_schedule_hcu(tab);
+ ev_send_loop(tab->loop, tab->hcu_event);
}
static int
@@ -3021,41 +3306,17 @@ rt_get_igp_metric(rte *rt)
if (ea)
return ea->u.data;
- rta *a = rt->attrs;
-
-#ifdef CONFIG_OSPF
- if ((a->source == RTS_OSPF) ||
- (a->source == RTS_OSPF_IA) ||
- (a->source == RTS_OSPF_EXT1))
- return rt->u.ospf.metric1;
-#endif
-
-#ifdef CONFIG_RIP
- if (a->source == RTS_RIP)
- return rt->u.rip.metric;
-#endif
-
-#ifdef CONFIG_BGP
- if (a->source == RTS_BGP)
- {
- u64 metric = bgp_total_aigp_metric(rt);
- return (u32) MIN(metric, (u64) IGP_METRIC_UNKNOWN);
- }
-#endif
-
-#ifdef CONFIG_BABEL
- if (a->source == RTS_BABEL)
- return rt->u.babel.metric;
-#endif
-
- if (a->source == RTS_DEVICE)
+ if (rt->attrs->source == RTS_DEVICE)
return 0;
+ if (rt->src->owner->class->rte_igp_metric)
+ return rt->src->owner->class->rte_igp_metric(rt);
+
return IGP_METRIC_UNKNOWN;
}
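The per-protocol #ifdef ladder is gone; each route owner now advertises its metric through a class hook. A sketch of what a protocol-side implementation might look like (names and the exact struct layout are assumptions; the real per-protocol hooks live in other parts of this changeset):

/* Sketch: a protocol supplying the rte_igp_metric class hook. */
static u32 my_proto_rte_igp_metric(rte *rt)
{
  return my_metric_from(rt);  /* hypothetical metric accessor */
}

static struct rte_owner_class my_proto_rte_owner_class = {
  .rte_igp_metric = my_proto_rte_igp_metric,
};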
static int
-rt_update_hostentry(rtable *tab, struct hostentry *he)
+rt_update_hostentry(rtable_private *tab, struct hostentry *he)
{
rta *old_src = he->src;
int direct = 0;
@@ -3072,11 +3333,12 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
net *n = net_route(tab, &he_addr);
if (n)
{
- rte *e = n->routes;
- rta *a = e->attrs;
- pxlen = n->n.addr->pxlen;
+ struct rte_storage *e = n->routes;
+ rta *a = e->rte.attrs;
+ word pref = a->pref;
- if (a->hostentry)
+ for (struct rte_storage *ee = n->routes; ee; ee = ee->next)
+ if ((ee->rte.attrs->pref >= pref) && ee->rte.attrs->hostentry)
{
/* Recursive route should not depend on another recursive route */
log(L_WARN "Next hop address %I resolvable through recursive route for %N",
@@ -3084,6 +3346,8 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
goto done;
}
+ pxlen = n->n.addr->pxlen;
+
if (a->dest == RTD_UNICAST)
{
for (struct nexthop *nh = &(a->nh); nh; nh = nh->next)
@@ -3104,7 +3368,7 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
he->src = rta_clone(a);
he->dest = a->dest;
he->nexthop_linkable = !direct;
- he->igp_metric = rt_get_igp_metric(e);
+ he->igp_metric = rt_get_igp_metric(&e->rte);
}
done:
@@ -3116,8 +3380,11 @@ done:
}
static void
-rt_update_hostcache(rtable *tab)
+rt_update_hostcache(void *data)
{
+ rtable_private *tab = data;
+ ASSERT_DIE(birdloop_inside(tab->loop));
+
struct hostcache *hc = tab->hostcache;
struct hostentry *he;
node *n, *x;
@@ -3138,15 +3405,15 @@ rt_update_hostcache(rtable *tab)
if (rt_update_hostentry(tab, he))
rt_schedule_nhu(he->tab);
}
-
- tab->hcu_scheduled = 0;
}
struct hostentry *
-rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
+rt_get_hostentry(rtable *t, ip_addr a, ip_addr ll, rtable *dep)
{
struct hostentry *he;
+ rtable_private *tab = RT_LOCK(t);
+
if (!tab->hostcache)
rt_init_hostcache(tab);
@@ -3154,10 +3421,13 @@ rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
struct hostcache *hc = tab->hostcache;
for (he = hc->hash_table[k >> hc->hash_shift]; he != NULL; he = he->next)
if (ipa_equal(he->addr, a) && (he->tab == dep))
- return he;
+ goto done;
he = hc_new_hostentry(hc, tab->rp, a, ipa_zero(ll) ? a : ll, dep, k);
rt_update_hostentry(tab, he);
+
+done:
+ RT_UNLOCK(t);
return he;
}