Diffstat (limited to 'nest')
-rw-r--r--  nest/config.Y      4
-rw-r--r--  nest/proto.c     521
-rw-r--r--  nest/protocol.h   83
-rw-r--r--  nest/route.h     145
-rw-r--r--  nest/rt-show.c    36
-rw-r--r--  nest/rt-table.c  843
6 files changed, 1028 insertions, 604 deletions
diff --git a/nest/config.Y b/nest/config.Y
index a1d901ab..29d6b0db 100644
--- a/nest/config.Y
+++ b/nest/config.Y
@@ -820,8 +820,10 @@ CF_CLI(DUMP NEIGHBORS,,, [[Dump neighbor cache]])
{ neigh_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP ATTRIBUTES,,, [[Dump attribute cache]])
{ rta_dump_all(); cli_msg(0, ""); } ;
-CF_CLI(DUMP ROUTES,,, [[Dump routing table]])
+CF_CLI(DUMP ROUTES,,, [[Dump routes]])
{ rt_dump_all(); cli_msg(0, ""); } ;
+CF_CLI(DUMP TABLES,,, [[Dump table connections]])
+{ rt_dump_hooks_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP PROTOCOLS,,, [[Dump protocol information]])
{ protos_dump_all(); cli_msg(0, ""); } ;
CF_CLI(DUMP FILTER ALL,,, [[Dump all filters in linearized form]])
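The hunk above adds a `dump tables` CLI command next to the existing `dump routes`. A usage sketch, assuming the new command behaves like the other dump commands (the actual dump is written by rt_dump_hooks_all() to the daemon's debug/log output; the CLI session only receives an empty acknowledgement):

bird> dump routes
bird> dump tables
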
diff --git a/nest/proto.c b/nest/proto.c
index 2009ff1f..09582d2e 100644
--- a/nest/proto.c
+++ b/nest/proto.c
@@ -43,8 +43,7 @@ static int graceful_restart_state;
static u32 graceful_restart_locks;
static char *p_states[] = { "DOWN", "START", "UP", "STOP" };
-static char *c_states[] = { "DOWN", "START", "UP", "FLUSHING" };
-static char *e_states[] = { "DOWN", "FEEDING", "READY" };
+static char *c_states[] = { "DOWN", "START", "UP", "STOP", "RESTART" };
extern struct protocol proto_unix_iface;
@@ -55,12 +54,14 @@ static char *proto_state_name(struct proto *p);
static void channel_init_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_update_limit(struct channel *c, struct limit *l, int dir, struct channel_limit *cf);
static void channel_reset_limit(struct channel *c, struct limit *l, int dir);
+static void channel_feed_end(struct channel *c);
+static void channel_export_stopped(struct rt_export_request *req);
static inline int proto_is_done(struct proto *p)
{ return (p->proto_state == PS_DOWN) && (p->active_channels == 0); }
static inline int channel_is_active(struct channel *c)
-{ return (c->channel_state == CS_START) || (c->channel_state == CS_UP); }
+{ return (c->channel_state != CS_DOWN); }
static inline int channel_reloadable(struct channel *c)
{ return c->proto->reload_routes && c->reloadable; }
@@ -68,10 +69,46 @@ static inline int channel_reloadable(struct channel *c)
static inline void
channel_log_state_change(struct channel *c)
{
- if (c->export_state)
- CD(c, "State changed to %s/%s", c_states[c->channel_state], e_states[c->export_state]);
- else
- CD(c, "State changed to %s", c_states[c->channel_state]);
+ CD(c, "State changed to %s", c_states[c->channel_state]);
+}
+
+void
+channel_import_log_state_change(struct rt_import_request *req, u8 state)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ CD(c, "Channel import state changed to %s", rt_import_state_name(state));
+}
+
+void
+channel_export_log_state_change(struct rt_export_request *req, u8 state)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ CD(c, "Channel export state changed to %s", rt_export_state_name(state));
+
+ switch (state)
+ {
+ case TES_FEEDING:
+ if (c->proto->feed_begin)
+ c->proto->feed_begin(c, !c->refeeding);
+ break;
+ case TES_READY:
+ channel_feed_end(c);
+ break;
+ }
+}
+
+static void
+channel_dump_import_req(struct rt_import_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ debug(" Channel %s.%s import request %p\n", c->proto->name, c->name, req);
+}
+
+static void
+channel_dump_export_req(struct rt_export_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}
static void
@@ -141,6 +178,15 @@ proto_find_channel_by_name(struct proto *p, const char *n)
return NULL;
}
+rte * channel_preimport(struct rt_import_request *req, rte *new, rte *old);
+
+void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
+
/**
* proto_add_channel - connect protocol to a routing table
* @p: protocol instance
@@ -165,6 +211,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->channel = cf->channel;
c->proto = p;
c->table = cf->table->table;
+ rt_lock_table(c->table);
c->in_filter = cf->in_filter;
c->out_filter = cf->out_filter;
@@ -182,7 +229,6 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
c->rpki_reload = cf->rpki_reload;
c->channel_state = CS_DOWN;
- c->export_state = ES_DOWN;
c->last_state_change = current_time();
c->reloadable = 1;
@@ -204,6 +250,7 @@ proto_remove_channel(struct proto *p UNUSED, struct channel *c)
CD(c, "Removed", c->name);
+ rt_unlock_table(c->table);
rem_node(&c->n);
mb_free(c);
}
@@ -224,7 +271,7 @@ proto_pause_channels(struct proto *p)
struct channel *c;
WALK_LIST(c, p->channels)
if (!c->disabled && channel_is_active(c))
- channel_set_state(c, CS_START);
+ channel_set_state(c, CS_PAUSE);
}
static void
@@ -233,7 +280,7 @@ proto_stop_channels(struct proto *p)
struct channel *c;
WALK_LIST(c, p->channels)
if (!c->disabled && channel_is_active(c))
- channel_set_state(c, CS_FLUSHING);
+ channel_set_state(c, CS_STOP);
}
static void
@@ -245,69 +292,6 @@ proto_remove_channels(struct proto *p)
}
static void
-channel_schedule_feed(struct channel *c, int initial)
-{
- // DBG("%s: Scheduling meal\n", p->name);
- ASSERT(c->channel_state == CS_UP);
-
- c->export_state = ES_FEEDING;
- c->refeeding = !initial;
-
- ev_schedule_work(c->feed_event);
-}
-
-static void
-channel_feed_loop(void *ptr)
-{
- struct channel *c = ptr;
-
- if (c->export_state != ES_FEEDING)
- return;
-
- /* Start feeding */
- if (!c->feed_active)
- {
- if (c->proto->feed_begin)
- c->proto->feed_begin(c, !c->refeeding);
-
- c->refeed_pending = 0;
- }
-
- // DBG("Feeding protocol %s continued\n", p->name);
- if (!rt_feed_channel(c))
- {
- ev_schedule_work(c->feed_event);
- return;
- }
-
- /* Reset export limit if the feed ended with acceptable number of exported routes */
- if (c->refeeding &&
- (c->limit_active & (1 << PLD_OUT)) &&
- (c->refeed_count <= c->out_limit.max))
- {
- log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, c->out_limit.max);
- channel_reset_limit(c, &c->out_limit, PLD_OUT);
-
- /* Continue in feed - it will process routing table again from beginning */
- c->refeed_count = 0;
- ev_schedule_work(c->feed_event);
- return;
- }
-
- // DBG("Feeding protocol %s finished\n", p->name);
- c->export_state = ES_READY;
- channel_log_state_change(c);
-
- if (c->proto->feed_end)
- c->proto->feed_end(c);
-
- /* Restart feeding */
- if (c->refeed_pending)
- channel_request_feeding(c);
-}
-
-
-static void
channel_roa_in_changed(struct rt_subscription *s)
{
struct channel *c = s->data;
@@ -325,14 +309,12 @@ static void
channel_roa_out_changed(struct rt_subscription *s)
{
struct channel *c = s->data;
- int active = (c->export_state == ES_FEEDING);
+ CD(c, "Feeding triggered by RPKI change");
- CD(c, "Feeding triggered by RPKI change%s", active ? " - already active" : "");
+ c->refeed_pending = 1;
- if (!active)
- channel_request_feeding(c);
- else
- c->refeed_pending = 1;
+ if (c->out_req.hook)
+ rt_stop_export(&c->out_req, channel_export_stopped);
}
/* Temporary code, subscriptions should be changed to resources */
@@ -444,34 +426,189 @@ channel_roa_unsubscribe_all(struct channel *c)
}
static void
-channel_start_export(struct channel *c)
+channel_start_import(struct channel *c)
{
+ if (c->in_req.hook)
+ {
+ log(L_WARN "%s.%s: Attempted to start channel's already started import", c->proto->name, c->name);
+ return;
+ }
+
+ int nlen = strlen(c->name) + strlen(c->proto->name) + 2;
+ char *rn = mb_allocz(c->proto->pool, nlen);
+ bsprintf(rn, "%s.%s", c->proto->name, c->name);
+
+ c->in_req = (struct rt_import_request) {
+ .name = rn,
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_dump_import_req,
+ .log_state_change = channel_import_log_state_change,
+ .preimport = channel_preimport,
+ .rte_modify = c->proto->rte_modify,
+ };
+
ASSERT(c->channel_state == CS_UP);
- ASSERT(c->export_state == ES_DOWN);
- channel_schedule_feed(c, 1); /* Sets ES_FEEDING */
+ channel_reset_limit(c, &c->rx_limit, PLD_RX);
+ channel_reset_limit(c, &c->in_limit, PLD_IN);
+
+ memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
+
+ DBG("%s.%s: Channel start import req=%p\n", c->proto->name, c->name, &c->in_req);
+ rt_request_import(c->table, &c->in_req);
}
static void
-channel_stop_export(struct channel *c)
+channel_start_export(struct channel *c)
{
- /* Need to abort feeding */
- if (c->export_state == ES_FEEDING)
- rt_feed_channel_abort(c);
+ if (c->out_req.hook)
+ {
+ log(L_WARN "%s.%s: Attempted to start channel's already started export", c->proto->name, c->name);
+ return;
+ }
- c->export_state = ES_DOWN;
+ ASSERT(c->channel_state == CS_UP);
+ int nlen = strlen(c->name) + strlen(c->proto->name) + 2;
+ char *rn = mb_allocz(c->proto->pool, nlen);
+ bsprintf(rn, "%s.%s", c->proto->name, c->name);
+
+ c->out_req = (struct rt_export_request) {
+ .name = rn,
+ .trace_routes = c->debug | c->proto->debug,
+ .dump_req = channel_dump_export_req,
+ .log_state_change = channel_export_log_state_change,
+ };
+
+ bmap_init(&c->export_map, c->proto->pool, 1024);
+ bmap_init(&c->export_reject_map, c->proto->pool, 1024);
channel_reset_limit(c, &c->out_limit, PLD_OUT);
- bmap_reset(&c->export_map, 1024);
- bmap_reset(&c->export_reject_map, 1024);
+
+ memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
+
+ switch (c->ra_mode) {
+ case RA_OPTIMAL:
+ c->out_req.export_one = rt_notify_optimal;
+ break;
+ case RA_ANY:
+ c->out_req.export_one = rt_notify_any;
+ c->out_req.export_bulk = rt_feed_any;
+ break;
+ case RA_ACCEPTED:
+ c->out_req.export_bulk = rt_notify_accepted;
+ break;
+ case RA_MERGED:
+ c->out_req.export_bulk = rt_notify_merged;
+ break;
+ default:
+ bug("Unknown route announcement mode");
+ }
+
+ DBG("%s.%s: Channel start export req=%p\n", c->proto->name, c->name, &c->out_req);
+ rt_request_export(c->table, &c->out_req);
}
+static void
+channel_check_stopped(struct channel *c)
+{
+ switch (c->channel_state)
+ {
+ case CS_STOP:
+ if (c->out_req.hook || c->in_req.hook)
+ return;
+
+ channel_set_state(c, CS_DOWN);
+ ev_schedule(c->proto->event);
+
+ break;
+ case CS_PAUSE:
+ if (c->out_req.hook)
+ return;
+
+ channel_set_state(c, CS_START);
+ break;
+ default:
+ bug("Stopped channel in a bad state: %d", c->channel_state);
+ }
+
+ DBG("%s.%s: Channel requests/hooks stopped (in state %s)\n", c->proto->name, c->name, c_states[c->channel_state]);
+}
+
+void
+channel_import_stopped(struct rt_import_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+
+ req->hook = NULL;
+
+ if (c->in_table)
+ rt_prune_sync(c->in_table, 1);
+
+ mb_free(c->in_req.name);
+ c->in_req.name = NULL;
+
+ channel_check_stopped(c);
+}
+
+static void
+channel_export_stopped(struct rt_export_request *req)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+
+ /* The hook has already stopped */
+ req->hook = NULL;
+
+ if (c->refeed_pending)
+ {
+ c->refeeding = 1;
+ c->refeed_pending = 0;
+ rt_request_export(c->table, req);
+ return;
+ }
+
+ /* Free the routes from out_table */
+ if (c->out_table)
+ rt_prune_sync(c->out_table, 1);
+
+ mb_free(c->out_req.name);
+ c->out_req.name = NULL;
+
+ channel_check_stopped(c);
+}
+
+static void
+channel_feed_end(struct channel *c)
+{
+ struct rt_export_request *req = &c->out_req;
+
+ /* Reset export limit if the feed ended with acceptable number of exported routes */
+ struct limit *l = &c->out_limit;
+ if (c->refeeding &&
+ (c->limit_active & (1 << PLD_OUT)) &&
+ (c->refeed_count <= l->max) &&
+ (l->count <= l->max))
+ {
+ log(L_INFO "Protocol %s resets route export limit (%u)", c->proto->name, l->max);
+
+ c->refeed_pending = 1;
+ rt_stop_export(req, channel_export_stopped);
+ return;
+ }
+
+ if (c->proto->feed_end)
+ c->proto->feed_end(c);
+
+ if (c->refeed_pending)
+ rt_stop_export(req, channel_export_stopped);
+ else
+ c->refeeding = 0;
+}
/* Called by protocol for reload from in_table */
void
channel_schedule_reload(struct channel *c)
{
- ASSERT(c->channel_state == CS_UP);
+ ASSERT(c->in_req.hook);
rt_reload_channel_abort(c);
ev_schedule_work(c->reload_event);
@@ -497,23 +634,6 @@ channel_reload_loop(void *ptr)
channel_request_reload(c);
}
-static void
-channel_reset_import(struct channel *c)
-{
- /* Need to abort feeding */
- ev_postpone(c->reload_event);
- rt_reload_channel_abort(c);
-
- rt_prune_sync(c->in_table, 1);
-}
-
-static void
-channel_reset_export(struct channel *c)
-{
- /* Just free the routes */
- rt_prune_sync(c->out_table, 1);
-}
-
/* Called by protocol to activate in_table */
void
channel_setup_in_table(struct channel *c)
@@ -545,22 +665,11 @@ channel_setup_out_table(struct channel *c)
static void
channel_do_start(struct channel *c)
{
- rt_lock_table(c->table);
- add_tail(&c->table->channels, &c->table_node);
c->proto->active_channels++;
- c->feed_event = ev_new_init(c->proto->pool, channel_feed_loop, c);
-
- bmap_init(&c->export_map, c->proto->pool, 1024);
- bmap_init(&c->export_reject_map, c->proto->pool, 1024);
- memset(&c->export_stats, 0, sizeof(struct export_stats));
- memset(&c->import_stats, 0, sizeof(struct import_stats));
-
- channel_reset_limit(c, &c->rx_limit, PLD_RX);
- channel_reset_limit(c, &c->in_limit, PLD_IN);
- channel_reset_limit(c, &c->out_limit, PLD_OUT);
-
CALL(c->channel->start, c);
+
+ channel_start_import(c);
}
static void
@@ -575,9 +684,31 @@ channel_do_up(struct channel *c)
}
static void
-channel_do_flush(struct channel *c)
+channel_do_pause(struct channel *c)
+{
+ /* Need to abort feeding */
+ if (c->reload_event)
+ {
+ ev_postpone(c->reload_event);
+ rt_reload_channel_abort(c);
+ }
+
+ /* Stop export */
+ if (c->out_req.hook)
+ rt_stop_export(&c->out_req, channel_export_stopped);
+
+ channel_roa_unsubscribe_all(c);
+
+ bmap_free(&c->export_map);
+ bmap_free(&c->export_reject_map);
+}
+
+static void
+channel_do_stop(struct channel *c)
{
- rt_schedule_prune(c->table);
+ /* Stop import */
+ if (c->in_req.hook)
+ rt_stop_import(&c->in_req, channel_import_stopped);
c->gr_wait = 0;
if (c->gr_lock)
@@ -586,30 +717,21 @@ channel_do_flush(struct channel *c)
CALL(c->channel->shutdown, c);
/* This have to be done in here, as channel pool is freed before channel_do_down() */
- bmap_free(&c->export_map);
- bmap_free(&c->export_reject_map);
c->in_table = NULL;
c->reload_event = NULL;
c->out_table = NULL;
-
- channel_roa_unsubscribe_all(c);
}
static void
channel_do_down(struct channel *c)
{
- ASSERT(!c->feed_active && !c->reload_active);
+ ASSERT(!c->reload_active);
- rem_node(&c->table_node);
- rt_unlock_table(c->table);
c->proto->active_channels--;
- if (c->in_limit.count || c->rx_limit.count)
- bug("%s: Channel %s is down but still has some routes", c->proto->name, c->name);
-
// bmap_free(&c->export_map);
- memset(&c->import_stats, 0, sizeof(struct import_stats));
- memset(&c->export_stats, 0, sizeof(struct export_stats));
+ memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
+ memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
c->in_table = NULL;
c->reload_event = NULL;
@@ -628,7 +750,6 @@ void
channel_set_state(struct channel *c, uint state)
{
uint cs = c->channel_state;
- uint es = c->export_state;
DBG("%s reporting channel %s state transition %s -> %s\n", c->proto->name, c->name, c_states[cs], c_states[state]);
if (state == cs)
@@ -640,20 +761,11 @@ channel_set_state(struct channel *c, uint state)
switch (state)
{
case CS_START:
- ASSERT(cs == CS_DOWN || cs == CS_UP);
+ ASSERT(cs == CS_DOWN || cs == CS_PAUSE);
if (cs == CS_DOWN)
channel_do_start(c);
- if (es != ES_DOWN)
- channel_stop_export(c);
-
- if (c->in_table && (cs == CS_UP))
- channel_reset_import(c);
-
- if (c->out_table && (cs == CS_UP))
- channel_reset_export(c);
-
break;
case CS_UP:
@@ -668,23 +780,24 @@ channel_set_state(struct channel *c, uint state)
channel_do_up(c);
break;
- case CS_FLUSHING:
- ASSERT(cs == CS_START || cs == CS_UP);
+ case CS_PAUSE:
+ ASSERT(cs == CS_UP);
- if (es != ES_DOWN)
- channel_stop_export(c);
+ if (cs == CS_UP)
+ channel_do_pause(c);
+ break;
- if (c->in_table && (cs == CS_UP))
- channel_reset_import(c);
+ case CS_STOP:
+ ASSERT(cs == CS_UP || cs == CS_START || cs == CS_PAUSE);
- if (c->out_table && (cs == CS_UP))
- channel_reset_export(c);
+ if (cs == CS_UP)
+ channel_do_pause(c);
- channel_do_flush(c);
+ channel_do_stop(c);
break;
case CS_DOWN:
- ASSERT(cs == CS_FLUSHING);
+ ASSERT(cs == CS_STOP);
channel_do_down(c);
break;
@@ -709,35 +822,16 @@ channel_set_state(struct channel *c, uint state)
void
channel_request_feeding(struct channel *c)
{
- ASSERT(c->channel_state == CS_UP);
-
- CD(c, "Feeding requested");
-
- /* Do nothing if we are still waiting for feeding */
- if (c->export_state == ES_DOWN)
- return;
-
- /* If we are already feeding, we want to restart it */
- if (c->export_state == ES_FEEDING)
- {
- /* Unless feeding is in initial state */
- if (!c->feed_active)
- return;
+ ASSERT(c->out_req.hook);
- rt_feed_channel_abort(c);
- }
-
- /* Track number of exported routes during refeed */
- c->refeed_count = 0;
-
- channel_schedule_feed(c, 0); /* Sets ES_FEEDING */
- channel_log_state_change(c);
+ c->refeed_pending = 1;
+ rt_stop_export(&c->out_req, channel_export_stopped);
}
static void
channel_request_reload(struct channel *c)
{
- ASSERT(c->channel_state == CS_UP);
+ ASSERT(c->in_req.hook);
ASSERT(channel_reloadable(c));
CD(c, "Reload requested");
@@ -861,6 +955,7 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
c->merge_limit = cf->merge_limit;
c->preference = cf->preference;
c->debug = cf->debug;
+ c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
c->in_keep_filtered = cf->in_keep_filtered;
c->rpki_reload = cf->rpki_reload;
@@ -975,8 +1070,8 @@ proto_event(void *ptr)
if (proto_is_done(p))
{
- if (p->proto->cleanup)
- p->proto->cleanup(p);
+ rfree(p->pool);
+ p->pool = NULL;
p->active = 0;
proto_log_state_change(p);
@@ -1528,7 +1623,7 @@ graceful_restart_done(timer *t UNUSED)
WALK_LIST(c, p->channels)
{
/* Resume postponed export of routes */
- if ((c->channel_state == CS_UP) && c->gr_wait && c->proto->rt_notify)
+ if ((c->channel_state == CS_UP) && c->gr_wait && p->rt_notify)
channel_start_export(c);
/* Cleanup */
@@ -1618,7 +1713,11 @@ protos_dump_all(void)
struct proto *p;
WALK_LIST(p, proto_list)
{
- debug(" protocol %s state %s\n", p->name, p_states[p->proto_state]);
+#define DPF(x) (p->x ? " " #x : "")
+ debug(" protocol %s (%p) state %s with %d active channels flags: %s%s%s%s%s\n",
+ p->name, p, p_states[p->proto_state], p->active_channels,
+ DPF(disabled), DPF(active), DPF(do_start), DPF(do_stop), DPF(reconfiguring));
+#undef DPF
struct channel *c;
WALK_LIST(c, p->channels)
@@ -1628,6 +1727,9 @@ protos_dump_all(void)
debug("\tInput filter: %s\n", filter_name(c->in_filter));
if (c->out_filter)
debug("\tOutput filter: %s\n", filter_name(c->out_filter));
+ debug("\tChannel state: %s/%s/%s\n", c_states[c->channel_state],
+ c->in_req.hook ? rt_import_state_name(rt_import_get_state(c->in_req.hook)) : "-",
+ c->out_req.hook ? rt_export_state_name(rt_export_get_state(c->out_req.hook)) : "-");
}
if (p->proto->dump && (p->proto_state != PS_DOWN))
@@ -1933,8 +2035,6 @@ proto_do_down(struct proto *p)
{
p->down_code = 0;
neigh_prune();
- rfree(p->pool);
- p->pool = NULL;
/* Shutdown is finished in the protocol event */
if (proto_is_done(p))
@@ -2029,8 +2129,16 @@ proto_state_name(struct proto *p)
static void
channel_show_stats(struct channel *c)
{
- struct import_stats *is = &c->import_stats;
- struct export_stats *es = &c->export_stats;
+ struct channel_import_stats *ch_is = &c->import_stats;
+ struct channel_export_stats *ch_es = &c->export_stats;
+ struct rt_import_stats *rt_is = c->in_req.hook ? &c->in_req.hook->stats : NULL;
+ struct rt_export_stats *rt_es = c->out_req.hook ? &c->out_req.hook->stats : NULL;
+
+#define SON(ie, item) ((ie) ? (ie)->item : 0)
+#define SCI(item) SON(ch_is, item)
+#define SCE(item) SON(ch_es, item)
+#define SRI(item) SON(rt_is, item)
+#define SRE(item) SON(rt_es, item)
u32 rx_routes = c->rx_limit.count;
u32 in_routes = c->in_limit.count;
@@ -2038,24 +2146,31 @@ channel_show_stats(struct channel *c)
if (c->in_keep_filtered)
cli_msg(-1006, " Routes: %u imported, %u filtered, %u exported, %u preferred",
- in_routes, (rx_routes - in_routes), out_routes, is->pref);
+ in_routes, (rx_routes - in_routes), out_routes, SRI(pref));
else
cli_msg(-1006, " Routes: %u imported, %u exported, %u preferred",
- in_routes, out_routes, is->pref);
-
- cli_msg(-1006, " Route change stats: received rejected filtered ignored accepted");
- cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u",
- is->updates_received, is->updates_invalid,
- is->updates_filtered, is->updates_ignored,
- is->updates_accepted);
- cli_msg(-1006, " Import withdraws: %10u %10u --- %10u %10u",
- is->withdraws_received, is->withdraws_invalid,
- is->withdraws_ignored, is->withdraws_accepted);
- cli_msg(-1006, " Export updates: %10u %10u %10u --- %10u",
- es->updates_received, es->updates_rejected,
- es->updates_filtered, es->updates_accepted);
- cli_msg(-1006, " Export withdraws: %10u --- --- --- %10u",
- es->withdraws_received, es->withdraws_accepted);
+ in_routes, out_routes, SRI(pref));
+
+ cli_msg(-1006, " Route change stats: received rejected filtered ignored RX limit IN limit accepted");
+ cli_msg(-1006, " Import updates: %10u %10u %10u %10u %10u %10u %10u",
+ SCI(updates_received), SCI(updates_invalid),
+ SCI(updates_filtered), SRI(updates_ignored),
+ SCI(updates_limited_rx), SCI(updates_limited_in),
+ SRI(updates_accepted));
+ cli_msg(-1006, " Import withdraws: %10u %10u --- %10u --- %10u",
+ SCI(withdraws_received), SCI(withdraws_invalid),
+ SRI(withdraws_ignored), SRI(withdraws_accepted));
+ cli_msg(-1006, " Export updates: %10u %10u %10u --- %10u %10u",
+ SRE(updates_received), SCE(updates_rejected),
+ SCE(updates_filtered), SCE(updates_limited), SCE(updates_accepted));
+ cli_msg(-1006, " Export withdraws: %10u --- --- --- ---%10u",
+ SRE(withdraws_received), SCE(withdraws_accepted));
+
+#undef SRI
+#undef SRE
+#undef SCI
+#undef SCE
+#undef SON
}
void
@@ -2073,6 +2188,8 @@ channel_show_info(struct channel *c)
{
cli_msg(-1006, " Channel %s", c->name);
cli_msg(-1006, " State: %s", c_states[c->channel_state]);
+ cli_msg(-1006, " Import state: %s", rt_import_state_name(rt_import_get_state(c->in_req.hook)));
+ cli_msg(-1006, " Export state: %s", rt_export_state_name(rt_export_get_state(c->out_req.hook)));
cli_msg(-1006, " Table: %s", c->table->name);
cli_msg(-1006, " Preference: %d", c->preference);
cli_msg(-1006, " Input filter: %s", filter_name(c->in_filter));
diff --git a/nest/protocol.h b/nest/protocol.h
index c1978914..7447cbf0 100644
--- a/nest/protocol.h
+++ b/nest/protocol.h
@@ -77,7 +77,6 @@ struct protocol {
void (*dump)(struct proto *); /* Debugging dump */
int (*start)(struct proto *); /* Start the instance */
int (*shutdown)(struct proto *); /* Stop the instance */
- void (*cleanup)(struct proto *); /* Called after shutdown when protocol became hungry/down */
void (*get_status)(struct proto *, byte *buf); /* Get instance status (for `show protocols' command) */
void (*get_route_info)(struct rte *, byte *buf); /* Get route information (for `show route' command) */
int (*get_attr)(const struct eattr *, byte *buf, int buflen); /* ASCIIfy dynamic attribute (returns GA_*) */
@@ -133,30 +132,6 @@ struct proto_config {
};
/* Protocol statistics */
-struct import_stats {
- /* Import - from protocol to core */
- u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
- u32 updates_received; /* Number of route updates received */
- u32 updates_invalid; /* Number of route updates rejected as invalid */
- u32 updates_filtered; /* Number of route updates rejected by filters */
- u32 updates_ignored; /* Number of route updates rejected as already in route table */
- u32 updates_accepted; /* Number of route updates accepted and imported */
- u32 withdraws_received; /* Number of route withdraws received */
- u32 withdraws_invalid; /* Number of route withdraws rejected as invalid */
- u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
- u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
-};
-
-struct export_stats {
- /* Export - from core to protocol */
- u32 updates_received; /* Number of route updates received */
- u32 updates_rejected; /* Number of route updates rejected by protocol */
- u32 updates_filtered; /* Number of route updates rejected by filters */
- u32 updates_accepted; /* Number of route updates accepted and exported */
- u32 withdraws_received; /* Number of route withdraws received */
- u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
-};
-
struct proto {
node n; /* Node in global proto_list */
struct protocol *proto; /* Protocol */
@@ -512,7 +487,6 @@ struct channel_config {
struct channel {
node n; /* Node in proto->channels */
- node table_node; /* Node in table->channels */
const char *name; /* Channel name (may be NULL) */
const struct channel_class *channel;
@@ -531,10 +505,28 @@ struct channel {
u8 limit_actions[PLD_MAX]; /* Limit actions enum */
u8 limit_active; /* Flags for active limits */
- struct event *feed_event; /* Event responsible for feeding */
- struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
- struct import_stats import_stats; /* Import statistics */
- struct export_stats export_stats; /* Export statistics */
+ struct channel_import_stats {
+ /* Import - from protocol to core */
+ u32 updates_received; /* Number of route updates received */
+ u32 updates_invalid; /* Number of route updates rejected as invalid */
+ u32 updates_filtered; /* Number of route updates rejected by filters */
+ u32 updates_limited_rx; /* Number of route updates exceeding the rx_limit */
+ u32 updates_limited_in; /* Number of route updates exceeding the in_limit */
+ u32 withdraws_received; /* Number of route withdraws received */
+ u32 withdraws_invalid; /* Number of route withdraws rejected as invalid */
+ } import_stats;
+
+ struct channel_export_stats {
+ /* Export - from core to protocol */
+ u32 updates_rejected; /* Number of route updates rejected by protocol */
+ u32 updates_filtered; /* Number of route updates rejected by filters */
+ u32 updates_accepted; /* Number of route updates accepted and exported */
+ u32 updates_limited; /* Number of route updates exceeding the out_limit */
+ u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
+ } export_stats;
+
+ struct rt_import_request in_req; /* Table import connection */
+ struct rt_export_request out_req; /* Table export connection */
u32 refeed_count; /* Number of routes exported during refeed regardless of out_limit */
@@ -548,10 +540,7 @@ struct channel {
u8 stale; /* Used in reconfiguration */
u8 channel_state;
- u8 export_state; /* Route export state (ES_*, see below) */
- u8 feed_active;
- u8 flush_active;
- u8 refeeding; /* We are refeeding (valid only if export_state == ES_FEEDING) */
+ u8 refeeding; /* Refeeding the channel. */
u8 reloadable; /* Hook reload_routes() is allowed on the channel */
u8 gr_lock; /* Graceful restart mechanism should wait for this channel */
u8 gr_wait; /* Route export to channel is postponed until graceful restart */
@@ -599,34 +588,34 @@ struct channel {
* restricted by that and is on volition of the protocol. Generally, channels
* are opened in protocols' start() hooks when going to PS_UP.
*
- * CS_FLUSHING - The transitional state between initialized channel and closed
+ * CS_STOP - The transitional state between initialized channel and closed
* channel. The channel is still initialized, but no route exchange is allowed.
* Instead, the associated table is running flush loop to remove routes imported
* through the channel. After that, the channel changes state to CS_DOWN and
* is detached from the table (the table is unlocked and the channel is unlinked
- * from it). Unlike other states, the CS_FLUSHING state is not explicitly
+ * from it). Unlike other states, the CS_STOP state is not explicitly
* entered or left by the protocol. A protocol may request to close a channel
* (by calling channel_close()), which causes the channel to change state to
- * CS_FLUSHING and later to CS_DOWN. Also note that channels are closed
+ * CS_STOP and later to CS_DOWN. Also note that channels are closed
* automatically by the core when the protocol is going down.
*
+ * CS_PAUSE - Almost the same as CS_STOP, except that the table import is kept;
+ * only the table export is stopped before the channel transitions back to CS_START.
+ *
* Allowed transitions:
*
* CS_DOWN -> CS_START / CS_UP
- * CS_START -> CS_UP / CS_FLUSHING
- * CS_UP -> CS_START / CS_FLUSHING
- * CS_FLUSHING -> CS_DOWN (automatic)
+ * CS_START -> CS_UP / CS_STOP
+ * CS_UP -> CS_PAUSE / CS_STOP
+ * CS_PAUSE -> CS_START (automatic)
+ * CS_STOP -> CS_DOWN (automatic)
*/
#define CS_DOWN 0
#define CS_START 1
#define CS_UP 2
-#define CS_FLUSHING 3
-
-#define ES_DOWN 0
-#define ES_FEEDING 1
-#define ES_READY 2
-
+#define CS_STOP 3
+#define CS_PAUSE 4
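
For illustration only, the allowed-transition list documented above can be read as a small predicate. The helper below is hypothetical and not part of the patch; note that channel_set_state() in proto.c additionally tolerates CS_PAUSE -> CS_STOP, which is included here:

/* Hypothetical helper mirroring the transition list above -- a sketch, not part of the patch. */
static inline int channel_state_transition_allowed(uint from, uint to)
{
  switch (from)
  {
  case CS_DOWN:  return (to == CS_START) || (to == CS_UP);
  case CS_START: return (to == CS_UP)    || (to == CS_STOP);
  case CS_UP:    return (to == CS_PAUSE) || (to == CS_STOP);
  case CS_PAUSE: return (to == CS_START) || (to == CS_STOP);  /* PAUSE -> START is automatic */
  case CS_STOP:  return (to == CS_DOWN);                      /* automatic */
  default:       return 0;
  }
}
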
struct channel_config *proto_cf_find_channel(struct proto_config *p, uint net_type);
static inline struct channel_config *proto_cf_main_channel(struct proto_config *pc)
@@ -644,7 +633,7 @@ void channel_schedule_reload(struct channel *c);
static inline void channel_init(struct channel *c) { channel_set_state(c, CS_START); }
static inline void channel_open(struct channel *c) { channel_set_state(c, CS_UP); }
-static inline void channel_close(struct channel *c) { channel_set_state(c, CS_FLUSHING); }
+static inline void channel_close(struct channel *c) { channel_set_state(c, CS_STOP); }
void channel_request_feeding(struct channel *c);
void *channel_config_new(const struct channel_class *cc, const char *name, uint net_type, struct proto_config *proto);
diff --git a/nest/route.h b/nest/route.h
index e4507d4a..d0568133 100644
--- a/nest/route.h
+++ b/nest/route.h
@@ -2,6 +2,7 @@
* BIRD Internet Routing Daemon -- Routing Table
*
* (c) 1998--2000 Martin Mares <mj@ucw.cz>
+ * (c) 2019--2021 Maria Matejka <mq@jmq.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@@ -17,6 +18,7 @@
struct ea_list;
struct protocol;
struct proto;
+struct channel;
struct rte_src;
struct symbol;
struct timer;
@@ -160,12 +162,12 @@ typedef struct rtable {
struct slab *rte_slab; /* Slab to allocate route objects */
struct fib fib;
char *name; /* Name of this table */
- list channels; /* List of attached channels (struct channel) */
uint addr_type; /* Type of address data stored in table (NET_*) */
int use_count; /* Number of protocols using this table */
u32 rt_count; /* Number of routes in the table */
- byte internal; /* Internal table of a protocol */
+ list imports; /* Registered route importers */
+ list exports; /* Registered route exporters */
struct hmap id_map;
struct hostcache *hostcache;
@@ -182,6 +184,7 @@ typedef struct rtable {
byte prune_state; /* Table prune state, 1 -> scheduled, 2-> running */
byte hcu_scheduled; /* Hostcache update is scheduled */
byte nhu_state; /* Next Hop Update state */
+ byte internal; /* This table is internal for some other object */
struct fib_iterator prune_fit; /* Rtable prune FIB iterator */
struct fib_iterator nhu_fit; /* Next Hop Update FIB iterator */
struct tbf rl_pipe; /* Rate limiting token buffer for pipe collisions */
@@ -238,7 +241,7 @@ typedef struct rte {
struct rta *attrs; /* Attributes of this route */
const net_addr *net; /* Network this RTE belongs to */
struct rte_src *src; /* Route source that created the route */
- struct channel *sender; /* Channel used to send the route to the routing table */
+ struct rt_import_hook *sender; /* Import hook used to send the route to the routing table */
btime lastmod; /* Last modified (set by table) */
u32 id; /* Table specific route id */
byte flags; /* Table-specific flags */
@@ -262,11 +265,126 @@ struct rte_storage {
#define REF_MODIFY 16 /* Route is scheduled for modify */
/* Route is valid for propagation (may depend on other flags in the future), accepts NULL */
-static inline int rte_is_valid(rte *r) { return r && !(r->flags & REF_FILTERED); }
+static inline int rte_is_valid(const rte *r) { return r && !(r->flags & REF_FILTERED); }
/* Route just has REF_FILTERED flag */
-static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED); }
+static inline int rte_is_filtered(const rte *r) { return !!(r->flags & REF_FILTERED); }
+
+
+/* Table-channel connections */
+
+struct rt_import_request {
+ struct rt_import_hook *hook; /* The table part of importer */
+ char *name;
+ u8 trace_routes;
+
+ void (*dump_req)(struct rt_import_request *req);
+ void (*log_state_change)(struct rt_import_request *req, u8 state);
+ /* Preimport is called just before the @new route is inserted, replacing @old.
+ * Return a route (possibly a different one, or modified in place) to continue, or NULL to withdraw. */
+ struct rte *(*preimport)(struct rt_import_request *req, struct rte *new, struct rte *old);
+ struct rte *(*rte_modify)(struct rte *, struct linpool *);
+};
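
A minimal sketch of how an importer might use this interface, modelled on channel_start_import() and rte_import() as introduced by this patch; all my_* names are hypothetical:

static char my_import_name[] = "my.import";

static void my_import_dump(struct rt_import_request *req)
{ debug("  my import request %p\n", req); }

static void my_import_log(struct rt_import_request *req, u8 state)
{ log(L_TRACE "%s: import state is now %s", req->name, rt_import_state_name(state)); }

static struct rt_import_request my_import;

static void my_start_import(rtable *tab)
{
  my_import = (struct rt_import_request) {
    .name = my_import_name,
    .trace_routes = D_ROUTES,		/* u8 debug-flag mask, as in channel_start_import() */
    .dump_req = my_import_dump,
    .log_state_change = my_import_log,
    /* .preimport and .rte_modify are optional and left NULL here */
  };
  rt_request_import(tab, &my_import);
}

/* Later: push a single update; @src is a struct rte_src owned by the caller */
static void my_announce(const net_addr *n, rte *new, struct rte_src *src)
{ rte_import(&my_import, n, new, src); }
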
+
+struct rt_import_hook {
+ node n;
+ rtable *table; /* The connected table */
+ struct rt_import_request *req; /* The requestor */
+
+ struct rt_import_stats {
+ /* Import - from protocol to core */
+ u32 pref; /* Number of routes selected as best in the (adjacent) routing table */
+ u32 updates_ignored; /* Number of route updates rejected as already in route table */
+ u32 updates_accepted; /* Number of route updates accepted and imported */
+ u32 withdraws_ignored; /* Number of route withdraws rejected as already not in route table */
+ u32 withdraws_accepted; /* Number of route withdraws accepted and processed */
+ } stats;
+
+ btime last_state_change; /* Time of last state transition */
+
+ u8 import_state; /* IS_* */
+
+ void (*stopped)(struct rt_import_request *); /* Stored callback when import is stopped */
+};
+
+struct rt_pending_export {
+ struct rte_storage *new, *new_best, *old, *old_best;
+};
+
+struct rt_export_request {
+ struct rt_export_hook *hook; /* Table part of the export */
+ char *name;
+ u8 trace_routes;
+
+ /* There are two export methods: the requestor is either notified of every single change
+ * or fed the whole set of routes for a network. For regular exports, &export_one is preferred;
+ * when feeding, &export_bulk is preferred, falling back to &export_one.
+ * Thus, for RA_OPTIMAL only &export_one is set,
+ * for RA_MERGED and RA_ACCEPTED only &export_bulk is set,
+ * and for RA_ANY both are set, to accommodate feeding all routes while receiving single changes.
+ */
+ void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
+ void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
+ void (*dump_req)(struct rt_export_request *req);
+ void (*log_state_change)(struct rt_export_request *req, u8);
+};
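
A minimal sketch of the export side, following the wiring done in channel_start_export(): set &export_one for single-change delivery (RA_OPTIMAL/RA_ANY style) and/or &export_bulk for whole-feed delivery (RA_ACCEPTED/RA_MERGED style). The my_* names are hypothetical:

static char my_export_name[] = "my.export";

static void my_export_one(struct rt_export_request *req, const net_addr *net,
			  struct rt_pending_export *rpe)
{
  /* Called for every single change; compare pointers as rt_notify_optimal() does */
  if (rpe->new_best != rpe->old_best)
    log(L_TRACE "%s: best route for %N changed", req->name, net);
}

static void my_export_dump(struct rt_export_request *req)
{ debug("  my export request %p\n", req); }

static void my_export_log(struct rt_export_request *req, u8 state)
{ log(L_TRACE "%s: export state is now %s", req->name, rt_export_state_name(state)); }

static struct rt_export_request my_export;

static void my_start_export(rtable *tab)
{
  my_export = (struct rt_export_request) {
    .name = my_export_name,
    .trace_routes = D_ROUTES,
    .export_one = my_export_one,	/* at least one of export_one / export_bulk must be set */
    .dump_req = my_export_dump,
    .log_state_change = my_export_log,
  };
  rt_request_export(tab, &my_export);
}
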
+
+struct rt_export_hook {
+ node n;
+ rtable *table; /* The connected table */
+
+ pool *pool;
+ linpool *lp;
+
+ struct rt_export_request *req; /* The requestor */
+
+ struct rt_export_stats {
+ /* Export - from core to protocol */
+ u32 updates_received; /* Number of route updates received */
+ u32 withdraws_received; /* Number of route withdraws received */
+ } stats;
+
+ struct fib_iterator feed_fit; /* Routing table iterator used during feeding */
+
+ btime last_state_change; /* Time of last state transition */
+
+ u8 refeed_pending; /* Refeeding and another refeed is scheduled */
+ u8 export_state; /* Route export state (TES_*, see below) */
+
+ struct event *event; /* Event running all the export operations */
+
+ void (*stopped)(struct rt_export_request *); /* Stored callback when export is stopped */
+};
+
+#define TIS_DOWN 0
+#define TIS_UP 1
+#define TIS_STOP 2
+#define TIS_FLUSHING 3
+#define TIS_WAITING 4
+#define TIS_CLEARED 5
+#define TIS_MAX 6
+
+#define TES_DOWN 0
+#define TES_HUNGRY 1
+#define TES_FEEDING 2
+#define TES_READY 3
+#define TES_STOP 4
+#define TES_MAX 5
+
+void rt_request_import(rtable *tab, struct rt_import_request *req);
+void rt_request_export(rtable *tab, struct rt_export_request *req);
+
+void rt_stop_import(struct rt_import_request *, void (*stopped)(struct rt_import_request *));
+void rt_stop_export(struct rt_export_request *, void (*stopped)(struct rt_export_request *));
+
+const char *rt_import_state_name(u8 state);
+const char *rt_export_state_name(u8 state);
+
+static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
+static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
+void rte_import(struct rt_import_request *req, const net_addr *net, rte *new, struct rte_src *src);
/* Types of route announcement, also used as flags */
#define RA_UNDEF 0 /* Undefined RA type */
@@ -281,6 +399,7 @@ static inline int rte_is_filtered(rte *r) { return !!(r->flags & REF_FILTERED);
#define RIC_REJECT -1 /* Rejected by protocol */
#define RIC_DROP -2 /* Silently dropped by protocol */
+#define rte_update channel_rte_import
/**
* rte_update - enter a new update to a routing table
* @c: channel doing the update
@@ -335,23 +454,24 @@ static inline net *net_get(rtable *tab, const net_addr *addr) { return (net *) f
void *net_route(rtable *tab, const net_addr *n);
int net_roa_check(rtable *tab, const net_addr *n, u32 asn);
int rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filter);
-rte *rt_export_merged_show(struct channel *c, net *n, linpool *pool);
-void rt_refresh_begin(rtable *t, struct channel *c);
-void rt_refresh_end(rtable *t, struct channel *c);
-void rt_modify_stale(rtable *t, struct channel *c);
+rte *rt_export_merged(struct channel *c, rte ** feed, uint count, linpool *pool, int silent);
+void rt_refresh_begin(rtable *t, struct rt_import_request *);
+void rt_refresh_end(rtable *t, struct rt_import_request *);
+void rt_modify_stale(rtable *t, struct rt_import_request *);
void rt_schedule_prune(rtable *t);
void rte_dump(struct rte_storage *);
void rte_free(struct rte_storage *, rtable *);
struct rte_storage *rte_store(const rte *, net *net, rtable *);
void rt_dump(rtable *);
void rt_dump_all(void);
-int rt_feed_channel(struct channel *c);
-void rt_feed_channel_abort(struct channel *c);
+void rt_dump_hooks(rtable *);
+void rt_dump_hooks_all(void);
int rt_reload_channel(struct channel *c);
void rt_reload_channel_abort(struct channel *c);
void rt_refeed_channel(struct channel *c);
void rt_prune_sync(rtable *t, int all);
-int rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old, struct rte_storage **old_exported);
+int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
+int rte_update_out(struct channel *c, const net_addr *n, rte *new, const rte *old, struct rte_storage **old_exported);
struct rtable_config *rt_new_table(struct symbol *s, uint addr_type);
@@ -377,6 +497,7 @@ struct rt_show_data {
struct channel *export_channel;
struct config *running_on_config;
struct krt_proto *kernel;
+ struct rt_export_hook *kernel_export_hook;
int export_mode, primary_only, filtered, stats, show_for;
int table_open; /* Iteration (fit) is open */
diff --git a/nest/rt-show.c b/nest/rt-show.c
index ffbc0a90..235d72e4 100644
--- a/nest/rt-show.c
+++ b/nest/rt-show.c
@@ -98,6 +98,29 @@ rt_show_rte(struct cli *c, byte *ia, rte *e, struct rt_show_data *d, int primary
rta_show(c, a);
}
+static uint
+rte_feed_count(net *n)
+{
+ uint count = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTE_OR_NULL(e)))
+ count++;
+ return count;
+}
+
+static void
+rte_feed_obtain(net *n, rte **feed, uint count)
+{
+ uint i = 0;
+ for (struct rte_storage *e = n->routes; e; e = e->next)
+ if (rte_is_valid(RTE_OR_NULL(e)))
+ {
+ ASSERT_DIE(i < count);
+ feed[i++] = &e->rte;
+ }
+ ASSERT_DIE(i == count);
+}
+
static void
rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
@@ -128,7 +151,7 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
struct rte e = er->rte;
/* Export channel is down, do not try to export routes to it */
- if (ec && (ec->export_state == ES_DOWN))
+ if (ec && !ec->out_req.hook)
goto skip;
if (d->export_mode == RSEM_EXPORTED)
@@ -143,7 +166,14 @@ rt_show_net(struct cli *c, net *n, struct rt_show_data *d)
{
/* Special case for merged export */
pass = 1;
- rte *em = rt_export_merged_show(ec, n, c->show_pool);
+ uint count = rte_feed_count(n);
+ if (!count)
+ goto skip;
+
+ rte **feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(n, feed, count);
+ rte *em = rt_export_merged(ec, feed, count, c->show_pool, 1);
+
if (em)
e = *em;
else
@@ -315,7 +345,7 @@ rt_show_get_default_tables(struct rt_show_data *d)
{
WALK_LIST(c, d->export_protocol->channels)
{
- if (c->export_state == ES_DOWN)
+ if (!c->out_req.hook)
continue;
tab = rt_show_add_table(d, c->table);
diff --git a/nest/rt-table.c b/nest/rt-table.c
index 66e63acf..e7ff2816 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -50,13 +50,7 @@ pool *rt_table_pool;
static linpool *rte_update_pool;
list routing_tables;
-
-struct rt_pending_export {
- struct rte_storage *new; /* New route */
- struct rte_storage *new_best; /* New best route */
- struct rte_storage *old; /* Old route */
- struct rte_storage *old_best; /* Old best route */
-};
+list deleted_routing_tables;
static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
@@ -64,6 +58,40 @@ static void rt_update_hostcache(rtable *tab);
static void rt_next_hop_update(rtable *tab);
static inline void rt_prune_table(rtable *tab);
static inline void rt_schedule_notify(rtable *tab);
+static void rt_feed_channel(void *);
+
+const char *rt_import_state_name_array[TIS_MAX] = {
+ [TIS_DOWN] = "DOWN",
+ [TIS_UP] = "UP",
+ [TIS_STOP] = "STOP",
+ [TIS_FLUSHING] = "FLUSHING",
+ [TIS_WAITING] = "WAITING",
+ [TIS_CLEARED] = "CLEARED",
+};
+
+const char *rt_export_state_name_array[TES_MAX] = {
+ [TES_DOWN] = "DOWN",
+ [TES_HUNGRY] = "HUNGRY",
+ [TES_FEEDING] = "FEEDING",
+ [TES_READY] = "READY",
+ [TES_STOP] = "STOP"
+};
+
+const char *rt_import_state_name(u8 state)
+{
+ if (state >= TIS_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_import_state_name_array[state];
+}
+
+const char *rt_export_state_name(u8 state)
+{
+ if (state >= TES_MAX)
+ return "!! INVALID !!";
+ else
+ return rt_export_state_name_array[state];
+}
/* Like fib_route(), but skips empty net entries */
@@ -356,26 +384,43 @@ rte_mergable(rte *pri, rte *sec)
}
static void
-rte_trace(struct channel *c, rte *e, int dir, const char *msg)
+rte_trace(const char *name, const rte *e, int dir, const char *msg)
{
- log(L_TRACE "%s.%s %c %s %N %uL %uG %s",
- c->proto->name, c->name ?: "?", dir, msg, e->net, e->src->private_id, e->src->global_id,
+ log(L_TRACE "%s %c %s %N %uL %uG %s",
+ name, dir, msg, e->net, e->src->private_id, e->src->global_id,
rta_dest_name(e->attrs->dest));
}
static inline void
-rte_trace_in(uint flag, struct channel *c, rte *e, const char *msg)
+channel_rte_trace_in(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
- rte_trace(c, e, '>', msg);
+ rte_trace(c->in_req.name, e, '>', msg);
}
static inline void
-rte_trace_out(uint flag, struct channel *c, rte *e, const char *msg)
+channel_rte_trace_out(uint flag, struct channel *c, const rte *e, const char *msg)
{
if ((c->debug & flag) || (c->proto->debug & flag))
- rte_trace(c, e, '<', msg);
+ rte_trace(c->out_req.name, e, '<', msg);
+}
+
+static inline void
+rt_rte_trace_in(uint flag, struct rt_import_request *req, const rte *e, const char *msg)
+{
+ if (req->trace_routes & flag)
+ rte_trace(req->name, e, '>', msg);
+}
+
+#if 0
+// seems to be unused at all
+static inline void
+rt_rte_trace_out(uint flag, struct rt_export_request *req, const rte *e, const char *msg)
+{
+ if (req->trace_routes & flag)
+ rte_trace(req->name, e, '<', msg);
}
+#endif
static uint
rte_feed_count(net *n)
@@ -405,7 +450,7 @@ export_filter_(struct channel *c, rte *rt, linpool *pool, int silent)
{
struct proto *p = c->proto;
const struct filter *filter = c->out_filter;
- struct export_stats *stats = &c->export_stats;
+ struct channel_export_stats *stats = &c->export_stats;
/* Do nothing if we have already rejected the route */
if (silent && bmap_test(&c->export_reject_map, rt->id))
@@ -419,14 +464,14 @@ export_filter_(struct channel *c, rte *rt, linpool *pool, int silent)
stats->updates_rejected++;
if (v == RIC_REJECT)
- rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
+ channel_rte_trace_out(D_FILTERS, c, rt, "rejected by protocol");
goto reject_noset;
}
if (v > 0)
{
if (!silent)
- rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
+ channel_rte_trace_out(D_FILTERS, c, rt, "forced accept by protocol");
goto accept;
}
@@ -439,7 +484,7 @@ export_filter_(struct channel *c, rte *rt, linpool *pool, int silent)
goto reject;
stats->updates_filtered++;
- rte_trace_out(D_FILTERS, c, rt, "filtered out");
+ channel_rte_trace_out(D_FILTERS, c, rt, "filtered out");
goto reject;
}
@@ -453,7 +498,6 @@ export_filter_(struct channel *c, rte *rt, linpool *pool, int silent)
bmap_set(&c->export_reject_map, rt->id);
reject_noset:
- /* Invalidate the route */
/* Discard temporary rte */
return NULL;
}
@@ -465,19 +509,19 @@ export_filter(struct channel *c, rte *rt, int silent)
}
static void
-do_rt_notify(struct channel *c, const net_addr *net, rte *new, rte *old, int refeed)
+do_rt_notify(struct channel *c, const net_addr *net, rte *new, const rte *old)
{
struct proto *p = c->proto;
- struct export_stats *stats = &c->export_stats;
+ struct channel_export_stats *stats = &c->export_stats;
- if (refeed && new)
+ if (c->refeeding && new)
c->refeed_count++;
if (!old && new)
if (CHANNEL_LIMIT_PUSH(c, OUT))
{
stats->updates_rejected++;
- rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
+ channel_rte_trace_out(D_FILTERS, c, new, "rejected [limit]");
return;
}
@@ -490,7 +534,7 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, rte *old, int ref
{
if (!rte_update_out(c, net, new, old, &old_exported))
{
- rte_trace_out(D_ROUTES, c, new, "idempotent");
+ channel_rte_trace_out(D_ROUTES, c, new, "idempotent");
return;
}
}
@@ -509,11 +553,11 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, rte *old, int ref
if (p->debug & D_ROUTES)
{
if (new && old)
- rte_trace_out(D_ROUTES, c, new, "replaced");
+ channel_rte_trace_out(D_ROUTES, c, new, "replaced");
else if (new)
- rte_trace_out(D_ROUTES, c, new, "added");
+ channel_rte_trace_out(D_ROUTES, c, new, "added");
else if (old)
- rte_trace_out(D_ROUTES, c, old, "removed");
+ channel_rte_trace_out(D_ROUTES, c, old, "removed");
}
p->rt_notify(p, c, net, new, old_exported ? &old_exported->rte : old);
@@ -523,14 +567,9 @@ do_rt_notify(struct channel *c, const net_addr *net, rte *new, rte *old, int ref
}
static void
-rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old, int refeed)
+rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old)
{
if (new)
- c->export_stats.updates_received++;
- else
- c->export_stats.withdraws_received++;
-
- if (new)
new = export_filter(c, new, 0);
if (old && !bmap_test(&c->export_map, old->id))
@@ -539,14 +578,17 @@ rt_notify_basic(struct channel *c, const net_addr *net, rte *new, rte *old, int
if (!new && !old)
return;
- do_rt_notify(c, net, new, old, refeed);
+ do_rt_notify(c, net, new, old);
}
-static void
-rt_notify_accepted(struct channel *c, const net_addr *n, struct rt_pending_export *rpe,
- struct rte **feed, uint count, int refeed)
+void
+rt_notify_accepted(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe,
+ struct rte **feed, uint count)
{
- rte nb0, *new_best = NULL, *old_best = NULL;
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+
+ rte nb0, *new_best = NULL;
+ const rte *old_best = NULL;
for (uint i = 0; i < count; i++)
{
@@ -554,7 +596,7 @@ rt_notify_accepted(struct channel *c, const net_addr *n, struct rt_pending_expor
continue;
/* Has been already rejected, won't bother with it */
- if (!refeed && bmap_test(&c->export_reject_map, feed[i]->id))
+ if (!c->refeeding && bmap_test(&c->export_reject_map, feed[i]->id))
continue;
/* Previously exported */
@@ -564,7 +606,7 @@ rt_notify_accepted(struct channel *c, const net_addr *n, struct rt_pending_expor
if (!new_best)
{
DBG("rt_notify_accepted: idempotent\n");
- return;
+ goto done;
}
/* is superseded */
@@ -604,10 +646,16 @@ rt_notify_accepted(struct channel *c, const net_addr *n, struct rt_pending_expor
if (!new_best && !old_best)
{
DBG("rt_notify_accepted: nothing to export\n");
- return;
+ goto done;
}
- do_rt_notify(c, n, new_best, old_best, refeed);
+ do_rt_notify(c, n, new_best, old_best);
+
+done:
+ /* Drop the old stored rejection if applicable.
+ * new->id == old->id happens when updating hostentries. */
+ if (rpe && rpe->old && (!rpe->new || (rpe->new->rte.id != rpe->old->rte.id)))
+ bmap_clear(&c->export_reject_map, rpe->old->rte.id);
}
@@ -617,20 +665,21 @@ nexthop_merge_rta(struct nexthop *nhs, rta *a, linpool *pool, int max)
return nexthop_merge(nhs, &(a->nh), 1, 0, max, pool);
}
-static rte *
-rt_export_merged(struct channel *c, struct rte **feed, uint count, linpool *pool, int silent, int refeed)
+rte *
+rt_export_merged(struct channel *c, struct rte **feed, uint count, linpool *pool, int silent)
{
_Thread_local static rte rloc;
// struct proto *p = c->proto;
struct nexthop *nhs = NULL;
- rte *best0 = feed[0], *best = NULL;
+ rte *best0 = feed[0];
+ rte *best = NULL;
if (!rte_is_valid(best0))
return NULL;
/* Already rejected, no need to re-run the filter */
- if (!refeed && bmap_test(&c->export_reject_map, best0->id))
+ if (!c->refeeding && bmap_test(&c->export_reject_map, best0->id))
return NULL;
rloc = *best0;
@@ -672,19 +721,12 @@ rt_export_merged(struct channel *c, struct rte **feed, uint count, linpool *pool
return best;
}
-rte *
-rt_export_merged_show(struct channel *c, net *n, linpool *pool)
+void
+rt_notify_merged(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe,
+ struct rte **feed, uint count)
{
- uint count = rte_feed_count(n);
- rte **feed = alloca(count * sizeof(rte *));
- rte_feed_obtain(n, feed, count);
- return rt_export_merged(c, feed, count, pool, 1, 0);
-}
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
-static void
-rt_notify_merged(struct channel *c, const net_addr *n, struct rt_pending_export *rpe,
- struct rte **feed, uint count, int refeed)
-{
// struct proto *p = c->proto;
#if 0 /* TODO: Find whether this check is possible when processing multiple changes at once. */
@@ -724,28 +766,59 @@ rt_notify_merged(struct channel *c, const net_addr *n, struct rt_pending_export
*/
/* Prepare new merged route */
- rte *new_merged = count ? rt_export_merged(c, feed, count, rte_update_pool, 0, refeed) : NULL;
+ rte *new_merged = count ? rt_export_merged(c, feed, count, rte_update_pool, 0) : NULL;
- if (!new_merged && !old_best)
- return;
+ if (new_merged || old_best)
+ do_rt_notify(c, n, new_merged, old_best);
- do_rt_notify(c, n, new_merged, old_best, refeed);
+ /* Drop the old stored rejection if applicable.
+ * new->id == old->id happens when updating hostentries. */
+ if (rpe && rpe->old && (!rpe->new || (rpe->new->rte.id != rpe->old->rte.id)))
+ bmap_clear(&c->export_reject_map, rpe->old->rte.id);
}
-static void
-rt_notify_bulk(struct channel *c, const net_addr *n, struct rt_pending_export *rpe,
- struct rte **feed, uint count, int refeed)
+void
+rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ rte n0;
+
+ if (rpe->new_best != rpe->old_best)
+ rt_notify_basic(c, net, RTE_COPY(rpe->new_best, &n0), RTE_OR_NULL(rpe->old_best));
+
+ /* Drop the old stored rejection if applicable.
+ * new->id == old->id happens when updating hostentries. */
+ if (rpe->old && (!rpe->new || (rpe->new->rte.id != rpe->old->rte.id)))
+ bmap_clear(&c->export_reject_map, rpe->old->rte.id);
+}
+
+void
+rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe)
{
- switch (c->ra_mode)
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ rte n0;
+
+ if (rpe->new != rpe->old)
+ rt_notify_basic(c, net, RTE_COPY(rpe->new, &n0), RTE_OR_NULL(rpe->old));
+
+ /* Drop the old stored rejection if applicable.
+ * new->id == old->id happens when updating hostentries. */
+ if (rpe->old && (!rpe->new || (rpe->new->rte.id != rpe->old->rte.id)))
+ bmap_clear(&c->export_reject_map, rpe->old->rte.id);
+}
+
+void
+rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, rte **feed, uint count)
+{
+ struct channel *c = SKIP_BACK(struct channel, out_req, req);
+
+ for (uint i=0; i<count; i++)
{
- case RA_ACCEPTED:
- return rt_notify_accepted(c, n, rpe, feed, count, refeed);
- case RA_MERGED:
- return rt_notify_merged(c, n, rpe, feed, count, refeed);
+ rte n0 = *feed[i];
+ rt_notify_basic(c, net, &n0, NULL);
}
}
-
/**
* rte_announce - announce a routing table change
* @tab: table the route has been added to
@@ -799,9 +872,9 @@ rte_announce(rtable *tab, net *net, struct rte_storage *new, struct rte_storage
if (new_best != old_best)
{
if (new_best)
- new_best->rte.sender->import_stats.pref++;
+ new_best->rte.sender->stats.pref++;
if (old_best)
- old_best->rte.sender->import_stats.pref--;
+ old_best->rte.sender->stats.pref--;
if (tab->hostcache)
rt_notify_hostcache(tab, net);
@@ -809,46 +882,37 @@ rte_announce(rtable *tab, net *net, struct rte_storage *new, struct rte_storage
rt_schedule_notify(tab);
- struct channel *c; node *n;
- WALK_LIST2(c, n, tab->channels, table_node)
+ struct rt_pending_export rpe = { .new = new, .old = old, .new_best = new_best, .old_best = old_best };
+ uint count = rte_feed_count(net);
+ rte **feed = NULL;
+ if (count)
{
- if (c->export_state == ES_DOWN)
- continue;
-
- rte n0;
- switch (c->ra_mode)
- {
- case RA_OPTIMAL:
- if (new_best != old_best)
- rt_notify_basic(c, net->n.addr, RTE_COPY(new_best, &n0), RTE_OR_NULL(old_best), 0);
- break;
+ feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(net, feed, count);
+ }
- case RA_ANY:
- if (new != old)
- rt_notify_basic(c, net->n.addr, RTE_COPY(new, &n0), RTE_OR_NULL(old), 0);
- break;
+ struct rt_export_hook *eh;
+ WALK_LIST(eh, tab->exports)
+ {
+ if (eh->export_state == TES_STOP)
+ continue;
- case RA_ACCEPTED:
- case RA_MERGED:
- {
- struct rt_pending_export rpe = { .new = new, .old = old, .new_best = new_best, .old_best = old_best };
- uint count = rte_feed_count(net);
- rte **feed = alloca(count * sizeof(rte *));
- rte_feed_obtain(net, feed, count);
- rt_notify_bulk(c, net->n.addr, &rpe, feed, count, 0);
- break;
- }
- }
+ if (new)
+ eh->stats.updates_received++;
+ else
+ eh->stats.withdraws_received++;
- /* Drop the old stored rejection if applicable.
- * new->id == old->id happens when updating hostentries. */
- if (old && (!new || (new->rte.id != old->rte.id)))
- bmap_clear(&c->export_reject_map, old->rte.id);
+ if (eh->req->export_one)
+ eh->req->export_one(eh->req, net->n.addr, &rpe);
+ else if (eh->req->export_bulk)
+ eh->req->export_bulk(eh->req, net->n.addr, &rpe, feed, count);
+ else
+ bug("Export request must always provide an export method");
}
}
static inline int
-rte_validate(rte *e)
+rte_validate(struct channel *ch, rte *e)
{
int c;
const net_addr *n = e->net;
@@ -856,7 +920,7 @@ rte_validate(rte *e)
if (!net_validate(n))
{
log(L_WARN "Ignoring bogus prefix %N received via %s",
- n, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
@@ -866,21 +930,21 @@ rte_validate(rte *e)
if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK))
{
log(L_WARN "Ignoring bogus route %N received via %s",
- n, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
if (net_type_match(n, NB_DEST) == !e->attrs->dest)
{
log(L_WARN "Ignoring route %N with invalid dest %d received via %s",
- n, e->attrs->dest, e->sender->proto->name);
+ n, e->attrs->dest, ch->proto->name);
return 0;
}
if ((e->attrs->dest == RTD_UNICAST) && !nexthop_is_sorted(&(e->attrs->nh)))
{
log(L_WARN "Ignoring unsorted multipath route %N received via %s",
- n, e->sender->proto->name);
+ n, ch->proto->name);
return 0;
}
@@ -901,11 +965,11 @@ rte_same(rte *x, rte *y)
static inline int rte_is_ok(rte *e) { return e && !rte_is_filtered(e); }
static void
-rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
+rte_recalculate(struct rt_import_hook *c, net *net, rte *new, struct rte_src *src)
{
- struct proto *p = c->proto;
+ struct rt_import_request *req = c->req;
struct rtable *table = c->table;
- struct import_stats *stats = &c->import_stats;
+ struct rt_import_stats *stats = &c->stats;
struct rte_storage *old_best_stored = net->routes, *old_stored = NULL;
rte *old_best = old_best_stored ? &old_best_stored->rte : NULL;
rte *old = NULL;
@@ -922,7 +986,7 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
* source protocol to this routing table through transparent
* pipes, which is not allowed.
* We log that and ignore the route. */
- if (old->sender->proto != p)
+ if (old->sender != c)
{
if (!old->generation && !new->generation)
bug("Two protocols claim to author a route with the same rte_src in table %s: %N %s/%u:%u",
@@ -930,8 +994,6 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
log_rl(&table->rl_pipe, L_ERR "Route source collision in table %s: %N %s/%u:%u",
c->table->name, net->n.addr, old->src->proto->name, old->src->private_id, old->src->global_id);
-
- return;
}
if (new && rte_same(old, new))
@@ -943,13 +1005,10 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
if (!rte_is_filtered(new))
{
stats->updates_ignored++;
- rte_trace_in(D_ROUTES, c, new, "ignored");
+ rt_rte_trace_in(D_ROUTES, req, new, "ignored");
}
-
- return;
}
-
*before_old = (*before_old)->next;
table->rt_count--;
}
@@ -960,57 +1019,12 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
return;
}
+ if (req->preimport)
+ new = req->preimport(req, new, old);
+
int new_ok = rte_is_ok(new);
int old_ok = rte_is_ok(old);
- if (!c->in_table)
- {
- if (!old && new)
- if (CHANNEL_LIMIT_PUSH(c, RX))
- {
- /* In receive limit the situation is simple, old is NULL so
- we just free new and exit like nothing happened */
-
- stats->updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
- return;
- }
-
- if (old && !new)
- CHANNEL_LIMIT_POP(c, RX);
- }
-
- if (!old_ok && new_ok)
- if (CHANNEL_LIMIT_PUSH(c, IN))
- {
- /* In import limit the situation is more complicated. We
- shouldn't just drop the route, we should handle it like
- it was filtered. We also have to continue the route
- processing if old or new is non-NULL, but we should exit
- if both are NULL as this case is probably assumed to be
- already handled. */
-
- stats->updates_ignored++;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
-
- if (c->in_keep_filtered)
- new->flags |= REF_FILTERED;
- else
- new = NULL;
-
- /* Note that old && !new could be possible when
- c->in_keep_filtered changed in the recent past. */
-
- if (!old && !new)
- return;
-
- new_ok = 0;
- goto skip_stats1;
- }
-
- if (old_ok && !new_ok)
- CHANNEL_LIMIT_POP(c, IN);
-
if (new_ok)
stats->updates_accepted++;
else if (old_ok)
@@ -1021,7 +1035,6 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
if (old_ok || new_ok)
table->last_rt_change = current_time();
- skip_stats1:;
struct rte_storage *new_stored = new ? rte_store(new, net, table) : NULL;
if (table->config->sorted)
@@ -1126,19 +1139,16 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
}
/* Log the route change */
- if ((c->debug & D_ROUTES) || (p->debug & D_ROUTES))
+ if (new_ok)
+ rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, new_stored == net->routes ? "added [best]" : "added");
+ else if (old_ok)
{
- if (new_ok)
- rte_trace(c, &new_stored->rte, '>', new_stored == net->routes ? "added [best]" : "added");
- else if (old_ok)
- {
- if (old != old_best)
- rte_trace(c, old, '>', "removed");
- else if (net->routes && rte_is_ok(&net->routes->rte))
- rte_trace(c, old, '>', "removed [replaced]");
- else
- rte_trace(c, old, '>', "removed [sole]");
- }
+ if (old != old_best)
+ rt_rte_trace_in(D_ROUTES, req, old, "removed");
+ else if (net->routes && rte_is_ok(&net->routes->rte))
+ rt_rte_trace_in(D_ROUTES, req, old, "removed [replaced]");
+ else
+ rt_rte_trace_in(D_ROUTES, req, old, "removed [sole]");
}
/* Propagate the route change */
@@ -1150,10 +1160,13 @@ rte_recalculate(struct channel *c, net *net, rte *new, struct rte_src *src)
(table->gc_time + table->config->gc_min_time <= current_time()))
rt_schedule_prune(table);
+#if 0
+ /* Enable and reimplement these callbacks if anybody wants to use them */
if (old_ok && p->rte_remove)
p->rte_remove(net, old);
if (new_ok && p->rte_insert)
p->rte_insert(net, &new_stored->rte);
+#endif
if (old)
{
@@ -1179,89 +1192,116 @@ rte_update_unlock(void)
lp_flush(rte_update_pool);
}
-static int rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
+rte *
+channel_preimport(struct rt_import_request *req, rte *new, rte *old)
+{
+ struct channel *c = SKIP_BACK(struct channel, in_req, req);
+
+ if (new && !old)
+ if (CHANNEL_LIMIT_PUSH(c, RX))
+ return NULL;
+
+ if (!new && old)
+ CHANNEL_LIMIT_POP(c, RX);
+
+ int new_in = new && !rte_is_filtered(new);
+ int old_in = old && !rte_is_filtered(old);
+
+ if (new_in && !old_in)
+ if (CHANNEL_LIMIT_PUSH(c, IN))
+ if (c->in_keep_filtered)
+ {
+ new->flags |= REF_FILTERED;
+ return new;
+ }
+ else
+ return NULL;
+
+ if (!new_in && old_in)
+ CHANNEL_LIMIT_POP(c, IN);
+
+ return new;
+}
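/* A minimal sketch (not part of this patch) of where channel_preimport() gets
 * attached.  rte_recalculate() calls req->preimport(req, new, old) before the
 * update is accounted and stored, so returning NULL drops it and returning the
 * (possibly REF_FILTERED-flagged) rte keeps it.  The helper name and its
 * placement in the channel start path are assumptions. */
static void
my_channel_start_import(struct channel *c)
{
  c->in_req.preimport = channel_preimport;  /* enforce RX/IN limits as above */
  rt_request_import(c->table, &c->in_req);
}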
+
+static void rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src);
void
rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
+ if (!c->in_req.hook)
+ return;
+
+ ASSERT(c->channel_state == CS_UP);
+
if (c->in_table && !rte_update_in(c, n, new, src))
return;
- struct import_stats *stats = &c->import_stats;
- const struct filter *filter = c->in_filter;
- net *nn;
+ return rte_update_direct(c, n, new, src);
+}
- ASSERT(c->channel_state == CS_UP);
+static void
+rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
+{
+ const struct filter *filter = c->in_filter;
+ struct channel_import_stats *stats = &c->import_stats;
rte_update_lock();
if (new)
{
new->net = n;
- new->sender = c;
+
+ int fr;
stats->updates_received++;
- if (!rte_validate(new))
+ if (!rte_validate(c, new))
{
- rte_trace_in(D_FILTERS, c, new, "invalid");
+ channel_rte_trace_in(D_FILTERS, c, new, "invalid");
stats->updates_invalid++;
- goto drop;
+ new = NULL;
}
-
- if (filter == FILTER_REJECT)
+ else if ((filter == FILTER_REJECT) ||
+ ((fr = f_run(filter, new, rte_update_pool, 0)) > F_ACCEPT))
{
stats->updates_filtered++;
- rte_trace_in(D_FILTERS, c, new, "filtered out");
+ channel_rte_trace_in(D_FILTERS, c, new, "filtered out");
- if (! c->in_keep_filtered)
- goto drop;
-
- /* new is a private copy, i could modify it */
- new->flags |= REF_FILTERED;
+ if (c->in_keep_filtered)
+ new->flags |= REF_FILTERED;
+ else
+ new = NULL;
}
- else if (filter)
- {
- int fr = f_run(filter, new, rte_update_pool, 0);
- if (fr > F_ACCEPT)
- {
- stats->updates_filtered++;
- rte_trace_in(D_FILTERS, c, new, "filtered out");
+ }
+ else
+ stats->withdraws_received++;
- if (! c->in_keep_filtered)
- goto drop;
+ rte_import(&c->in_req, n, new, src);
- new->flags |= REF_FILTERED;
- }
- }
+ rte_update_unlock();
+}
+void
+rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rte_src *src)
+{
+ struct rt_import_hook *hook = req->hook;
+ if (!hook)
+ return;
+
+ net *nn;
+ if (new)
+ {
/* Use the actual struct network, not the dummy one */
- nn = net_get(c->table, n);
+ nn = net_get(hook->table, n);
new->net = nn->n.addr;
+ new->sender = hook;
}
- else
+ else if (!(nn = net_find(hook->table, n)))
{
- stats->withdraws_received++;
-
- if (!(nn = net_find(c->table, n)) || !src)
- {
- stats->withdraws_ignored++;
- rte_update_unlock();
- return;
- }
+ req->hook->stats.withdraws_ignored++;
+ return;
}
- recalc:
/* And recalculate the best route */
- rte_recalculate(c, nn, new, src);
-
- rte_update_unlock();
- return;
-
- drop:
- new = NULL;
- if (nn = net_find(c->table, n))
- goto recalc;
-
- rte_update_unlock();
+ rte_recalculate(hook, nn, new, src);
}
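/* A minimal sketch (not part of this patch): a withdraw through the new entry
 * point needs no attributes, just the prefix and the route source.  The helper
 * name is an assumption; src is expected to be the same rte_src the routes
 * were originally imported with. */
static void
my_withdraw(struct rt_import_request *req, const net_addr *n, struct rte_src *src)
{
  rte_import(req, n, NULL, src);   /* counted as withdraws_ignored if the net is not in the table */
}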
/* Independent call to rte_announce(), used from next hop
@@ -1289,7 +1329,7 @@ rte_modify(net *net, rte *old)
{
rte_update_lock();
- rte *new = old->sender->proto->rte_modify(old, rte_update_pool);
+ rte *new = old->sender->req->rte_modify(old, rte_update_pool);
if (new != old)
{
if (new)
@@ -1327,6 +1367,127 @@ rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filte
return v > 0;
}
+static void
+rt_export_stopped(void *data)
+{
+ struct rt_export_hook *hook = data;
+ rtable *tab = hook->table;
+
+ /* Unlist */
+ rem_node(&hook->n);
+
+ /* Report the stopped export to the requestor. */
+ hook->stopped(hook->req);
+
+ /* Freeing the hook together with its coroutine. */
+ rfree(hook->pool);
+ rt_unlock_table(tab);
+
+ DBG("Export hook %p in table %s finished uc=%u\n", hook, tab->name, tab->use_count);
+}
+
+
+static inline void
+rt_set_import_state(struct rt_import_hook *hook, u8 state)
+{
+ hook->last_state_change = current_time();
+ hook->import_state = state;
+
+ if (hook->req->log_state_change)
+ hook->req->log_state_change(hook->req, state);
+}
+
+static inline void
+rt_set_export_state(struct rt_export_hook *hook, u8 state)
+{
+ hook->last_state_change = current_time();
+ hook->export_state = state;
+
+ if (hook->req->log_state_change)
+ hook->req->log_state_change(hook->req, state);
+}
+
+void
+rt_request_import(rtable *tab, struct rt_import_request *req)
+{
+ rt_lock_table(tab);
+
+ struct rt_import_hook *hook = req->hook = mb_allocz(tab->rp, sizeof(struct rt_import_hook));
+
+ DBG("Lock table %s for import %p req=%p uc=%u\n", tab->name, hook, req, tab->use_count);
+
+ hook->req = req;
+ hook->table = tab;
+
+ rt_set_import_state(hook, TIS_UP);
+
+ hook->n = (node) {};
+ add_tail(&tab->imports, &hook->n);
+}
+
+void
+rt_stop_import(struct rt_import_request *req, void (*stopped)(struct rt_import_request *))
+{
+ ASSERT_DIE(req->hook);
+ struct rt_import_hook *hook = req->hook;
+
+ rt_schedule_prune(hook->table);
+
+ rt_set_import_state(hook, TIS_STOP);
+
+ hook->stopped = stopped;
+}
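/* A minimal sketch (not part of this patch) of the import lifecycle.  Note that
 * the stopped callback is not run from rt_stop_import() itself; it fires later
 * from the prune loop, once the hook reaches TIS_CLEARED and its routes are
 * flushed.  The function names here are illustrative assumptions. */
static void
my_import_stopped(struct rt_import_request *req)
{
  /* All routes of this importer have been flushed; the hook is freed right after */
}

static void
my_import_lifecycle(rtable *tab, struct rt_import_request *req)
{
  rt_request_import(tab, req);             /* TIS_UP; the table is now locked */
  /* ... rte_import() calls ... */
  rt_stop_import(req, my_import_stopped);  /* TIS_STOP; prune scheduled */
}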
+
+void
+rt_request_export(rtable *tab, struct rt_export_request *req)
+{
+ rt_lock_table(tab);
+
+ pool *p = rp_new(tab->rp, "Export hook");
+ struct rt_export_hook *hook = req->hook = mb_allocz(p, sizeof(struct rt_export_hook));
+ hook->pool = p;
+ hook->lp = lp_new_default(p);
+
+ hook->req = req;
+ hook->table = tab;
+
+ /* stats zeroed by mb_allocz */
+
+ rt_set_export_state(hook, TES_HUNGRY);
+
+ hook->n = (node) {};
+ add_tail(&tab->exports, &hook->n);
+
+ FIB_ITERATE_INIT(&hook->feed_fit, &tab->fib);
+
+ DBG("New export hook %p req %p in table %s uc=%u\n", hook, req, tab->name, tab->use_count);
+
+ rt_set_export_state(hook, TES_FEEDING);
+
+ hook->event = ev_new_init(p, rt_feed_channel, hook);
+ ev_schedule_work(hook->event);
+}
+
+void
+rt_stop_export(struct rt_export_request *req, void (*stopped)(struct rt_export_request *))
+{
+ ASSERT_DIE(req->hook);
+ struct rt_export_hook *hook = req->hook;
+
+ rtable *tab = hook->table;
+
+ /* Stop feeding */
+ ev_postpone(hook->event);
+
+ if (hook->export_state == TES_FEEDING)
+ fit_get(&tab->fib, &hook->feed_fit);
+
+ hook->event->hook = rt_export_stopped;
+ hook->stopped = stopped;
+
+ rt_set_export_state(hook, TES_STOP);
+ ev_schedule(hook->event);
+}
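/* A minimal sketch (not part of this patch) of the matching stop callback.
 * rt_stop_export() only re-targets the hook's event to rt_export_stopped(),
 * so the callback runs asynchronously and the hook pool is freed right after
 * it returns.  Clearing req->hook here mirrors what a requestor is expected
 * to do, but that convention is an assumption, not shown in this hunk. */
static void
my_export_stopped(struct rt_export_request *req)
{
  req->hook = NULL;   /* the hook and its pool are gone once this returns */
}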
/**
* rt_refresh_begin - start a refresh cycle
@@ -1343,12 +1504,12 @@ rt_examine(rtable *t, net_addr *a, struct channel *c, const struct filter *filte
* flag in rt_refresh_end() and then removing such routes in the prune loop.
*/
void
-rt_refresh_begin(rtable *t, struct channel *c)
+rt_refresh_begin(rtable *t, struct rt_import_request *req)
{
FIB_WALK(&t->fib, net, n)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
- if (e->rte.sender == c)
+ if (e->rte.sender == req->hook)
e->rte.flags |= REF_STALE;
}
FIB_WALK_END;
@@ -1363,14 +1524,14 @@ rt_refresh_begin(rtable *t, struct channel *c)
* hook. See rt_refresh_begin() for description of refresh cycles.
*/
void
-rt_refresh_end(rtable *t, struct channel *c)
+rt_refresh_end(rtable *t, struct rt_import_request *req)
{
int prune = 0;
FIB_WALK(&t->fib, net, n)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
- if ((e->rte.sender == c) && (e->rte.flags & REF_STALE))
+ if ((e->rte.sender == req->hook) && (e->rte.flags & REF_STALE))
{
e->rte.flags |= REF_DISCARD;
prune = 1;
@@ -1383,14 +1544,14 @@ rt_refresh_end(rtable *t, struct channel *c)
}
void
-rt_modify_stale(rtable *t, struct channel *c)
+rt_modify_stale(rtable *t, struct rt_import_request *req)
{
int prune = 0;
FIB_WALK(&t->fib, net, n)
{
for (struct rte_storage *e = n->routes; e; e = e->next)
- if ((e->rte.sender == c) && (e->rte.flags & REF_STALE) && !(e->rte.flags & REF_FILTERED))
+ if ((e->rte.sender == req->hook) && (e->rte.flags & REF_STALE) && !(e->rte.flags & REF_FILTERED))
{
e->rte.flags |= REF_MODIFY;
prune = 1;
@@ -1426,7 +1587,7 @@ rte_dump(struct rte_storage *e)
void
rt_dump(rtable *t)
{
- debug("Dump of routing table <%s>\n", t->name);
+ debug("Dump of routing table <%s>%s\n", t->name, t->deleted ? " (deleted)" : "");
#ifdef DEBUGGING
fib_check(&t->fib);
#endif
@@ -1452,6 +1613,54 @@ rt_dump_all(void)
WALK_LIST2(t, n, routing_tables, n)
rt_dump(t);
+
+ WALK_LIST2(t, n, deleted_routing_tables, n)
+ rt_dump(t);
+}
+
+void
+rt_dump_hooks(rtable *tab)
+{
+ debug("Dump of hooks in routing table <%s>%s\n", tab->name, tab->deleted ? " (deleted)" : "");
+ debug(" nhu_state=%u hcu_scheduled=%u use_count=%d rt_count=%u\n",
+ tab->nhu_state, tab->hcu_scheduled, tab->use_count, tab->rt_count);
+ debug(" last_rt_change=%t gc_time=%t gc_counter=%d prune_state=%u\n",
+ tab->last_rt_change, tab->gc_time, tab->gc_counter, tab->prune_state);
+
+ struct rt_import_hook *ih;
+ WALK_LIST(ih, tab->imports)
+ {
+ ih->req->dump_req(ih->req);
+ debug(" Import hook %p requested by %p: pref=%u"
+ " last_state_change=%t import_state=%u stopped=%p\n",
+ ih, ih->req, ih->stats.pref,
+ ih->last_state_change, ih->import_state, ih->stopped);
+ }
+
+ struct rt_export_hook *eh;
+ WALK_LIST(eh, tab->exports)
+ {
+ eh->req->dump_req(eh->req);
+ debug(" Export hook %p requested by %p:"
+ " refeed_pending=%u last_state_change=%t export_state=%u stopped=%p\n",
+ eh, eh->req, eh->refeed_pending, eh->last_state_change, eh->export_state, eh->stopped);
+ }
+ debug("\n");
+}
+
+void
+rt_dump_hooks_all(void)
+{
+ rtable *t;
+ node *n;
+
+ debug("Dump of all table hooks\n");
+
+ WALK_LIST2(t, n, routing_tables, n)
+ rt_dump_hooks(t);
+
+ WALK_LIST2(t, n, deleted_routing_tables, n)
+ rt_dump_hooks(t);
}
static inline void
@@ -1569,6 +1778,7 @@ rt_subscribe(rtable *tab, struct rt_subscription *s)
{
s->tab = tab;
rt_lock_table(tab);
+ DBG("rt_subscribe(%s)\n", tab->name);
add_tail(&tab->subscribers, &s->n);
}
@@ -1645,7 +1855,8 @@ rt_setup(pool *pp, struct rtable_config *cf)
if (!(t->internal = cf->internal))
{
- init_list(&t->channels);
+ init_list(&t->imports);
+ init_list(&t->exports);
hmap_init(&t->id_map, p, 1024);
hmap_set(&t->id_map, 0);
@@ -1673,6 +1884,7 @@ rt_init(void)
rt_table_pool = rp_new(&root_pool, "Routing tables");
rte_update_pool = lp_new_default(rt_table_pool);
init_list(&routing_tables);
+ init_list(&deleted_routing_tables);
}
@@ -1696,7 +1908,7 @@ rt_prune_table(rtable *tab)
struct fib_iterator *fit = &tab->prune_fit;
int limit = 512;
- struct channel *c;
+ struct rt_import_hook *ih;
node *n, *x;
DBG("Pruning route table %s\n", tab->name);
@@ -1710,9 +1922,9 @@ rt_prune_table(rtable *tab)
if (tab->prune_state == 1)
{
/* Mark channels to flush */
- WALK_LIST2(c, n, tab->channels, table_node)
- if (c->channel_state == CS_FLUSHING)
- c->flush_active = 1;
+ WALK_LIST2(ih, n, tab->imports, n)
+ if (ih->import_state == TIS_STOP)
+ rt_set_import_state(ih, TIS_FLUSHING);
FIB_ITERATE_INIT(fit, &tab->fib);
tab->prune_state = 2;
@@ -1724,7 +1936,7 @@ again:
rescan:
for (struct rte_storage *e=n->routes; e; e=e->next)
{
- if (e->rte.sender->flush_active || (e->rte.flags & REF_DISCARD))
+ if ((e->rte.sender->import_state == TIS_FLUSHING) || (e->rte.flags & REF_DISCARD))
{
if (limit <= 0)
{
@@ -1774,21 +1986,18 @@ again:
/* state change 2->0, 3->1 */
tab->prune_state &= 1;
- if (tab->prune_state > 0)
- ev_schedule(tab->rt_event);
-
- /* FIXME: This should be handled in a better way */
rt_prune_sources();
/* Close flushed channels */
- WALK_LIST2_DELSAFE(c, n, x, tab->channels, table_node)
- if (c->flush_active)
- {
- c->flush_active = 0;
- channel_set_state(c, CS_DOWN);
- }
-
- return;
+ WALK_LIST2_DELSAFE(ih, n, x, tab->imports, n)
+ if (ih->import_state == TIS_FLUSHING)
+ {
+ rt_set_import_state(ih, TIS_CLEARED);
+ ih->stopped(ih->req);
+ rem_node(&ih->n);
+ mb_free(ih);
+ rt_unlock_table(tab);
+ }
}
void
@@ -1996,7 +2205,7 @@ rt_next_hop_update_net(rtable *tab, net *n)
{
_Bool nb = (new == updates[i].new), ob = (old_best == updates[i].old);
const char *best_indicator[2][2] = { { "updated", "updated [-best]" }, { "updated [+best]", "updated [best]" } };
- rte_trace_in(D_ROUTES, new->rte.sender, &updates[i].new->rte, best_indicator[nb][ob]);
+ rt_rte_trace_in(D_ROUTES, updates[i].new->rte.sender->req, &updates[i].new->rte, best_indicator[nb][ob]);
rte_announce_i(tab, n, updates[i].new, updates[i].old, new, old_best);
}
@@ -2171,25 +2380,6 @@ rt_commit(struct config *new, struct config *old)
DBG("\tdone\n");
}
-static inline void
-do_feed_channel(struct channel *c, net *n, rte *e)
-{
- rte_update_lock();
- if ((c->ra_mode == RA_ACCEPTED) || (c->ra_mode == RA_MERGED))
- {
- uint count = rte_feed_count(n);
- rte **feed = alloca(count * sizeof(rte *));
- rte_feed_obtain(n, feed, count);
- rt_notify_bulk(c, n->n.addr, NULL, feed, count, c->refeeding);
- }
- else /* RA_BASIC */
- {
- rte e0 = *e;
- rt_notify_basic(c, n->n.addr, &e0, &e0, c->refeeding);
- }
- rte_update_unlock();
-}
-
/**
* rt_feed_channel - advertise all routes to a channel
* @c: channel to be fed
@@ -2199,79 +2389,55 @@ do_feed_channel(struct channel *c, net *n, rte *e)
* has something to do. (We avoid transferring all the routes in single pass in
* order not to monopolize CPU time.)
*/
-int
-rt_feed_channel(struct channel *c)
+static void
+rt_feed_channel(void *data)
{
+ struct rt_export_hook *c = data;
+
struct fib_iterator *fit = &c->feed_fit;
int max_feed = 256;
- ASSERT(c->export_state == ES_FEEDING);
-
- if (!c->feed_active)
- {
- FIB_ITERATE_INIT(fit, &c->table->fib);
- c->feed_active = 1;
- }
+ ASSERT(c->export_state == TES_FEEDING);
FIB_ITERATE_START(&c->table->fib, fit, net, n)
{
- struct rte_storage *e = n->routes;
if (max_feed <= 0)
{
FIB_ITERATE_PUT(fit);
- return 0;
+ ev_schedule_work(c->event);
+ return;
}
- if ((c->ra_mode == RA_OPTIMAL) ||
- (c->ra_mode == RA_ACCEPTED) ||
- (c->ra_mode == RA_MERGED))
- if (e && rte_is_valid(&e->rte))
- {
- /* In the meantime, the protocol may fell down */
- if (c->export_state != ES_FEEDING)
- goto done;
+ if (c->export_state != TES_FEEDING)
+ goto done;
- do_feed_channel(c, n, &e->rte);
- max_feed--;
- }
-
- if (c->ra_mode == RA_ANY)
- for(e = n->routes; e; e = e->next)
- {
- /* In the meantime, the protocol may fell down */
- if (c->export_state != ES_FEEDING)
- goto done;
-
- if (!rte_is_valid(&e->rte))
- continue;
-
- do_feed_channel(c, n, &e->rte);
- max_feed--;
- }
+ if (c->req->export_bulk)
+ {
+ uint count = rte_feed_count(n);
+ if (count)
+ {
+ rte_update_lock();
+ rte **feed = alloca(count * sizeof(rte *));
+ rte_feed_obtain(n, feed, count);
+ struct rt_pending_export rpe = { .new_best = n->routes };
+ c->req->export_bulk(c->req, n->n.addr, &rpe, feed, count);
+ max_feed -= count;
+ rte_update_unlock();
+ }
+ }
+ else if (n->routes && rte_is_valid(&n->routes->rte))
+ {
+ rte_update_lock();
+ struct rt_pending_export rpe = { .new = n->routes, .new_best = n->routes };
+ c->req->export_one(c->req, n->n.addr, &rpe);
+ max_feed--;
+ rte_update_unlock();
+ }
}
FIB_ITERATE_END;
done:
- c->feed_active = 0;
- return 1;
-}
-
-/**
- * rt_feed_baby_abort - abort protocol feeding
- * @c: channel
- *
- * This function is called by the protocol code when the protocol stops or
- * ceases to exist during the feeding.
- */
-void
-rt_feed_channel_abort(struct channel *c)
-{
- if (c->feed_active)
- {
- /* Unlink the iterator */
- fit_get(&c->table->fib, &c->feed_fit);
- c->feed_active = 0;
- }
+ rt_set_export_state(c, TES_READY);
}
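/* The bounded walk above relies on a reschedule idiom worth spelling out; a
 * minimal sketch (not part of this patch), with the per-net processing elided:
 * park the iterator with FIB_ITERATE_PUT() and requeue the same event, so a
 * large table is fed across several event-loop runs instead of one long pass. */
static void
my_bounded_walk(void *data)
{
  struct rt_export_hook *h = data;
  int budget = 256;                        /* same order as max_feed above */

  FIB_ITERATE_START(&h->table->fib, &h->feed_fit, net, n)
  {
    if (budget-- <= 0)
    {
      FIB_ITERATE_PUT(&h->feed_fit);       /* remember the position */
      ev_schedule_work(h->event);          /* continue in a later loop iteration */
      return;
    }
    /* ... process net n ... */
  }
  FIB_ITERATE_END;
}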
@@ -2279,7 +2445,7 @@ rt_feed_channel_abort(struct channel *c)
* Import table
*/
-static int
+int
rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
{
struct rtable *tab = c->in_table;
@@ -2332,7 +2498,7 @@ rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *sr
/* Required by rte_trace_in() */
new->net = n;
- rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
+ channel_rte_trace_in(D_FILTERS, c, new, "ignored [limit]");
goto drop_update;
}
}
@@ -2349,7 +2515,6 @@ rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *sr
/* Insert the new rte */
struct rte_storage *e = rte_store(new, net, tab);
- e->rte.sender = c;
e->rte.lastmod = current_time();
e->next = *pos;
*pos = e;
@@ -2358,7 +2523,7 @@ rte_update_in(struct channel *c, const net_addr *n, rte *new, struct rte_src *sr
drop_update:
c->import_stats.updates_received++;
- c->import_stats.updates_ignored++;
+ c->in_req.hook->stats.updates_ignored++;
if (!net->routes)
fib_delete(&tab->fib, net);
@@ -2367,7 +2532,7 @@ drop_update:
drop_withdraw:
c->import_stats.withdraws_received++;
- c->import_stats.withdraws_ignored++;
+ c->in_req.hook->stats.withdraws_ignored++;
return 0;
}
@@ -2397,7 +2562,7 @@ rt_reload_channel(struct channel *c)
}
rte r = e->rte;
- rte_update(c, r.net, &r, r.src);
+ rte_update_direct(c, r.net, &r, r.src);
}
c->reload_next_rte = NULL;
@@ -2470,7 +2635,7 @@ again:
*/
int
-rte_update_out(struct channel *c, const net_addr *n, rte *new, rte *old0, struct rte_storage **old_exported)
+rte_update_out(struct channel *c, const net_addr *n, rte *new, const rte *old0, struct rte_storage **old_exported)
{
struct rtable *tab = c->out_table;
struct rte_src *src;