| author    | Maria Matejka <mq@ucw.cz> | 2022-06-16 23:24:56 +0200 |
|---|---|---|
| committer | Maria Matejka <mq@ucw.cz> | 2022-06-20 11:56:38 +0200 |
| commit    | becab5072d6d84d6f9c9402387a9e1c14dcc384d (patch) | |
| tree      | d9d7a016655d271f7cc4d7b1cc04e7ce7cad69ec /nest/proto.c | |
| parent    | 8c92f47ac77f267368b6d6bd161689a0c0bc5e5a (diff) | |
Import tables are stored as an attribute layer inside the main tables.
The separate import tables were too memory-greedy; there is no need for them to be stored as full-sized tables.
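
In practice, the change replaces the per-channel pre-filter table (`in_table`) with a `RIK_PREFILTER` bit in a small `in_keep` bitfield, as the diff below shows. The following sketch illustrates that shape with hypothetical, simplified types and flag values; it is not the real BIRD `struct channel`, and the flag value is an assumption for illustration only.

```c
/* Minimal sketch (hypothetical types and values, not the real BIRD structures):
 * instead of pointing at a separate full-sized routing table, the channel only
 * records a flag; the unfiltered copy of each route lives as an extra attribute
 * layer inside the main table. */
#include <stdio.h>
#include <stdint.h>

#define RIK_PREFILTER 0x1   /* "keep pre-filter routes" bit; value is assumed */

struct channel_sketch {
  const char *name;
  uint8_t in_keep;          /* bitfield replacing the old in_table pointer */
};

static int channel_keeps_prefilter(const struct channel_sketch *c)
{
  return (c->in_keep & RIK_PREFILTER) == RIK_PREFILTER;
}

int main(void)
{
  struct channel_sketch c = { .name = "bgp1.ipv4", .in_keep = 0 };

  /* Enabling import-table behaviour becomes a flag flip, not a table setup. */
  c.in_keep |= RIK_PREFILTER;

  printf("%s keeps pre-filter routes: %s\n",
         c.name, channel_keeps_prefilter(&c) ? "yes" : "no");
  return 0;
}
```

The point of the flag is that no second table has to be allocated, fed, and pruned per channel; the main table already holds the data, and the channel merely marks that it wants the pre-filter layer kept.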
Diffstat (limited to 'nest/proto.c')
-rw-r--r-- | nest/proto.c | 77 |
1 file changed, 38 insertions, 39 deletions
```diff
diff --git a/nest/proto.c b/nest/proto.c
index 77817888..5e67d940 100644
--- a/nest/proto.c
+++ b/nest/proto.c
@@ -224,7 +224,7 @@ proto_add_channel(struct proto *p, struct channel_config *cf)
   c->preference = cf->preference;
   c->debug = cf->debug;
   c->merge_limit = cf->merge_limit;
-  c->in_keep_filtered = cf->in_keep_filtered;
+  c->in_keep = cf->in_keep;
   c->rpki_reload = cf->rpki_reload;
 
   c->channel_state = CS_DOWN;
@@ -294,7 +294,7 @@ static void
 channel_roa_in_changed(struct rt_subscription *s)
 {
   struct channel *c = s->data;
-  int active = c->reload_event && ev_active(c->reload_event);
+  int active = !!c->reload_req.hook;
 
   CD(c, "Reload triggered by RPKI change%s", active ? " - already active" : "");
 
@@ -379,7 +379,7 @@ channel_roa_subscribe_filter(struct channel *c, int dir)
 #ifdef CONFIG_BGP
   /* No automatic reload for BGP channels without in_table / out_table */
   if (c->channel == &channel_bgp)
-    valid = dir ? !!c->in_table : !!c->out_table;
+    valid = dir ? ((c->in_keep & RIK_PREFILTER) == RIK_PREFILTER) : !!c->out_table;
 #endif
 
   struct filter_iterator fit;
@@ -534,9 +534,6 @@ channel_import_stopped(struct rt_import_request *req)
 
   req->hook = NULL;
 
-  if (c->in_table)
-    rt_prune_sync(c->in_table, 1);
-
   mb_free(c->in_req.name);
   c->in_req.name = NULL;
 
@@ -603,43 +600,48 @@ channel_schedule_reload(struct channel *c)
 {
   ASSERT(c->in_req.hook);
 
-  rt_reload_channel_abort(c);
-  ev_schedule_work(c->reload_event);
+  rt_request_export(c->table, &c->reload_req);
 }
 
 static void
-channel_reload_loop(void *ptr)
+channel_reload_stopped(struct rt_export_request *req)
 {
-  struct channel *c = ptr;
-
-  /* Start reload */
-  if (!c->reload_active)
-    c->reload_pending = 0;
-
-  if (!rt_reload_channel(c))
-  {
-    ev_schedule_work(c->reload_event);
-    return;
-  }
+  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
 
   /* Restart reload */
   if (c->reload_pending)
     channel_request_reload(c);
 }
 
+static void
+channel_reload_log_state_change(struct rt_export_request *req, u8 state)
+{
+  if (state == TES_READY)
+    rt_stop_export(req, channel_reload_stopped);
+}
+
+static void
+channel_reload_dump_req(struct rt_export_request *req)
+{
+  struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+  debug("  Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
+}
+
+void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, rte **feed, uint count);
+
 /* Called by protocol to activate in_table */
 void
 channel_setup_in_table(struct channel *c)
 {
-  struct rtable_config *cf = mb_allocz(c->proto->pool, sizeof(struct rtable_config));
-
-  cf->name = "import";
-  cf->addr_type = c->net_type;
-  cf->internal = 1;
-
-  c->in_table = rt_setup(c->proto->pool, cf);
+  c->reload_req = (struct rt_export_request) {
+    .name = mb_sprintf(c->proto->pool, "%s.%s.import", c->proto->name, c->name),
+    .trace_routes = c->debug | c->proto->debug,
+    .export_bulk = channel_reload_export_bulk,
+    .dump_req = channel_reload_dump_req,
+    .log_state_change = channel_reload_log_state_change,
+  };
 
-  c->reload_event = ev_new_init(c->proto->pool, channel_reload_loop, c);
+  c->in_keep |= RIK_PREFILTER;
 }
 
 /* Called by protocol to activate out_table */
@@ -680,10 +682,10 @@ static void
 channel_do_pause(struct channel *c)
 {
   /* Need to abort feeding */
-  if (c->reload_event)
+  if (c->reload_req.hook)
   {
-    ev_postpone(c->reload_event);
-    rt_reload_channel_abort(c);
+    c->reload_pending = 0;
+    rt_stop_export(&c->reload_req, channel_reload_stopped);
   }
 
   /* Stop export */
@@ -710,15 +712,13 @@ channel_do_stop(struct channel *c)
   CALL(c->channel->shutdown, c);
 
   /* This have to be done in here, as channel pool is freed before channel_do_down() */
-  c->in_table = NULL;
-  c->reload_event = NULL;
   c->out_table = NULL;
 }
 
 static void
 channel_do_down(struct channel *c)
 {
-  ASSERT(!c->reload_active);
+  ASSERT(!c->reload_req.hook);
 
   c->proto->active_channels--;
 
@@ -726,8 +726,6 @@ channel_do_down(struct channel *c)
   memset(&c->import_stats, 0, sizeof(struct channel_import_stats));
   memset(&c->export_stats, 0, sizeof(struct channel_export_stats));
 
-  c->in_table = NULL;
-  c->reload_event = NULL;
   c->out_table = NULL;
 
   /* The in_table and out_table are going to be freed by freeing their resource pools. */
@@ -922,7 +920,9 @@ int
 channel_reconfigure(struct channel *c, struct channel_config *cf)
 {
   /* FIXME: better handle these changes, also handle in_keep_filtered */
-  if ((c->table != cf->table->table) || (cf->ra_mode && (c->ra_mode != cf->ra_mode)))
+  if ((c->table != cf->table->table) ||
+      (cf->ra_mode && (c->ra_mode != cf->ra_mode)) ||
+      (cf->in_keep != c->in_keep))
     return 0;
 
   /* Note that filter_same() requires arguments in (new, old) order */
@@ -949,7 +949,6 @@ channel_reconfigure(struct channel *c, struct channel_config *cf)
   c->preference = cf->preference;
   c->debug = cf->debug;
   c->in_req.trace_routes = c->out_req.trace_routes = c->debug | c->proto->debug;
-  c->in_keep_filtered = cf->in_keep_filtered;
   c->rpki_reload = cf->rpki_reload;
 
   /* Execute channel-specific reconfigure hook */
@@ -2099,7 +2098,7 @@ channel_show_stats(struct channel *c)
   u32 in_routes = c->in_limit.count;
   u32 out_routes = c->out_limit.count;
 
-  if (c->in_keep_filtered)
+  if (c->in_keep)
     cli_msg(-1006, "    Routes:         %u imported, %u filtered, %u exported, %u preferred",
 	    in_routes, (rx_routes - in_routes), out_routes, SRI(pref));
   else
```
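
The mechanism change is clearest in `channel_setup_in_table()` and `channel_schedule_reload()`: the private import table and its `reload_event` loop are gone, and a reload is now an export request registered on the main table, driven by callback hooks, with `SKIP_BACK` (a container_of-style macro) recovering the owning channel from the embedded request. The sketch below imitates that embedded-request pattern with simplified stand-in types; the toy table, the state value, and every name not quoted from the diff are assumptions, not the real BIRD API.

```c
/* Minimal sketch (simplified stand-in types, not the real BIRD API): a reload
 * is modelled as an export request handed to the main table.  The request
 * carries function-pointer hooks, and a container_of-style macro recovers the
 * owning channel from the embedded request, as SKIP_BACK does in the diff. */
#include <stdio.h>
#include <stddef.h>

#define CONTAINER_OF(type, member, ptr) \
  ((type *)((char *)(ptr) - offsetof(type, member)))

struct export_request {                  /* stand-in for rt_export_request */
  const char *name;
  void (*export_bulk)(struct export_request *req, const char *net);
  void (*log_state_change)(struct export_request *req, int state);
};

struct channel_sketch {
  const char *name;
  struct export_request reload_req;      /* embedded, like c->reload_req */
};

static void reload_export_bulk(struct export_request *req, const char *net)
{
  struct channel_sketch *c = CONTAINER_OF(struct channel_sketch, reload_req, req);
  printf("channel %s: re-importing %s through its filters\n", c->name, net);
}

static void reload_log_state_change(struct export_request *req, int state)
{
  /* When the feed is complete (state 1 stands in for TES_READY), the request
   * would be stopped, possibly restarting a pending reload afterwards. */
  if (state == 1)
    printf("%s: feed done, stopping the reload request\n", req->name);
}

/* A toy "table" that feeds two nets to whoever requests an export. */
static void table_request_export(struct export_request *req)
{
  req->export_bulk(req, "10.0.0.0/24");
  req->export_bulk(req, "192.168.1.0/24");
  req->log_state_change(req, 1);
}

int main(void)
{
  struct channel_sketch c = { .name = "bgp1.ipv4" };
  c.reload_req = (struct export_request) {
    .name = "bgp1.ipv4.import",
    .export_bulk = reload_export_bulk,
    .log_state_change = reload_log_state_change,
  };

  /* In this model, scheduling a reload boils down to one request call. */
  table_request_export(&c.reload_req);
  return 0;
}
```

The design point is that the request object is embedded in the channel, so there is no separate allocation or lifetime to track: the hooks recover the channel from the request pointer, and checking whether a reload is running reduces to testing the request's hook pointer, exactly as the new `!!c->reload_req.hook` tests in the diff do.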