Diffstat (limited to 'nest')
-rw-r--r--  nest/cli.c      |  10
-rw-r--r--  nest/cli.h      |   1
-rw-r--r--  nest/iface.c    |   7
-rw-r--r--  nest/locks.h    |   1
-rw-r--r--  nest/password.h |   2
-rw-r--r--  nest/proto.c    |  74
-rw-r--r--  nest/protocol.h |  14
-rw-r--r--  nest/rt-attr.c  |  20
-rw-r--r--  nest/rt-show.c  |   3
-rw-r--r--  nest/rt-table.c |  75
-rw-r--r--  nest/rt.h       |   3
11 files changed, 133 insertions, 77 deletions
diff --git a/nest/cli.c b/nest/cli.c
--- a/nest/cli.c
+++ b/nest/cli.c
@@ -262,7 +262,7 @@ cli_command(struct cli *c)
     log(L_TRACE "CLI: %s", c->rx_buf);
   bzero(&f, sizeof(f));
   f.mem = c->parser_pool;
-  f.pool = rp_new(c->pool, "Config");
+  f.pool = rp_new(c->pool, the_bird_domain.the_bird, "Config");
   init_list(&f.symbols);
   cf_read_hook = cli_cmd_read_hook;
   cli_rh_pos = c->rx_buf;
@@ -309,7 +309,7 @@ cli_event(void *data)
 cli *
 cli_new(struct birdsock *sock)
 {
-  pool *p = rp_new(cli_pool, "CLI");
+  pool *p = rp_new(cli_pool, the_bird_domain.the_bird, "CLI");
   cli *c = mb_alloc(p, sizeof(cli));
 
   bzero(c, sizeof(cli));
@@ -417,11 +417,11 @@ cli_free(cli *c)
 
   if (defer)
   {
-    rfree(c->sock);
+    sk_close(c->sock);
     c->sock = NULL;
   }
   else
-    rfree(c->pool);
+    rp_free(c->pool);
 }
 
 /**
@@ -433,7 +433,7 @@ cli_free(cli *c)
 void
 cli_init(void)
 {
-  cli_pool = rp_new(&root_pool, "CLI");
+  cli_pool = rp_new(&root_pool, the_bird_domain.the_bird, "CLI");
   init_list(&cli_log_hooks);
   cli_log_inited = 1;
 }
diff --git a/nest/cli.h b/nest/cli.h
--- a/nest/cli.h
+++ b/nest/cli.h
@@ -10,6 +10,7 @@
 #define _BIRD_CLI_H_
 
 #include "lib/resource.h"
+#include "lib/lists.h"
 #include "lib/event.h"
 
 #define CLI_RX_BUF_SIZE 4096
diff --git a/nest/iface.c b/nest/iface.c
index f1938664..a024b943 100644
--- a/nest/iface.c
+++ b/nest/iface.c
@@ -1018,12 +1018,15 @@ if_choose_router_id(struct iface_patt *mask, u32 old_id)
 void
 if_init(void)
 {
-  if_pool = rp_new(&root_pool, "Interfaces");
+  iface_domain = DOMAIN_NEW(attrs, "Interfaces");
+
+  IFACE_LOCK;
+  if_pool = rp_new(&root_pool, iface_domain.attrs, "Interfaces");
   init_list(&global_iface_list);
   iface_sub_slab = sl_new(if_pool, sizeof(struct iface_notification));
   strcpy(default_vrf.name, "default");
   neigh_init(if_pool);
-  iface_domain = DOMAIN_NEW(attrs, "Interfaces");
+  IFACE_UNLOCK;
 }
 
 /*
diff --git a/nest/locks.h b/nest/locks.h
index 04571e69..993e296b 100644
--- a/nest/locks.h
+++ b/nest/locks.h
@@ -10,6 +10,7 @@
 #define _BIRD_LOCKS_H_
 
 #include "lib/resource.h"
+#include "lib/lists.h"
 #include "lib/event.h"
 
 /*
diff --git a/nest/password.h b/nest/password.h
index 53168bb7..335b9cd4 100644
--- a/nest/password.h
+++ b/nest/password.h
@@ -10,6 +10,8 @@
 #ifndef PASSWORD_H
 #define PASSWORD_H
 
+#include "lib/lists.h"
+
 struct password_item {
   node n;
   const char *password;		/* Key data, null terminated */
diff --git a/nest/proto.c b/nest/proto.c
index cd6a3faa..32183c9d 100644
--- a/nest/proto.c
+++ b/nest/proto.c
@@ -23,7 +23,7 @@
 #include "filter/f-inst.h"
 
 pool *proto_pool;
-list STATIC_LIST_INIT(proto_list);
+static TLIST_LIST(proto) global_proto_list;
 
 static list STATIC_LIST_INIT(protocol_list);
 
@@ -367,6 +367,7 @@ channel_roa_subscribe(struct channel *c, rtable *tab, int dir)
     .name = mb_sprintf(c->proto->pool, "%s.%s.roa-%s.%s",
                        c->proto->name, c->name, dir ? "in" : "out", tab->name),
     .list = proto_work_list(c->proto),
+    .pool = c->proto->pool,
     .trace_routes = c->debug | c->proto->debug,
     .dump_req = channel_dump_roa_req,
     .export_one = channel_export_one_roa,
@@ -495,6 +496,7 @@ channel_start_export(struct channel *c)
   c->out_req = (struct rt_export_request) {
     .name = mb_sprintf(c->proto->pool, "%s.%s", c->proto->name, c->name),
     .list = proto_work_list(c->proto),
+    .pool = c->proto->pool,
     .addr = c->out_subprefix,
     .addr_mode = c->out_subprefix ? TE_ADDR_IN : TE_ADDR_NONE,
     .trace_routes = c->debug | c->proto->debug,
@@ -685,6 +687,7 @@ channel_setup_in_table(struct channel *c)
   c->reload_req = (struct rt_export_request) {
     .name = mb_sprintf(c->proto->pool, "%s.%s.import", c->proto->name, c->name),
     .list = proto_work_list(c->proto),
+    .pool = c->proto->pool,
     .trace_routes = c->debug | c->proto->debug,
     .export_bulk = channel_reload_export_bulk,
     .dump_req = channel_reload_dump_req,
@@ -1116,8 +1119,11 @@ proto_cleanup(struct proto *p)
 {
   CALL(p->proto->cleanup, p);
 
-  rfree(p->pool);
-  p->pool = NULL;
+  if (p->pool)
+  {
+    rp_free(p->pool);
+    p->pool = NULL;
+  }
 
   p->active = 0;
   proto_log_state_change(p);
@@ -1129,13 +1135,14 @@ proto_loop_stopped(void *ptr)
 {
   struct proto *p = ptr;
 
-  birdloop_enter(&main_birdloop);
+  ASSERT_DIE(birdloop_inside(&main_birdloop));
+  ASSERT_DIE(p->loop != &main_birdloop);
 
+  p->pool = NULL; /* is freed by birdloop_free() */
   birdloop_free(p->loop);
   p->loop = &main_birdloop;
 
-  proto_cleanup(p);
-  birdloop_leave(&main_birdloop);
+  proto_cleanup(p);
 }
 
 static void
@@ -1190,7 +1197,7 @@ proto_new(struct proto_config *cf)
 }
 
 static struct proto *
-proto_init(struct proto_config *c, node *n)
+proto_init(struct proto_config *c, struct proto *after)
 {
   struct protocol *pr = c->protocol;
   struct proto *p = pr->init(c);
@@ -1199,7 +1206,7 @@ proto_init(struct proto_config *c, node *n)
   p->proto_state = PS_DOWN;
   p->last_state_change = current_time();
   p->vrf = c->vrf;
-  insert_node(&p->n, n);
+  proto_add_after(&global_proto_list, p, after);
 
   p->event = ev_new_init(proto_pool, proto_event, p);
 
@@ -1214,13 +1221,16 @@ proto_start(struct proto *p)
   DBG("Kicking %s up\n", p->name);
   PD(p, "Starting");
 
-  p->pool = rp_newf(proto_pool, "Protocol %s", p->cf->name);
-
   if (graceful_restart_state == GRS_INIT)
     p->gr_recovery = 1;
 
   if (p->cf->loop_order != DOMAIN_ORDER(the_bird))
-    p->loop = birdloop_new(p->pool, p->cf->loop_order, p->pool->name, p->cf->loop_max_latency);
+  {
+    p->loop = birdloop_new(proto_pool, p->cf->loop_order, p->cf->loop_max_latency, "Protocol %s", p->cf->name);
+    p->pool = birdloop_pool(p->loop);
+  }
+  else
+    p->pool = rp_newf(proto_pool, the_bird_domain.the_bird, "Protocol %s", p->cf->name);
 
   p->iface_sub.target = proto_event_list(p);
@@ -1430,8 +1440,6 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
   struct proto_config *oc, *nc;
   struct symbol *sym;
   struct proto *p;
-  node *n;
-
   DBG("protos_commit:\n");
 
   if (old)
@@ -1518,8 +1526,8 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
   }
 
   struct proto *first_dev_proto = NULL;
+  struct proto *after = NULL;
 
-  n = NODE &(proto_list.head);
   WALK_LIST(nc, new->protos)
     if (!nc->proto)
     {
@@ -1527,14 +1535,14 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
       if (old)
        log(L_INFO "Adding protocol %s", nc->name);
 
-      p = proto_init(nc, n);
-      n = NODE p;
+      p = proto_init(nc, after);
+      after = p;
 
       if (p->proto == &proto_unix_iface)
        first_dev_proto = p;
     }
     else
-      n = NODE nc->proto;
+      after = nc->proto;
 
   DBG("Protocol start\n");
@@ -1552,7 +1560,7 @@ protos_commit(struct config *new, struct config *old, int force_reconfig, int ty
   }
 
   /* Start all new protocols */
-  WALK_LIST_DELSAFE(p, n, proto_list)
+  WALK_TLIST_DELSAFE(proto, p, &global_proto_list)
     proto_rethink_goal(p);
 }
 
@@ -1574,18 +1582,19 @@ proto_rethink_goal(struct proto *p)
   if (p->reconfiguring && !p->active)
   {
     struct proto_config *nc = p->cf_new;
-    node *n = p->n.prev;
+    struct proto *after = p->n.prev;
+
     DBG("%s has shut down for reconfiguration\n", p->name);
     p->cf->proto = NULL;
     config_del_obstacle(p->cf->global);
     proto_remove_channels(p);
-    rem_node(&p->n);
+    proto_rem_node(&global_proto_list, p);
     rfree(p->event);
     mb_free(p->message);
     mb_free(p);
     if (!nc)
       return;
 
-    p = proto_init(nc, n);
+    p = proto_init(nc, after);
   }
 
   /* Determine what state we want to reach */
@@ -1601,7 +1610,7 @@ proto_rethink_goal(struct proto *p)
 struct proto *
 proto_spawn(struct proto_config *cf, uint disabled)
 {
-  struct proto *p = proto_init(cf, TAIL(proto_list));
+  struct proto *p = proto_init(cf, global_proto_list.last);
   p->disabled = disabled;
   proto_rethink_goal(p);
   return p;
@@ -1697,8 +1706,7 @@ graceful_restart_done(timer *t UNUSED)
   log(L_INFO "Graceful restart done");
   graceful_restart_state = GRS_DONE;
 
-  struct proto *p;
-  WALK_LIST(p, proto_list)
+  WALK_TLIST(proto, p, &global_proto_list)
   {
     if (!p->gr_recovery)
       continue;
@@ -1794,8 +1802,7 @@ protos_dump_all(void)
 {
   debug("Protocols:\n");
 
-  struct proto *p;
-  WALK_LIST(p, proto_list) PROTO_LOCKED_FROM_MAIN(p)
+  WALK_TLIST(proto, p, &global_proto_list) PROTO_LOCKED_FROM_MAIN(p)
   {
 #define DPF(x)	(p->x ? " " #x : "")
     debug("  protocol %s (%p) state %s with %d active channels flags: %s%s%s%s\n",
@@ -1854,7 +1861,7 @@ protos_build(void)
 {
   protos_build_gen();
 
-  proto_pool = rp_new(&root_pool, "Protocols");
+  proto_pool = rp_new(&root_pool, the_bird_domain.the_bird, "Protocols");
 }
 
@@ -2481,10 +2488,9 @@ proto_apply_cmd_symbol(const struct symbol *s, void (* cmd)(struct proto *, uint
 static void
 proto_apply_cmd_patt(const char *patt, void (* cmd)(struct proto *, uintptr_t, int), uintptr_t arg)
 {
-  struct proto *p;
   int cnt = 0;
 
-  WALK_LIST(p, proto_list)
+  WALK_TLIST(proto, p, &global_proto_list)
     if (!patt || patmatch(patt, p->name))
       PROTO_LOCKED_FROM_MAIN(p)
        cmd(p, arg, cnt++);
@@ -2511,7 +2517,7 @@ proto_apply_cmd(struct proto_spec ps, void (* cmd)(struct proto *, uintptr_t, in
 struct proto *
 proto_get_named(struct symbol *sym, struct protocol *pr)
 {
-  struct proto *p, *q;
+  struct proto *p;
 
   if (sym)
   {
@@ -2525,7 +2531,7 @@ proto_get_named(struct symbol *sym, struct protocol *pr)
   else
   {
     p = NULL;
-    WALK_LIST(q, proto_list)
+    WALK_TLIST(proto, q, &global_proto_list)
       if ((q->proto == pr) && (q->proto_state != PS_DOWN))
       {
        if (p)
@@ -2562,9 +2568,9 @@ proto_iterate_named(struct symbol *sym, struct protocol *proto, struct proto *ol
   }
   else
   {
-    for (struct proto *p = !old ? HEAD(proto_list) : NODE_NEXT(old);
-         NODE_VALID(p);
-         p = NODE_NEXT(p))
+    for (struct proto *p = old ? old->n.next : global_proto_list.first;
+         p;
+         p = p->n.next)
     {
       if ((p->proto == proto) && (p->proto_state != PS_DOWN))
       {
diff --git a/nest/protocol.h b/nest/protocol.h
index 01153162..02ec5c15 100644
--- a/nest/protocol.h
+++ b/nest/protocol.h
@@ -116,9 +116,16 @@ struct proto_config {
   /* Protocol-specific data follow... */
 };
 
+#define TLIST_PREFIX proto
+#define TLIST_TYPE struct proto
+#define TLIST_ITEM n
+#define TLIST_WANT_WALK
+#define TLIST_WANT_ADD_TAIL
+#define TLIST_WANT_ADD_AFTER
+
 /* Protocol statistics */
 struct proto {
-  node n;				/* Node in global proto_list */
+  TLIST_DEFAULT_NODE;			/* Node in global proto_list */
   struct protocol *proto;		/* Protocol */
   struct proto_config *cf;		/* Configuration data */
   struct proto_config *cf_new;		/* Configuration we want to switch to after shutdown (NULL=delete) */
@@ -198,6 +205,8 @@ struct proto {
   /* Hic sunt protocol-specific data */
 };
 
+#include "lib/tlists.h"
+
 struct proto_spec {
   const void *ptr;
   int patt;
@@ -271,6 +280,8 @@ struct proto *proto_iterate_named(struct symbol *sym, struct protocol *proto, st
 
 #define PROTO_LOCKED_FROM_MAIN(p)	for (struct birdloop *_proto_loop = PROTO_ENTER_FROM_MAIN(p); _proto_loop; PROTO_LEAVE_FROM_MAIN(_proto_loop), (_proto_loop = NULL))
 
+static inline struct domain_generic *proto_domain(struct proto *p)
+{ return birdloop_domain(p->loop); }
 
 #define CMD_RELOAD	0
 #define CMD_RELOAD_IN	1
@@ -284,7 +295,6 @@ proto_get_router_id(struct proto_config *pc)
 
 extern pool *proto_pool;
-extern list proto_list;
 
 /*
  * Each protocol instance runs two different state machines:
diff --git a/nest/rt-attr.c b/nest/rt-attr.c
index 903926f6..38612a4e 100644
--- a/nest/rt-attr.c
+++ b/nest/rt-attr.c
@@ -605,9 +605,16 @@ ea_register(pool *p, struct ea_class *def)
 struct ea_class_ref *
 ea_register_alloc(pool *p, struct ea_class cl)
 {
+  struct ea_class_ref *ref;
+
+  RTA_LOCK;
   struct ea_class *clp = ea_class_find_by_name(cl.name);
   if (clp && clp->type == cl.type)
-    return ea_ref_class(p, clp);
+  {
+    ref = ea_ref_class(p, clp);
+    RTA_UNLOCK;
+    return ref;
+  }
 
   uint namelen = strlen(cl.name) + 1;
 
@@ -619,14 +626,18 @@ ea_register_alloc(pool *p, struct ea_class cl)
   memcpy(cla->name, cl.name, namelen);
   cla->cl.name = cla->name;
 
-  return ea_register(p, &cla->cl);
+  ref = ea_register(p, &cla->cl);
+  RTA_UNLOCK;
+  return ref;
 }
 
 void
 ea_register_init(struct ea_class *clp)
 {
+  RTA_LOCK;
   ASSERT_DIE(!ea_class_find_by_name(clp->name));
   ea_register(&root_pool, clp);
+  RTA_UNLOCK;
 }
 
 struct ea_class *
@@ -1598,7 +1609,8 @@ rta_init(void)
 {
   attrs_domain = DOMAIN_NEW(attrs, "Attributes");
 
-  rta_pool = rp_new(&root_pool, "Attributes");
+  RTA_LOCK;
+  rta_pool = rp_new(&root_pool, attrs_domain.attrs, "Attributes");
 
   for (uint i=0; i<ARRAY_SIZE(ea_slab_sizes); i++)
     ea_slab[i] = sl_new(rta_pool, ea_slab_sizes[i]);
@@ -1607,6 +1619,8 @@ rta_init(void)
   rte_src_init();
   ea_class_init();
 
+  RTA_UNLOCK;
+
   /* These attributes are required to be first for nice "show route" output */
   ea_register_init(&ea_gen_nexthop);
   ea_register_init(&ea_gen_hostentry);
diff --git a/nest/rt-show.c b/nest/rt-show.c
index a5c7dc8f..7a8b629b 100644
--- a/nest/rt-show.c
+++ b/nest/rt-show.c
@@ -227,7 +227,7 @@ rt_show_export_stopped_cleanup(struct rt_export_request *req)
   req->hook = NULL;
 
   /* And free the CLI (deferred) */
-  rfree(d->cli->pool);
+  rp_free(d->cli->pool);
 }
 
 static int
@@ -288,6 +288,7 @@ rt_show_cont(struct rt_show_data *d)
     .addr = d->addr,
     .name = "CLI Show Route",
     .list = &global_work_list,
+    .pool = c->pool,
     .export_bulk = rt_show_net_export_bulk,
     .dump_req = rt_show_dump_req,
     .log_state_change = rt_show_log_state_change,
diff --git a/nest/rt-table.c b/nest/rt-table.c
index b18727b1..f1e3c8f7 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -2027,7 +2027,7 @@ rt_export_stopped(struct rt_export_hook *hook)
   rem_node(&hook->n);
 
   /* Free the hook itself together with its pool */
-  rfree(hook->pool);
+  rp_free(hook->pool);
 }
 
 static inline void
@@ -2131,7 +2131,7 @@ rt_table_export_start_locked(struct rtable_private *tab, struct rt_export_reques
   struct rt_exporter *re = &tab->exporter.e;
   rt_lock_table(tab);
 
-  req->hook = rt_alloc_export(re, sizeof(struct rt_table_export_hook));
+  req->hook = rt_alloc_export(re, req->pool, sizeof(struct rt_table_export_hook));
   req->hook->req = req;
 
   struct rt_table_export_hook *hook = SKIP_BACK(struct rt_table_export_hook, h, req->hook);
@@ -2212,9 +2212,9 @@ rt_request_export_other(struct rt_exporter *re, struct rt_export_request *req)
 }
 
 struct rt_export_hook *
-rt_alloc_export(struct rt_exporter *re, uint size)
+rt_alloc_export(struct rt_exporter *re, pool *pp, uint size)
 {
-  pool *p = rp_new(re->rp, "Export hook");
+  pool *p = rp_new(pp, pp->domain, "Export hook");
   struct rt_export_hook *hook = mb_allocz(p, size);
 
   hook->pool = p;
@@ -2709,13 +2709,14 @@ rt_flowspec_link(rtable *src_pub, rtable *dst_pub)
 
   if (!ln)
   {
-    pool *p = src->rp;
+    pool *p = birdloop_pool(dst_pub->loop);
     ln = mb_allocz(p, sizeof(struct rt_flowspec_link));
     ln->src = src_pub;
     ln->dst = dst_pub;
     ln->req = (struct rt_export_request) {
      .name = mb_sprintf(p, "%s.flowspec.notifier", dst_pub->name),
      .list = birdloop_event_list(dst_pub->loop),
+     .pool = p,
      .trace_routes = src->config->debug,
      .dump_req = rt_flowspec_dump_req,
      .log_state_change = rt_flowspec_log_state_change,
@@ -2781,8 +2782,6 @@ rt_free(resource *_r)
 {
   struct rtable_private *r = SKIP_BACK(struct rtable_private, r, _r);
 
-  DOMAIN_FREE(rtable, r->lock);
-
   DBG("Deleting routing table %s\n", r->name);
   ASSERT_DIE(r->use_count == 0);
 
@@ -2845,10 +2844,22 @@ rt_setup(pool *pp, struct rtable_config *cf)
 {
   ASSERT_DIE(birdloop_inside(&main_birdloop));
 
-  pool *p = rp_newf(pp, "Routing table %s", cf->name);
+  /* Start the service thread */
+  struct birdloop *loop = birdloop_new(pp, DOMAIN_ORDER(service), 0, "Routing table service %s", cf->name);
+  birdloop_enter(loop);
+  pool *sp = birdloop_pool(loop);
+
+  /* Create the table domain and pool */
+  DOMAIN(rtable) dom = DOMAIN_NEW(rtable, cf->name);
+  LOCK_DOMAIN(rtable, dom);
+  pool *p = rp_newf(sp, dom.rtable, "Routing table data %s", cf->name);
+
+  /* Create the actual table */
   struct rtable_private *t = ralloc(p, &rt_class);
   t->rp = p;
 
+  t->loop = loop;
+  t->lock = dom;
   t->rte_slab = sl_new(p, sizeof(struct rte_storage));
 
@@ -2859,8 +2870,6 @@ rt_setup(pool *pp, struct rtable_config *cf)
   if (t->id >= rtable_max_id)
     rtable_max_id = t->id + 1;
 
-  t->lock = DOMAIN_NEW(rtable, t->name);
-
   fib_init(&t->fib, p, t->addr_type, sizeof(net), OFFSETOF(net, n), 0, NULL);
 
   if (cf->trie_used)
@@ -2906,9 +2915,9 @@ rt_setup(pool *pp, struct rtable_config *cf)
     t->flowspec_trie->ipv4 = (t->addr_type == NET_FLOW4);
   }
 
-  /* Start the service thread */
-  t->loop = birdloop_new(p, DOMAIN_ORDER(service), mb_sprintf(p, "Routing table %s", t->name), 0);
-  birdloop_enter(t->loop);
+  UNLOCK_DOMAIN(rtable, dom);
+
+  /* Setup the service thread flag handler */
   birdloop_flag_set_handler(t->loop, &t->fh);
   birdloop_leave(t->loop);
 
@@ -2925,7 +2934,7 @@ void
 rt_init(void)
 {
   rta_init();
-  rt_table_pool = rp_new(&root_pool, "Routing tables");
+  rt_table_pool = rp_new(&root_pool, the_bird_domain.the_bird, "Routing tables");
   init_list(&routing_tables);
   init_list(&deleted_routing_tables);
   ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
@@ -4063,21 +4072,23 @@ rt_shutdown(void *tab_)
 static void
 rt_delete(void *tab_)
 {
-  birdloop_enter(&main_birdloop);
+  ASSERT_DIE(birdloop_inside(&main_birdloop));
 
   /* We assume that nobody holds the table reference now as use_count is zero.
    * Anyway the last holder may still hold the lock. Therefore we lock and
    * unlock it the last time to be sure that nobody is there. */
   struct rtable_private *tab = RT_LOCK((rtable *) tab_);
   struct config *conf = tab->deleted;
+  DOMAIN(rtable) dom = tab->lock;
   RT_UNLOCK(RT_PUB(tab));
 
+  /* Everything is freed by freeing the loop */
   birdloop_free(tab->loop);
-  rfree(tab->rp);
   config_del_obstacle(conf);
 
-  birdloop_leave(&main_birdloop);
+  /* Also drop the domain */
+  DOMAIN_FREE(rtable, dom);
 }
 
@@ -4632,18 +4643,9 @@ rt_init_hostcache(struct rtable_private *tab)
     .data = tab,
   };
 
-  hc->req = (struct rt_export_request) {
-    .name = mb_sprintf(tab->rp, "%s.hcu.notifier", tab->name),
-    .list = birdloop_event_list(tab->loop),
-    .trace_routes = tab->config->debug,
-    .dump_req = hc_notify_dump_req,
-    .log_state_change = hc_notify_log_state_change,
-    .export_one = hc_notify_export_one,
-  };
-
-  rt_table_export_start_locked(tab, &hc->req);
-
   tab->hostcache = hc;
+
+  ev_send_loop(tab->loop, &hc->update);
 }
 
@@ -4771,9 +4773,24 @@ rt_update_hostcache(void *data)
   RT_LOCKED((rtable *) data, tab)
   {
-
   struct hostcache *hc = tab->hostcache;
 
+  /* Finish initialization */
+  if (!hc->req.name)
+  {
+    hc->req = (struct rt_export_request) {
+      .name = mb_sprintf(tab->rp, "%s.hcu.notifier", tab->name),
+      .list = birdloop_event_list(tab->loop),
+      .pool = tab->rp,
+      .trace_routes = tab->config->debug,
+      .dump_req = hc_notify_dump_req,
+      .log_state_change = hc_notify_log_state_change,
+      .export_one = hc_notify_export_one,
+    };
+
+    rt_table_export_start_locked(tab, &hc->req);
+  }
+
   /* Shutdown shortcut */
   if (!hc->req.hook)
     RT_RETURN(tab);
diff --git a/nest/rt.h b/nest/rt.h
--- a/nest/rt.h
+++ b/nest/rt.h
@@ -294,6 +294,7 @@ struct rt_export_request {
   u8 addr_mode;				/* Network prefilter mode (TE_ADDR_*) */
 
   event_list *list;			/* Where to schedule export events */
+  pool *pool;				/* Pool to use for allocations */
 
   /* There are two methods of export. You can either request feeding every single change
    * or feeding the whole route feed. In case of regular export, &export_one is preferred.
@@ -438,7 +439,7 @@ int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
  */
 
 void rt_init_export(struct rt_exporter *re, struct rt_export_hook *hook);
-struct rt_export_hook *rt_alloc_export(struct rt_exporter *re, uint size);
+struct rt_export_hook *rt_alloc_export(struct rt_exporter *re, pool *pool, uint size);
 void rt_stop_export_common(struct rt_export_hook *hook);
 void rt_export_stopped(struct rt_export_hook *hook);
 void rt_exporter_init(struct rt_exporter *re);