author     Maria Matejka <mq@ucw.cz>  2022-05-31 12:51:34 +0200
committer  Maria Matejka <mq@ucw.cz>  2022-05-31 12:51:34 +0200
commit     ea109ce3e3474dd10d7592c44d2371b794f5c867 (patch)
tree       bfd5e54e1fa8f50efd5456a54dc1ebf508c1804c /nest
parent     1493695c6ba2b169523f7c2097fac8e8343352fe (diff)
parent     4fe9881d625f10e44109a649e369a413bd98de71 (diff)
Merge commit '4fe9881d625f10e44109a649e369a413bd98de71' into haugesund
Diffstat (limited to 'nest')
-rw-r--r--  nest/rt-attr.c    56
-rw-r--r--  nest/rt-table.c  130
-rw-r--r--  nest/rt.h         44
3 files changed, 148 insertions(+), 82 deletions(-)
diff --git a/nest/rt-attr.c b/nest/rt-attr.c
index bd7ca425..cf3ab659 100644
--- a/nest/rt-attr.c
+++ b/nest/rt-attr.c
@@ -114,10 +114,48 @@ struct ea_class ea_gen_nexthop = {
.type = T_NEXTHOP_LIST,
};
-struct ea_class ea_mpls_labels = {
- .name = "mpls_labels",
- .type = T_CLIST,
+/*
+ * ea_set_hostentry() acquires hostentry from hostcache.
+ * New hostentry has zero use count. Cached rta locks its
+ * hostentry (increases its use count), uncached rta does not lock it.
+ * Hostentry with zero use count is removed asynchronously
+ * during host cache update, therefore it is safe to hold
+ * such hostentry temporarily as long as you hold the table lock.
+ *
+ * There is no need to hold a lock for hostentry->dep table, because that table
+ * contains routes responsible for that hostentry, and therefore is non-empty if
+ * given hostentry has non-zero use count. If the hostentry has zero use count,
+ * the entry is removed before dep is referenced.
+ *
+ * The protocol responsible for routes with recursive next hops should hold a
+ * lock for the 'source' table governing those routes (argument tab),
+ * because its routes reference hostentries related to the governing table.
+ * When all such routes are removed, their rtas are removed immediately,
+ * reaching zero use count. The 'source' table lock can then be released,
+ * although hostentries may still exist - they will be freed together with
+ * the 'source' table.
+ */
+
+static void
+ea_gen_hostentry_stored(const eattr *ea)
+{
+ struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
+ had->he->uc++;
+}
+
+static void
+ea_gen_hostentry_freed(const eattr *ea)
+{
+ struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr;
+ had->he->uc--;
+}
+
+struct ea_class ea_gen_hostentry = {
+ .name = "hostentry",
+ .type = T_HOSTENTRY,
.readonly = 1,
+ .stored = ea_gen_hostentry_stored,
+ .freed = ea_gen_hostentry_freed,
};
const char * rta_dest_names[RTD_MAX] = {
@@ -876,6 +914,8 @@ ea_list_ref(ea_list *l)
struct ea_class *cl = ea_class_global[a->id];
ASSERT_DIE(cl && cl->uc);
+
+ CALL(cl->stored, a);
cl->uc++;
}
}
@@ -890,6 +930,8 @@ ea_list_unref(ea_list *l)
struct ea_class *cl = ea_class_global[a->id];
ASSERT_DIE(cl && cl->uc);
+
+ CALL(cl->freed, a);
if (!--cl->uc)
ea_class_free(cl);
}
@@ -1206,9 +1248,7 @@ rta_hash(rta *a)
{
u64 h;
mem_hash_init(&h);
-#define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f));
#define BMIX(f) mem_hash_mix_num(&h, a->f);
- MIX(hostentry);
BMIX(dest);
#undef MIX
@@ -1219,7 +1259,6 @@ static inline int
rta_same(rta *x, rta *y)
{
return (x->dest == y->dest &&
- x->hostentry == y->hostentry &&
ea_same(x->eattrs, y->eattrs));
}
@@ -1303,7 +1342,6 @@ rta_lookup(rta *o)
r = rta_copy(o);
r->hash_key = h;
r->cached = 1;
- rt_lock_hostentry(r->hostentry);
rta_insert(r);
if (++rta_cache_count > rta_cache_limit)
@@ -1320,7 +1358,6 @@ rta__free(rta *a)
*a->pprev = a->next;
if (a->next)
a->next->pprev = a->pprev;
- rt_unlock_hostentry(a->hostentry);
ea_free(a->eattrs);
a->cached = 0;
sl_free(a);
@@ -1411,8 +1448,7 @@ rta_init(void)
ea_register_init(&ea_gen_from);
ea_register_init(&ea_gen_source);
ea_register_init(&ea_gen_nexthop);
-
- ea_register_init(&ea_mpls_labels);
+ ea_register_init(&ea_gen_hostentry);
}
/*
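
The stored/freed hooks registered on ea_gen_hostentry above replace the old explicit rt_lock_hostentry()/rt_unlock_hostentry() calls removed from rta_lookup() and rta__free(): ea_list_ref() and ea_list_unref() now invoke the per-class hooks through CALL(), so a hostentry's use count follows the lifetime of the cached attribute list carrying it. A minimal sketch of that hook pattern, using simplified stand-ins for BIRD's eattr, ea_class and CALL() rather than the real definitions:

/* Minimal sketch of the stored/freed hook pattern, with simplified
 * stand-ins for BIRD's eattr and ea_class types and its CALL() macro. */
#include <assert.h>
#include <stdio.h>

struct eattr { void *ptr; };               /* stand-in: the real eattr keeps u.ptr */

struct ea_class {
  const char *name;
  void (*stored)(const struct eattr *);    /* called when an ea_list gets cached */
  void (*freed)(const struct eattr *);     /* called when a cached ea_list is dropped */
};

/* Simplified CALL(): invoke the hook only if the class defines one */
#define CALL(fn, ...) do { if (fn) (fn)(__VA_ARGS__); } while (0)

struct hostentry { unsigned uc; };         /* only the use count matters here */

static void hostentry_stored(const struct eattr *a)
{ ((struct hostentry *) a->ptr)->uc++; }

static void hostentry_freed(const struct eattr *a)
{ ((struct hostentry *) a->ptr)->uc--; }

int main(void)
{
  struct ea_class hostentry_class = {
    .name = "hostentry",
    .stored = hostentry_stored,
    .freed = hostentry_freed,
  };

  struct hostentry he = { .uc = 0 };
  struct eattr a = { .ptr = &he };

  CALL(hostentry_class.stored, &a);        /* what ea_list_ref() now does per attribute */
  assert(he.uc == 1);
  CALL(hostentry_class.freed, &a);         /* what ea_list_unref() now does per attribute */
  assert(he.uc == 0);

  puts("hostentry use count balanced by the stored/freed hooks");
  return 0;
}

The visible benefit in the diff is that rta_lookup() and rta__free() no longer carry hostentry-specific code; the locking rides along with the generic ea_list reference counting.
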
diff --git a/nest/rt-table.c b/nest/rt-table.c
index d43305c9..8677c177 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -121,6 +121,7 @@ static void rt_free_hostcache(rtable *tab);
static void rt_notify_hostcache(rtable *tab, net *net);
static void rt_update_hostcache(rtable *tab);
static void rt_next_hop_update(rtable *tab);
+static inline void rt_next_hop_resolve_rte(rte *r);
static inline void rt_prune_table(rtable *tab);
static inline void rt_schedule_notify(rtable *tab);
static void rt_flowspec_notify(rtable *tab, net *net);
@@ -159,7 +160,8 @@ const char *rt_export_state_name(u8 state)
return rt_export_state_name_array[state];
}
-
+static inline struct rte_storage *rt_next_hop_update_rte(rtable *tab, net *n, rte *old);
+static struct hostentry *rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
static void
net_init_with_trie(struct fib *f, void *N)
@@ -1555,13 +1557,7 @@ rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src
int fr;
stats->updates_received++;
- if (!rte_validate(c, new))
- {
- channel_rte_trace_in(D_FILTERS, c, new, "invalid");
- stats->updates_invalid++;
- new = NULL;
- }
- else if ((filter == FILTER_REJECT) ||
+ if ((filter == FILTER_REJECT) ||
((fr = f_run(filter, new, 0)) > F_ACCEPT))
{
stats->updates_filtered++;
@@ -1572,6 +1568,17 @@ rte_update_direct(struct channel *c, const net_addr *n, rte *new, struct rte_src
else
new = NULL;
}
+
+ if (new)
+ rt_next_hop_resolve_rte(new);
+
+ if (new && !rte_validate(c, new))
+ {
+ channel_rte_trace_in(D_FILTERS, c, new, "invalid");
+ stats->updates_invalid++;
+ new = NULL;
+ }
+
}
else
stats->withdraws_received++;
@@ -2513,9 +2520,29 @@ rt_preconfig(struct config *c)
*/
void
-rta_apply_hostentry(rta *a, struct hostentry *he)
+ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum])
+{
+ struct {
+ struct adata ad;
+ struct hostentry *he;
+ u32 labels[lnum];
+ } *head = (void *) tmp_alloc_adata(sizeof *head - sizeof(struct adata));
+
+ head->he = rt_get_hostentry(tab, gw, ll, dep);
+ memcpy(head->labels, labels, lnum * sizeof(u32));
+
+ ea_set_attr(to, EA_LITERAL_DIRECT_ADATA(
+ &ea_gen_hostentry, 0, &head->ad));
+}
+
+
+static void
+rta_apply_hostentry(rta *a, struct hostentry_adata *head)
{
- a->hostentry = he;
+ struct hostentry *he = head->he;
+ u32 *labels = head->labels;
+ u32 lnum = (u32 *) (head->ad.data + head->ad.length) - labels;
+
a->dest = he->dest;
ea_set_attr_u32(&a->eattrs, &ea_gen_igp_metric, 0, he->igp_metric);
@@ -2527,17 +2554,12 @@ rta_apply_hostentry(rta *a, struct hostentry *he)
return;
}
- eattr *mls_ea = ea_find(a->eattrs, &ea_mpls_labels);
-
- if (!mls_ea && he->nexthop_linkable)
+ if (!lnum && he->nexthop_linkable)
{ /* Just link the nexthop chain, no label append happens. */
ea_copy_attr(&a->eattrs, he->src->eattrs, &ea_gen_nexthop);
return;
}
- const struct adata *mls = mls_ea ? mls_ea->u.ptr : NULL;
- uint mls_cnt = mls ? mls->length / sizeof(u32) : 0;
-
eattr *he_nh_ea = ea_find(he->src->eattrs, &ea_gen_nexthop);
struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr;
@@ -2545,14 +2567,14 @@ rta_apply_hostentry(rta *a, struct hostentry *he)
NEXTHOP_WALK(nh, nhad)
{
- if (nh->labels + mls_cnt > MPLS_MAX_LABEL_STACK)
+ if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
{
log(L_WARN "Sum of label stack sizes %d + %d = %d exceeds allowed maximum (%d)",
- nh->labels, mls_cnt, nh->labels + mls_cnt, MPLS_MAX_LABEL_STACK);
+ nh->labels, lnum, nh->labels + lnum, MPLS_MAX_LABEL_STACK);
continue;
}
- total_size += NEXTHOP_SIZE_CNT(nh->labels + mls_cnt);
+ total_size += NEXTHOP_SIZE_CNT(nh->labels + lnum);
}
if (total_size == OFFSETOF(struct nexthop_adata, nh))
@@ -2569,14 +2591,14 @@ rta_apply_hostentry(rta *a, struct hostentry *he)
NEXTHOP_WALK(nh, nhad)
{
- if (nh->labels + mls_cnt > MPLS_MAX_LABEL_STACK)
+ if (nh->labels + lnum > MPLS_MAX_LABEL_STACK)
continue;
memcpy(dest, nh, NEXTHOP_SIZE(nh));
- if (mls_cnt)
+ if (lnum)
{
- memcpy(&(dest->label[dest->labels]), mls->data, mls->length);
- dest->labels += mls_cnt;
+ memcpy(&(dest->label[dest->labels]), labels, lnum * sizeof labels[0]);
+ dest->labels += lnum;
}
if (ipa_nonzero(nh->gw))
@@ -2598,45 +2620,65 @@ rta_apply_hostentry(rta *a, struct hostentry *he)
&ea_gen_nexthop, 0, &new->ad));
}
-static inline int
+static inline struct hostentry_adata *
rta_next_hop_outdated(rta *a)
{
- struct hostentry *he = a->hostentry;
+ eattr *heea = ea_find(a->eattrs, &ea_gen_hostentry);
+ if (!heea)
+ return NULL;
- if (!he)
- return 0;
+ struct hostentry_adata *head = (struct hostentry_adata *) heea->u.ptr;
- if (!he->src)
- return a->dest != RTD_UNREACHABLE;
+ if (!head->he->src)
+ return (a->dest != RTD_UNREACHABLE) ? head : NULL;
- eattr *he_nh_ea = ea_find(he->src->eattrs, &ea_gen_nexthop);
+ eattr *he_nh_ea = ea_find(head->he->src->eattrs, &ea_gen_nexthop);
eattr *a_nh_ea = ea_find(a->eattrs, &ea_gen_nexthop);
- return (a->dest != he->dest) ||
- (ea_get_int(a->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != he->igp_metric) ||
- (!he->nexthop_linkable) ||
- (!he_nh_ea != !a_nh_ea) ||
- (he_nh_ea && a_nh_ea && !adata_same(he_nh_ea->u.ptr, a_nh_ea->u.ptr));
+ return ((a->dest != head->he->dest) ||
+ (ea_get_int(a->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != head->he->igp_metric) ||
+ (!head->he->nexthop_linkable) ||
+ (!he_nh_ea != !a_nh_ea) ||
+ (he_nh_ea && a_nh_ea && !adata_same(he_nh_ea->u.ptr, a_nh_ea->u.ptr)))
+ ? head : NULL;
}
static inline struct rte_storage *
rt_next_hop_update_rte(rtable *tab, net *n, rte *old)
{
- if (!rta_next_hop_outdated(old->attrs))
+ struct hostentry_adata *head = rta_next_hop_outdated(old->attrs);
+ if (!head)
return NULL;
- rta *a = alloca(RTA_MAX_SIZE);
- memcpy(a, old->attrs, rta_size(old->attrs));
-
- rta_apply_hostentry(a, old->attrs->hostentry);
- a->cached = 0;
+ rta a = *old->attrs;
+ a.cached = 0;
+ rta_apply_hostentry(&a, head);
rte e0 = *old;
- e0.attrs = a;
+ e0.attrs = &a;
return rte_store(&e0, n, tab);
}
+static inline void
+rt_next_hop_resolve_rte(rte *r)
+{
+ eattr *heea = ea_find(r->attrs->eattrs, &ea_gen_hostentry);
+ if (!heea)
+ return;
+
+ struct hostentry_adata *head = (struct hostentry_adata *) heea->u.ptr;
+
+ if (r->attrs->cached)
+ {
+ rta *a = tmp_alloc(RTA_MAX_SIZE);
+ *a = *r->attrs;
+ a->cached = 0;
+ r->attrs = a;
+ }
+
+ rta_apply_hostentry(r->attrs, head);
+}
#ifdef CONFIG_BGP
@@ -3585,7 +3627,7 @@ rt_update_hostentry(rtable *tab, struct hostentry *he)
rta *a = e->rte.attrs;
pxlen = n->n.addr->pxlen;
- if (a->hostentry)
+ if (ea_find(a->eattrs, &ea_gen_hostentry))
{
/* Recursive route should not depend on another recursive route */
log(L_WARN "Next hop address %I resolvable through recursive route for %N",
@@ -3658,7 +3700,7 @@ rt_update_hostcache(rtable *tab)
tab->hcu_scheduled = 0;
}
-struct hostentry *
+static struct hostentry *
rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep)
{
struct hostentry *he;
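
rta_apply_hostentry() above merges the label stack carried in the hostentry adata into every next hop of the host route, skipping a next hop whenever the combined stack would exceed MPLS_MAX_LABEL_STACK. A self-contained sketch of that appending step, with simplified stand-ins for struct nexthop and the label-stack limit (the real definitions live in BIRD's headers):

/* Rough sketch of the label-appending step in rta_apply_hostentry() above;
 * struct nexthop and MPLS_MAX_LABEL_STACK are simplified stand-ins here,
 * not BIRD's real definitions. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

#define MPLS_MAX_LABEL_STACK 8       /* illustrative limit only */

struct nexthop {
  u32 labels;                        /* labels already on this next hop */
  u32 label[MPLS_MAX_LABEL_STACK];   /* the label stack itself */
};

/* Append lnum labels from the hostentry adata to one next hop; as in the
 * patch, a next hop whose combined stack would overflow is skipped. */
static int
nexthop_append_labels(struct nexthop *dest, const u32 *labels, u32 lnum)
{
  if (dest->labels + lnum > MPLS_MAX_LABEL_STACK)
  {
    fprintf(stderr, "Sum of label stack sizes %u + %u = %u exceeds allowed maximum (%u)\n",
            dest->labels, lnum, dest->labels + lnum, (u32) MPLS_MAX_LABEL_STACK);
    return 0;
  }

  memcpy(&dest->label[dest->labels], labels, lnum * sizeof labels[0]);
  dest->labels += lnum;
  return 1;
}

int main(void)
{
  struct nexthop nh = { .labels = 2, .label = { 100, 200 } };
  u32 extra[] = { 300, 400 };        /* labels carried in the hostentry adata */

  if (nexthop_append_labels(&nh, extra, 2))
    printf("next hop now carries %u labels\n", nh.labels);   /* prints 4 */

  return 0;
}

In the patch itself the destination nexthop_adata is first sized by summing NEXTHOP_SIZE_CNT(nh->labels + lnum) over exactly the next hops that pass this check, so the sizing loop and the copy loop must apply the same overflow condition.
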
diff --git a/nest/rt.h b/nest/rt.h
index fc8e2d3c..0ee615b8 100644
--- a/nest/rt.h
+++ b/nest/rt.h
@@ -438,39 +438,27 @@ struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t
#define RSEM_NOEXPORT 3 /* Routes rejected by export filter */
#define RSEM_EXPORTED 4 /* Routes marked in export map */
+/* Host entry: Resolve hook for recursive nexthops */
+extern struct ea_class ea_gen_hostentry;
+struct hostentry_adata {
+ adata ad;
+ struct hostentry *he;
+ u32 labels[0];
+};
+
+void
+ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]);
+
+/*
struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep);
-void rta_apply_hostentry(rta *a, struct hostentry *he);
+void rta_apply_hostentry(rta *a, struct hostentry *he, u32 lnum, u32 labels[lnum]);
static inline void
-rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll)
+rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum])
{
- rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep));
+ rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), lnum, labels);
}
-
-/*
- * rta_set_recursive_next_hop() acquires hostentry from hostcache and fills
- * rta->hostentry field. New hostentry has zero use count. Cached rta locks its
- * hostentry (increases its use count), uncached rta does not lock it. Hostentry
- * with zero use count is removed asynchronously during host cache update,
- * therefore it is safe to hold such hostentry temorarily. Hostentry holds a
- * lock for a 'source' rta, mainly to share multipath nexthops.
- *
- * There is no need to hold a lock for hostentry->dep table, because that table
- * contains routes responsible for that hostentry, and therefore is non-empty if
- * given hostentry has non-zero use count. If the hostentry has zero use count,
- * the entry is removed before dep is referenced.
- *
- * The protocol responsible for routes with recursive next hops should hold a
- * lock for a 'source' table governing that routes (argument tab to
- * rta_set_recursive_next_hop()), because its routes reference hostentries
- * (through rta) related to the governing table. When all such routes are
- * removed, rtas are immediately removed achieving zero uc. Then the 'source'
- * table lock could be immediately released, although hostentries may still
- * exist - they will be freed together with the 'source' table.
- */
-
-static inline void rt_lock_hostentry(struct hostentry *he) { if (he) he->uc++; }
-static inline void rt_unlock_hostentry(struct hostentry *he) { if (he) he->uc--; }
+*/
int rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, rta *a, int interior);
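
The hostentry_adata declared in rt.h above keeps the hostentry pointer and the MPLS label stack inside the opaque adata payload: ea_set_hostentry() sizes the payload from an anonymous VLA-tailed struct, and rta_apply_hostentry() recovers the label count from ad.length alone. A compilable sketch of that layout, with simplified stand-in types, plain malloc() instead of tmp_alloc_adata(), and offsetof()-based sizing in place of the VLA trick (zero-length array members need GCC, as in BIRD itself):

/* Compilable sketch of the hostentry_adata layout: the adata payload starts
 * right after ad.length and ends after the label array, so the label count
 * can be recovered from ad.length alone. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t u32;

struct adata {                       /* stand-in for BIRD's struct adata */
  u32 length;                        /* payload length in bytes */
  unsigned char data[0];
};

struct hostentry;                    /* opaque in this sketch */

struct hostentry_adata {             /* mirrors the declaration in rt.h */
  struct adata ad;
  struct hostentry *he;
  u32 labels[0];
};

/* Construction side: size the payload as "header up to labels[] plus the
 * label stack", then record everything after ad.length as the payload. */
static struct hostentry_adata *
make_hostentry_adata(struct hostentry *he, u32 lnum, const u32 labels[])
{
  size_t total = offsetof(struct hostentry_adata, labels) + lnum * sizeof(u32);
  struct hostentry_adata *head = malloc(total);

  head->ad.length = total - sizeof(struct adata);
  head->he = he;
  memcpy(head->labels, labels, lnum * sizeof(u32));
  return head;
}

/* Reading side, as in rta_apply_hostentry(): the labels run from
 * head->labels to the end of the adata payload. */
static u32
hostentry_label_count(const struct hostentry_adata *head)
{
  return (const u32 *) (head->ad.data + head->ad.length) - head->labels;
}

int main(void)
{
  u32 labels[] = { 16, 17, 18 };
  struct hostentry_adata *had = make_hostentry_adata(NULL, 3, labels);

  printf("%u labels recovered from the adata length\n", hostentry_label_count(had));
  free(had);
  return 0;
}

Keeping the count implicit in ad.length means the attribute stays an ordinary opaque adata; no separate label-count field has to be carried or kept in sync.
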