From 13225f1dbff54619476f2d8f6bc779dbb4983e3e Mon Sep 17 00:00:00 2001
From: "Ondrej Zajicek (work)"
Date: Sun, 5 Apr 2020 03:24:46 +0200
Subject: Filter: Faster prefix sets

Use 16-way (4-bit) branching in the prefix trie instead of basic binary
branching. The change makes IPv4 prefix sets almost 3x faster, but at the
cost of higher memory consumption and a much more complicated algorithm.

Together with a previous filter change, it makes IPv4 prefix sets about
4.3x faster and slightly smaller (on my test data).
---
 lib/ip.h | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

(limited to 'lib/ip.h')

diff --git a/lib/ip.h b/lib/ip.h
index 5b179acb..cc36ce64 100644
--- a/lib/ip.h
+++ b/lib/ip.h
@@ -280,10 +280,16 @@ static inline uint ip6_pxlen(ip6_addr a, ip6_addr b)
 }
 
 static inline u32 ip4_getbit(ip4_addr a, uint pos)
-{ return _I(a) & (0x80000000 >> pos); }
+{ return (_I(a) >> (31 - pos)) & 1; }
+
+static inline u32 ip4_getbits(ip4_addr a, uint pos, uint n)
+{ return (_I(a) >> ((32 - n) - pos)) & ((1u << n) - 1); }
 
 static inline u32 ip6_getbit(ip6_addr a, uint pos)
-{ return a.addr[pos / 32] & (0x80000000 >> (pos % 32)); }
+{ return (a.addr[pos / 32] >> (31 - (pos % 32))) & 0x1; }
+
+static inline u32 ip6_getbits(ip6_addr a, uint pos, uint n)
+{ return (a.addr[pos / 32] >> ((32 - n) - (pos % 32))) & ((1u << n) - 1); }
 
 static inline u32 ip4_setbit(ip4_addr *a, uint pos)
 { return _I(*a) |= (0x80000000 >> pos); }
@@ -297,6 +303,13 @@ static inline u32 ip4_clrbit(ip4_addr *a, uint pos)
 static inline u32 ip6_clrbit(ip6_addr *a, uint pos)
 { return a->addr[pos / 32] &= ~(0x80000000 >> (pos % 32)); }
 
+static inline ip4_addr ip4_setbits(ip4_addr a, uint pos, uint val)
+{ _I(a) |= val << (31 - pos); return a; }
+
+static inline ip6_addr ip6_setbits(ip6_addr a, uint pos, uint val)
+{ a.addr[pos / 32] |= val << (31 - pos % 32); return a; }
+
+
 static inline ip4_addr ip4_opposite_m1(ip4_addr a)
 { return _MI4(_I(a) ^ 1); }
--
cgit v1.2.3


From 71c18d9f53ec0ea5eb512fdb6510d0c3350f96b4 Mon Sep 17 00:00:00 2001
From: "Ondrej Zajicek (work)"
Date: Sat, 13 Nov 2021 21:11:18 +0100
Subject: Trie: Simplify network matching code

Introduce ipX_prefix_equal() and use it to simplify the network matching code.
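These two commits rest on a pair of compact bit tricks: ip4_getbits() extracts the TRIE_STEP-bit child index used for the 16-way trie branching, and ip4_prefix_equal() compares two addresses up to a given prefix length with a single XOR and shift. The following standalone sketch (not part of the patches; it uses plain uint32_t in host byte order instead of BIRD's ip4_addr wrapper and _I() accessor, and the sample addresses are illustrative only) shows the same logic in isolation:

/*
 * Editorial sketch of the bit tricks above, on plain uint32_t values.
 * Names and constants are stand-ins, not BIRD API.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRIE_STEP 4   /* 16-way branching: each trie level consumes 4 bits */

/* Single bit at position 'pos', counted from the most significant bit. */
static uint32_t getbit(uint32_t a, unsigned pos)
{ return (a >> (31 - pos)) & 1; }

/* The n-bit field starting at bit position 'pos'. */
static uint32_t getbits(uint32_t a, unsigned pos, unsigned n)
{ return (a >> ((32 - n) - pos)) & ((1u << n) - 1); }

/* Do 'a' and 'b' agree in their first 'n' bits?  XOR clears equal bits, so
 * the prefixes match iff the difference lies entirely below bit (32 - n).
 * The 64-bit constant keeps the shift well-defined for n == 0. */
static int prefix_equal(uint32_t a, uint32_t b, unsigned n)
{ return (a ^ b) < ((uint64_t) 1 << (32 - n)); }

int main(void)
{
  uint32_t addr = 0xc0a80100;                  /* 192.168.1.0 */

  assert(getbit(addr, 0) == 1);                /* top bit is set */
  assert(getbits(addr, 0, TRIE_STEP) == 0xc);  /* child index at the trie root */
  assert(getbits(addr, 4, TRIE_STEP) == 0x0);  /* child index one level down */

  assert( prefix_equal(0xc0a80100, 0xc0a801ff, 24));
  assert(!prefix_equal(0xc0a80100, 0xc0a802ff, 24));
  assert( prefix_equal(0x00000000, 0xffffffff, 0));

  puts("ok");
  return 0;
}

The asserts play the same role as the ip4_prefix_equal() test cases added to lib/ip_test.c in the commit below.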
--- filter/trie.c | 22 ++++++-------------- lib/ip.h | 18 ++++++++++++++++ lib/ip_test.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 16 deletions(-) (limited to 'lib/ip.h') diff --git a/filter/trie.c b/filter/trie.c index dbed5ace..5d9cc952 100644 --- a/filter/trie.c +++ b/filter/trie.c @@ -424,9 +424,6 @@ trie_add_prefix(struct f_trie *t, const net_addr *net, uint l, uint h) static int trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen) { - ip4_addr pmask = ip4_mkmask(plen); - ip4_addr paddr = ip4_and(px, pmask); - if (plen == 0) return t->zero; @@ -437,10 +434,8 @@ trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen) while (n) { - ip4_addr cmask = ip4_and(n->mask, pmask); - /* We are out of path */ - if (ip4_compare(ip4_and(paddr, cmask), ip4_and(n->addr, cmask))) + if (!ip4_prefix_equal(px, n->addr, MIN(plen, n->plen))) return 0; /* Check local mask */ @@ -452,11 +447,11 @@ trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen) return 1; /* We finished trie walk and still no match */ - if (plen <= n->plen) + if (nlen <= n->plen) return 0; /* Choose children */ - n = n->c[ip4_getbits(paddr, n->plen, TRIE_STEP)]; + n = n->c[ip4_getbits(px, n->plen, TRIE_STEP)]; } return 0; @@ -465,9 +460,6 @@ trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen) static int trie_match_net6(const struct f_trie *t, ip6_addr px, uint plen) { - ip6_addr pmask = ip6_mkmask(plen); - ip6_addr paddr = ip6_and(px, pmask); - if (plen == 0) return t->zero; @@ -478,10 +470,8 @@ trie_match_net6(const struct f_trie *t, ip6_addr px, uint plen) while (n) { - ip6_addr cmask = ip6_and(n->mask, pmask); - /* We are out of path */ - if (ip6_compare(ip6_and(paddr, cmask), ip6_and(n->addr, cmask))) + if (!ip6_prefix_equal(px, n->addr, MIN(plen, n->plen))) return 0; /* Check local mask */ @@ -493,11 +483,11 @@ trie_match_net6(const struct f_trie *t, ip6_addr px, uint plen) return 1; /* We finished trie walk and still no match */ - if (plen <= n->plen) + if (nlen <= n->plen) return 0; /* Choose children */ - n = n->c[ip6_getbits(paddr, n->plen, TRIE_STEP)]; + n = n->c[ip6_getbits(px, n->plen, TRIE_STEP)]; } return 0; diff --git a/lib/ip.h b/lib/ip.h index cc36ce64..9eef2e16 100644 --- a/lib/ip.h +++ b/lib/ip.h @@ -279,6 +279,24 @@ static inline uint ip6_pxlen(ip6_addr a, ip6_addr b) return 32 * i + 31 - u32_log2(a.addr[i] ^ b.addr[i]); } +static inline int ip4_prefix_equal(ip4_addr a, ip4_addr b, uint n) +{ + return (_I(a) ^ _I(b)) < ((u64) 1 << (32 - n)); +} + +static inline int ip6_prefix_equal(ip6_addr a, ip6_addr b, uint n) +{ + uint n0 = n / 32; + uint n1 = n % 32; + + return + ((n0 <= 0) || (_I0(a) == _I0(b))) && + ((n0 <= 1) || (_I1(a) == _I1(b))) && + ((n0 <= 2) || (_I2(a) == _I2(b))) && + ((n0 <= 3) || (_I3(a) == _I3(b))) && + (!n1 || ((a.addr[n0] ^ b.addr[n0]) < (1u << (32 - n1)))); +} + static inline u32 ip4_getbit(ip4_addr a, uint pos) { return (_I(a) >> (31 - pos)) & 1; } diff --git a/lib/ip_test.c b/lib/ip_test.c index 36d10d68..eee0a427 100644 --- a/lib/ip_test.c +++ b/lib/ip_test.c @@ -167,6 +167,70 @@ t_ip6_ntop(void) return bt_assert_batch(test_vectors, test_ipa_ntop, bt_fmt_ipa, bt_fmt_str); } +static int +t_ip4_prefix_equal(void) +{ + bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x1234ffff), 16)); + bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x1234ffff), 17)); + bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345000), 21)); + 
bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345000), 22)); + + bt_assert( ip4_prefix_equal(ip4_from_u32(0x00000000), ip4_from_u32(0xffffffff), 0)); + bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345678), 0)); + + bt_assert( ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345678), 32)); + bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x12345679), 32)); + bt_assert(!ip4_prefix_equal(ip4_from_u32(0x12345678), ip4_from_u32(0x92345678), 32)); + + return 1; +} + +static int +t_ip6_prefix_equal(void) +{ + bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x1234ffff, 0xfefefefe, 0xdcdcdcdc), + 48)); + + bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x1234ffff, 0xfefefefe, 0xdcdcdcdc), + 49)); + + bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20020db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc), + 48)); + + bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc), + 64)); + + bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x1234567e, 0xfefefefe, 0xdcdcdcdc), + 64)); + + bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20002020), + ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + 106)); + + bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20002020), + ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + 107)); + + bt_assert( ip6_prefix_equal(ip6_build(0xfeef0db8, 0x87654321, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x12345678, 0xfefefefe, 0xdcdcdcdc), + 0)); + + bt_assert( ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + 128)); + + bt_assert(!ip6_prefix_equal(ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202020), + ip6_build(0x20010db8, 0x12345678, 0x10101010, 0x20202021), + 128)); + + return 1; +} + int main(int argc, char *argv[]) { @@ -176,6 +240,8 @@ main(int argc, char *argv[]) bt_test_suite(t_ip6_pton, "Converting IPv6 string to ip6_addr struct"); bt_test_suite(t_ip4_ntop, "Converting ip4_addr struct to IPv4 string"); bt_test_suite(t_ip6_ntop, "Converting ip6_addr struct to IPv6 string"); + bt_test_suite(t_ip4_prefix_equal, "Testing ip4_prefix_equal()"); + bt_test_suite(t_ip6_prefix_equal, "Testing ip6_prefix_equal()"); return bt_exit_value(); } -- cgit v1.2.3 From f2e725a76882ba6b75c3ce4fb3c760bd83462410 Mon Sep 17 00:00:00 2001 From: Maria Matejka Date: Thu, 5 May 2022 19:28:56 +0200 Subject: All outstanding MPLS label stacks are stored as adata --- conf/confbase.Y | 16 +++++++++------- lib/ip.h | 4 ---- lib/route.h | 5 ++++- nest/rt-attr.c | 11 +++++++++-- nest/rt-table.c | 27 +++++++++++---------------- nest/rt.h | 6 +++--- proto/bgp/packets.c | 43 +++++++++++++++++++++++-------------------- proto/static/static.c | 28 +++++++++------------------- proto/static/static.h | 2 +- 9 files changed, 69 insertions(+), 73 deletions(-) (limited to 'lib/ip.h') diff --git a/conf/confbase.Y b/conf/confbase.Y index 8a27c3d5..9a83083c 100644 --- a/conf/confbase.Y +++ b/conf/confbase.Y @@ -92,7 +92,7 @@ CF_DECLS struct proto_spec ps; struct channel_limit cl; struct timeformat *tf; - mpls_label_stack *mls; + struct adata *ad; 
struct bytestring *bs; } @@ -113,7 +113,7 @@ CF_DECLS %type ipa %type net_ip4_ net_ip6_ net_ip6 net_ip_ net_ip net_or_ipa %type net_ net_any net_vpn4_ net_vpn6_ net_vpn_ net_roa4_ net_roa6_ net_roa_ net_ip6_sadr_ net_mpls_ -%type label_stack_start label_stack +%type label_stack_start label_stack %type text opttext %type symbol symbol_known toksym @@ -351,17 +351,19 @@ net_or_ipa: label_stack_start: NUM { - $$ = cfg_allocz(sizeof(mpls_label_stack)); - $$->len = 1; - $$->stack[0] = $1; + $$ = cfg_allocz(ADATA_SIZE(MPLS_MAX_LABEL_STACK * sizeof(u32))); + $$->length = sizeof(u32); + *((u32 *)$$->data) = $1; }; label_stack: label_stack_start | label_stack '/' NUM { - if ($1->len >= MPLS_MAX_LABEL_STACK) + if ($1->length >= MPLS_MAX_LABEL_STACK * sizeof(u32)) cf_error("Too many labels in stack"); - $1->stack[$1->len++] = $3; + + *((u32 *)($$->data + $1->length)) = $3; + $1->length += sizeof(u32); $$ = $1; } ; diff --git a/lib/ip.h b/lib/ip.h index 9eef2e16..8f975aba 100644 --- a/lib/ip.h +++ b/lib/ip.h @@ -363,10 +363,6 @@ static inline ip6_addr ip6_ntoh(ip6_addr a) { return _MI6(ntohl(_I0(a)), ntohl(_I1(a)), ntohl(_I2(a)), ntohl(_I3(a))); } #define MPLS_MAX_LABEL_STACK 8 -typedef struct mpls_label_stack { - uint len; - u32 stack[MPLS_MAX_LABEL_STACK]; -} mpls_label_stack; static inline int mpls_get(const char *buf, int buflen, u32 *stack) diff --git a/lib/route.h b/lib/route.h index 8fdb5d8b..5423ada3 100644 --- a/lib/route.h +++ b/lib/route.h @@ -70,7 +70,6 @@ struct nexthop { struct nexthop *next; byte flags; byte weight; - byte labels_orig; /* Number of labels before hostentry was applied */ byte labels; /* Number of all labels */ u32 label[0]; }; @@ -314,6 +313,10 @@ extern struct ea_class ea_gen_source; static inline u32 rt_get_source_attr(rte *rt) { return ea_get_int(rt->attrs->eattrs, &ea_gen_source, 0); } +/* MPLS labels: Use with a recursive nexthop specification + * to add additional labels to the resolved nexthop */ +extern struct ea_class ea_mpls_labels; + /* Next hop structures */ #define NEXTHOP_MAX_SIZE (sizeof(struct nexthop) + sizeof(u32)*MPLS_MAX_LABEL_STACK) diff --git a/nest/rt-attr.c b/nest/rt-attr.c index 39fd7db4..dc4fe785 100644 --- a/nest/rt-attr.c +++ b/nest/rt-attr.c @@ -108,6 +108,12 @@ struct ea_class ea_gen_source = { .format = ea_gen_source_format, }; +struct ea_class ea_mpls_labels = { + .name = "mpls_labels", + .type = T_CLIST, + .readonly = 1, +}; + const char * rta_dest_names[RTD_MAX] = { [RTD_NONE] = "", [RTD_UNICAST] = "unicast", @@ -220,7 +226,7 @@ nexthop__same(struct nexthop *x, struct nexthop *y) { if (!ipa_equal(x->gw, y->gw) || (x->iface != y->iface) || (x->flags != y->flags) || (x->weight != y->weight) || - (x->labels_orig != y->labels_orig) || (x->labels != y->labels)) + (x->labels != y->labels)) return 0; for (int i = 0; i < x->labels; i++) @@ -402,7 +408,6 @@ nexthop_copy(struct nexthop *o) n->next = NULL; n->flags = o->flags; n->weight = o->weight; - n->labels_orig = o->labels_orig; n->labels = o->labels; for (int i=0; ilabels; i++) n->label[i] = o->label[i]; @@ -1484,6 +1489,8 @@ rta_init(void) ea_register_init(&ea_gen_igp_metric); ea_register_init(&ea_gen_from); ea_register_init(&ea_gen_source); + + ea_register_init(&ea_mpls_labels); } /* diff --git a/nest/rt-table.c b/nest/rt-table.c index 4f119ac0..37f17bbc 100644 --- a/nest/rt-table.c +++ b/nest/rt-table.c @@ -2385,7 +2385,7 @@ rt_preconfig(struct config *c) */ void -rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls) +rta_apply_hostentry(rta *a, struct hostentry *he) { 
a->hostentry = he; a->dest = he->dest; @@ -2397,15 +2397,12 @@ rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls) /* No nexthop */ no_nexthop: a->nh = (struct nexthop) {}; - if (mls) - { /* Store the label stack for later changes */ - a->nh.labels_orig = a->nh.labels = mls->len; - memcpy(a->nh.label, mls->stack, mls->len * sizeof(u32)); - } return; } - if (((!mls) || (!mls->len)) && he->nexthop_linkable) + eattr *mls_ea = ea_find(a->eattrs, &ea_mpls_labels); + + if (!mls_ea && he->nexthop_linkable) { /* Just link the nexthop chain, no label append happens. */ memcpy(&(a->nh), &(he->src->nh), nexthop_size(&(he->src->nh))); return; @@ -2414,6 +2411,9 @@ no_nexthop: struct nexthop *nhp = NULL, *nhr = NULL; int skip_nexthop = 0; + const struct adata *mls = mls_ea ? mls_ea->u.ptr : NULL; + uint mls_cnt = mls ? mls->length / sizeof(u32) : 0; + for (struct nexthop *nh = &(he->src->nh); nh; nh = nh->next) { if (skip_nexthop) @@ -2430,17 +2430,16 @@ no_nexthop: if (mls) { - nhp->labels = nh->labels + mls->len; - nhp->labels_orig = mls->len; + nhp->labels = nh->labels + mls_cnt; if (nhp->labels <= MPLS_MAX_LABEL_STACK) { memcpy(nhp->label, nh->label, nh->labels * sizeof(u32)); /* First the hostentry labels */ - memcpy(&(nhp->label[nh->labels]), mls->stack, mls->len * sizeof(u32)); /* Then the bottom labels */ + memcpy(&(nhp->label[nh->labels]), mls->data, mls->length); /* Then the bottom labels */ } else { log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)", - nh->labels, mls->len, nhp->labels, MPLS_MAX_LABEL_STACK); + nh->labels, mls_cnt, nhp->labels, MPLS_MAX_LABEL_STACK); skip_nexthop++; continue; } @@ -2448,7 +2447,6 @@ no_nexthop: else if (nh->labels) { nhp->labels = nh->labels; - nhp->labels_orig = 0; memcpy(nhp->label, nh->label, nh->labels * sizeof(u32)); } @@ -2501,10 +2499,7 @@ rt_next_hop_update_rte(rtable *tab UNUSED, rte *old) rta *a = alloca(RTA_MAX_SIZE); memcpy(a, old->attrs, rta_size(old->attrs)); - mpls_label_stack mls = { .len = a->nh.labels_orig }; - memcpy(mls.stack, &a->nh.label[a->nh.labels - mls.len], mls.len * sizeof(u32)); - - rta_apply_hostentry(a, old->attrs->hostentry, &mls); + rta_apply_hostentry(a, old->attrs->hostentry); a->cached = 0; rte *e = sl_alloc(rte_slab); diff --git a/nest/rt.h b/nest/rt.h index 6bd9cd38..50be7e0b 100644 --- a/nest/rt.h +++ b/nest/rt.h @@ -281,12 +281,12 @@ struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t #define RSEM_EXPORTED 4 /* Routes marked in export map */ struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep); -void rta_apply_hostentry(rta *a, struct hostentry *he, mpls_label_stack *mls); +void rta_apply_hostentry(rta *a, struct hostentry *he); static inline void -rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, mpls_label_stack *mls) +rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll) { - rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), mls); + rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep)); } /* diff --git a/proto/bgp/packets.c b/proto/bgp/packets.c index 0aa4dc40..f8228313 100644 --- a/proto/bgp/packets.c +++ b/proto/bgp/packets.c @@ -980,15 +980,18 @@ bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll) s->hostentry = rt_get_hostentry(tab, gw, ll, c->c.table); if (!s->mpls) - rta_apply_hostentry(a, s->hostentry, NULL); + rta_apply_hostentry(a, s->hostentry); /* With MPLS, hostentry is applied later in 
bgp_apply_mpls_labels() */ } } static void -bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 *labels, uint lnum) +bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a) { + u32 *labels = (u32 *) s->mpls_labels->data; + u32 lnum = s->mpls_labels->length / sizeof(u32); + if (lnum > MPLS_MAX_LABEL_STACK) { REPORT("Too many MPLS labels ($u)", lnum); @@ -1001,7 +1004,7 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 *labels, uint lnum) /* Handle implicit NULL as empty MPLS stack */ if ((lnum == 1) && (labels[0] == BGP_MPLS_NULL)) - lnum = 0; + lnum = s->mpls_labels->length = 0; if (s->channel->cf->gw_mode == GW_DIRECT) { @@ -1009,13 +1012,7 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 *labels, uint lnum) memcpy(a->nh.label, labels, 4*lnum); } else /* GW_RECURSIVE */ - { - mpls_label_stack ms; - - ms.len = lnum; - memcpy(ms.stack, labels, 4*lnum); - rta_apply_hostentry(a, s->hostentry, &ms); - } + rta_apply_hostentry(a, s->hostentry); } static void @@ -1418,7 +1415,13 @@ bgp_encode_mpls_labels(struct bgp_write_state *s UNUSED, const adata *mpls, byte static void bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *pxlen, rta *a) { - u32 labels[BGP_MPLS_MAX], label; + struct { + struct adata ad; + u32 labels[BGP_MPLS_MAX]; + } labels_adata; + + u32 *labels = labels_adata.labels; + u32 label; uint lnum = 0; do { @@ -1440,19 +1443,19 @@ bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *p if (!a) return; + labels_adata.ad.length = lnum * sizeof(u32); + /* Attach MPLS attribute unless we already have one */ if (!s->mpls_labels) - { - s->mpls_labels = lp_alloc_adata(s->pool, 4*BGP_MPLS_MAX); - bgp_set_attr_ptr(&(a->eattrs), BA_MPLS_LABEL_STACK, 0, s->mpls_labels); - } - - /* Overwrite data in the attribute */ - s->mpls_labels->length = 4*lnum; - memcpy(s->mpls_labels->data, labels, 4*lnum); + ea_set_attr(&(a->eattrs), + EA_LITERAL_DIRECT_ADATA(&ea_mpls_labels, 0, + (s->mpls_labels = tmp_store_adata(labels, BGP_MPLS_MAX * sizeof(u32))))); + else + /* Overwrite data in the attribute */ + memcpy(s->mpls_labels, &labels_adata, sizeof labels_adata); /* Update next hop entry in rta */ - bgp_apply_mpls_labels(s, a, labels, lnum); + bgp_apply_mpls_labels(s, a); /* Attributes were changed, invalidate cached entry */ rta_free(s->cached_rta); diff --git a/proto/static/static.c b/proto/static/static.c index 1400e985..e792a148 100644 --- a/proto/static/static.c +++ b/proto/static/static.c @@ -76,8 +76,8 @@ static_announce_rte(struct static_proto *p, struct static_route *r) nh->weight = r2->weight; if (r2->mls) { - nh->labels = r2->mls->len; - memcpy(nh->label, r2->mls->stack, r2->mls->len * sizeof(u32)); + nh->labels = r2->mls->length / sizeof(u32); + memcpy(nh->label, r2->mls->data, r2->mls->length); } nexthop_insert(&nhs, nh); @@ -92,7 +92,11 @@ static_announce_rte(struct static_proto *p, struct static_route *r) if (r->dest == RTDX_RECURSIVE) { rtable *tab = ipa_is_ip4(r->via) ? 
p->igp_table_ip4 : p->igp_table_ip6; - rta_set_recursive_next_hop(p->p.main_channel->table, a, tab, r->via, IPA_NONE, r->mls); + if (r->mls) + ea_set_attr(&a->eattrs, + EA_LITERAL_DIRECT_ADATA(&ea_mpls_labels, 0, r->mls)); + + rta_set_recursive_next_hop(p->p.main_channel->table, a, tab, r->via, IPA_NONE); } /* Already announced */ @@ -314,31 +318,17 @@ static_same_dest(struct static_route *x, struct static_route *y) (x->weight != y->weight) || (x->use_bfd != y->use_bfd) || (!x->mls != !y->mls) || - ((x->mls) && (y->mls) && (x->mls->len != y->mls->len))) + ((x->mls) && (y->mls) && adata_same(x->mls, y->mls))) return 0; - - if (!x->mls) - continue; - - for (uint i = 0; i < x->mls->len; i++) - if (x->mls->stack[i] != y->mls->stack[i]) - return 0; } return !x && !y; case RTDX_RECURSIVE: if (!ipa_equal(x->via, y->via) || (!x->mls != !y->mls) || - ((x->mls) && (y->mls) && (x->mls->len != y->mls->len))) + ((x->mls) && (y->mls) && adata_same(x->mls, y->mls))) return 0; - if (!x->mls) - return 1; - - for (uint i = 0; i < x->mls->len; i++) - if (x->mls->stack[i] != y->mls->stack[i]) - return 0; - return 1; default: diff --git a/proto/static/static.h b/proto/static/static.h index d99f7ebd..ea7ca33b 100644 --- a/proto/static/static.h +++ b/proto/static/static.h @@ -49,7 +49,7 @@ struct static_route { byte weight; /* Multipath next hop weight */ byte use_bfd; /* Configured to use BFD */ struct bfd_request *bfd_req; /* BFD request, if BFD is used */ - mpls_label_stack *mls; /* MPLS label stack; may be NULL */ + struct adata *mls; /* MPLS label stack; may be NULL */ }; /* -- cgit v1.2.3 From 4fe9881d625f10e44109a649e369a413bd98de71 Mon Sep 17 00:00:00 2001 From: Maria Matejka Date: Sun, 15 May 2022 15:53:35 +0200 Subject: Moved hostentry to eattr --- lib/ip.h | 2 +- lib/route.h | 7 +-- lib/type.h | 1 + nest/rt-attr.c | 56 +++++++++++++++++++----- nest/rt-table.c | 115 +++++++++++++++++++++++++++++++------------------- nest/rt.h | 44 +++++++------------ proto/bgp/bgp.h | 1 - proto/bgp/packets.c | 47 ++++++++------------- proto/pipe/pipe.c | 3 +- proto/static/static.c | 8 ++-- 10 files changed, 160 insertions(+), 124 deletions(-) (limited to 'lib/ip.h') diff --git a/lib/ip.h b/lib/ip.h index 8f975aba..20e7a336 100644 --- a/lib/ip.h +++ b/lib/ip.h @@ -362,7 +362,7 @@ static inline ip6_addr ip6_hton(ip6_addr a) static inline ip6_addr ip6_ntoh(ip6_addr a) { return _MI6(ntohl(_I0(a)), ntohl(_I1(a)), ntohl(_I2(a)), ntohl(_I3(a))); } -#define MPLS_MAX_LABEL_STACK 8 +#define MPLS_MAX_LABEL_STACK 16 static inline int mpls_get(const char *buf, int buflen, u32 *stack) diff --git a/lib/route.h b/lib/route.h index 29a78e90..3ce8021d 100644 --- a/lib/route.h +++ b/lib/route.h @@ -87,7 +87,6 @@ typedef struct rta { u32 uc; /* Use count */ u32 hash_key; /* Hash over important fields */ struct ea_list *eattrs; /* Extended Attribute chain */ - struct hostentry *hostentry; /* Hostentry for recursive next-hops */ u16 cached:1; /* Are attributes cached? */ u16 dest:4; /* Route destination type (RTD_...) 
*/ } rta; @@ -172,6 +171,8 @@ struct ea_class { uint readonly:1; /* This attribute can't be changed by filters */ \ uint conf:1; /* Requested by config */ \ void (*format)(const eattr *ea, byte *buf, uint size); \ + void (*stored)(const eattr *ea); /* When stored into global hash */ \ + void (*freed)(const eattr *ea); /* When released from global hash */ \ EA_CLASS_INSIDE; }; @@ -330,10 +331,6 @@ extern struct ea_class ea_gen_source; static inline u32 rt_get_source_attr(rte *rt) { return ea_get_int(rt->attrs->eattrs, &ea_gen_source, 0); } -/* MPLS labels: Use with a recursive nexthop specification - * to add additional labels to the resolved nexthop */ -extern struct ea_class ea_mpls_labels; - /* Next hop: For now, stored as adata */ extern struct ea_class ea_gen_nexthop; diff --git a/lib/type.h b/lib/type.h index 6da86c77..65a032ec 100644 --- a/lib/type.h +++ b/lib/type.h @@ -53,6 +53,7 @@ enum btype { T_OPAQUE = 0x02, /* Opaque byte string (not filterable) */ T_IFACE = 0x0c, /* Pointer to an interface (inside adata) */ T_NEXTHOP_LIST = 0x2c, /* The whole nexthop block */ + T_HOSTENTRY = 0x2e, /* Hostentry with possible MPLS labels */ /* Types shared with eattrs */ T_INT = 0x01, /* 32-bit unsigned integer number */ diff --git a/nest/rt-attr.c b/nest/rt-attr.c index bd7ca425..cf3ab659 100644 --- a/nest/rt-attr.c +++ b/nest/rt-attr.c @@ -114,10 +114,48 @@ struct ea_class ea_gen_nexthop = { .type = T_NEXTHOP_LIST, }; -struct ea_class ea_mpls_labels = { - .name = "mpls_labels", - .type = T_CLIST, +/* + * ea_set_hostentry() acquires hostentry from hostcache. + * New hostentry has zero use count. Cached rta locks its + * hostentry (increases its use count), uncached rta does not lock it. + * Hostentry with zero use count is removed asynchronously + * during host cache update, therefore it is safe to hold + * such hostentry temporarily as long as you hold the table lock. + * + * There is no need to hold a lock for hostentry->dep table, because that table + * contains routes responsible for that hostentry, and therefore is non-empty if + * given hostentry has non-zero use count. If the hostentry has zero use count, + * the entry is removed before dep is referenced. + * + * The protocol responsible for routes with recursive next hops should hold a + * lock for a 'source' table governing that routes (argument tab), + * because its routes reference hostentries related to the governing table. + * When all such routes are + * removed, rtas are immediately removed achieving zero uc. Then the 'source' + * table lock could be immediately released, although hostentries may still + * exist - they will be freed together with the 'source' table. 
+ */ + + static void +ea_gen_hostentry_stored(const eattr *ea) +{ + struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr; + had->he->uc++; +} + +static void +ea_gen_hostentry_freed(const eattr *ea) +{ + struct hostentry_adata *had = (struct hostentry_adata *) ea->u.ptr; + had->he->uc--; +} + +struct ea_class ea_gen_hostentry = { + .name = "hostentry", + .type = T_HOSTENTRY, .readonly = 1, + .stored = ea_gen_hostentry_stored, + .freed = ea_gen_hostentry_freed, }; const char * rta_dest_names[RTD_MAX] = { @@ -876,6 +914,8 @@ ea_list_ref(ea_list *l) struct ea_class *cl = ea_class_global[a->id]; ASSERT_DIE(cl && cl->uc); + + CALL(cl->stored, a); cl->uc++; } } @@ -890,6 +930,8 @@ ea_list_unref(ea_list *l) struct ea_class *cl = ea_class_global[a->id]; ASSERT_DIE(cl && cl->uc); + + CALL(cl->freed, a); if (!--cl->uc) ea_class_free(cl); } @@ -1206,9 +1248,7 @@ rta_hash(rta *a) { u64 h; mem_hash_init(&h); -#define MIX(f) mem_hash_mix(&h, &(a->f), sizeof(a->f)); #define BMIX(f) mem_hash_mix_num(&h, a->f); - MIX(hostentry); BMIX(dest); #undef MIX @@ -1219,7 +1259,6 @@ static inline int rta_same(rta *x, rta *y) { return (x->dest == y->dest && - x->hostentry == y->hostentry && ea_same(x->eattrs, y->eattrs)); } @@ -1303,7 +1342,6 @@ rta_lookup(rta *o) r = rta_copy(o); r->hash_key = h; r->cached = 1; - rt_lock_hostentry(r->hostentry); rta_insert(r); if (++rta_cache_count > rta_cache_limit) @@ -1320,7 +1358,6 @@ rta__free(rta *a) *a->pprev = a->next; if (a->next) a->next->pprev = a->pprev; - rt_unlock_hostentry(a->hostentry); ea_free(a->eattrs); a->cached = 0; sl_free(a); @@ -1411,8 +1448,7 @@ rta_init(void) ea_register_init(&ea_gen_from); ea_register_init(&ea_gen_source); ea_register_init(&ea_gen_nexthop); - - ea_register_init(&ea_mpls_labels); + ea_register_init(&ea_gen_hostentry); } /* diff --git a/nest/rt-table.c b/nest/rt-table.c index 6f948ada..d98f33e4 100644 --- a/nest/rt-table.c +++ b/nest/rt-table.c @@ -124,7 +124,8 @@ static void rt_next_hop_update(rtable *tab); static inline void rt_prune_table(rtable *tab); static inline void rt_schedule_notify(rtable *tab); static void rt_flowspec_notify(rtable *tab, net *net); - +static inline rte *rt_next_hop_update_rte(rtable *tab, rte *old); +static struct hostentry *rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep); static void net_init_with_trie(struct fib *f, void *N) @@ -1581,13 +1582,6 @@ rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src) new->sender = c; stats->imp_updates_received++; - if (!rte_validate(new)) - { - rte_trace_in(D_FILTERS, c, new, "invalid"); - stats->imp_updates_invalid++; - goto drop; - } - if (filter == FILTER_REJECT) { stats->imp_updates_filtered++; @@ -1613,6 +1607,23 @@ rte_update2(struct channel *c, const net_addr *n, rte *new, struct rte_src *src) new->flags |= REF_FILTERED; } } + + rte *new_resolved = rt_next_hop_update_rte(c->table, new); + if (new_resolved) + { + rte_free(new); + new = new_resolved; + } + + /* After all checks, updates and filters have been done, + * validate the route */ + if (!rte_validate(new)) + { + rte_trace_in(D_FILTERS, c, new, "invalid"); + stats->imp_updates_invalid++; + goto drop; + } + if (!rta_is_cached(new->attrs)) /* Need to copy attributes */ new->attrs = rta_lookup(new->attrs); new->flags |= REF_COW; @@ -2397,9 +2408,29 @@ rt_preconfig(struct config *c) */ void -rta_apply_hostentry(rta *a, struct hostentry *he) +ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 
labels[lnum]) { - a->hostentry = he; + struct { + struct adata ad; + struct hostentry *he; + u32 labels[lnum]; + } *head = (void *) tmp_alloc_adata(sizeof *head - sizeof(struct adata)); + + head->he = rt_get_hostentry(tab, gw, ll, dep); + memcpy(head->labels, labels, lnum * sizeof(u32)); + + ea_set_attr(to, EA_LITERAL_DIRECT_ADATA( + &ea_gen_hostentry, 0, &head->ad)); +} + + +static void +rta_apply_hostentry(rta *a, struct hostentry_adata *head) +{ + struct hostentry *he = head->he; + u32 *labels = head->labels; + u32 lnum = (u32 *) (head->ad.data + head->ad.length) - labels; + a->dest = he->dest; ea_set_attr_u32(&a->eattrs, &ea_gen_igp_metric, 0, he->igp_metric); @@ -2411,17 +2442,12 @@ rta_apply_hostentry(rta *a, struct hostentry *he) return; } - eattr *mls_ea = ea_find(a->eattrs, &ea_mpls_labels); - - if (!mls_ea && he->nexthop_linkable) + if (!lnum && he->nexthop_linkable) { /* Just link the nexthop chain, no label append happens. */ ea_copy_attr(&a->eattrs, he->src->eattrs, &ea_gen_nexthop); return; } - const struct adata *mls = mls_ea ? mls_ea->u.ptr : NULL; - uint mls_cnt = mls ? mls->length / sizeof(u32) : 0; - eattr *he_nh_ea = ea_find(he->src->eattrs, &ea_gen_nexthop); struct nexthop_adata *nhad = (struct nexthop_adata *) he_nh_ea->u.ptr; @@ -2429,14 +2455,14 @@ rta_apply_hostentry(rta *a, struct hostentry *he) NEXTHOP_WALK(nh, nhad) { - if (nh->labels + mls_cnt > MPLS_MAX_LABEL_STACK) + if (nh->labels + lnum > MPLS_MAX_LABEL_STACK) { log(L_WARN "Sum of label stack sizes %d + %d = %d exceedes allowed maximum (%d)", - nh->labels, mls_cnt, nh->labels + mls_cnt, MPLS_MAX_LABEL_STACK); + nh->labels, lnum, nh->labels + lnum, MPLS_MAX_LABEL_STACK); continue; } - total_size += NEXTHOP_SIZE_CNT(nh->labels + mls_cnt); + total_size += NEXTHOP_SIZE_CNT(nh->labels + lnum); } if (total_size == OFFSETOF(struct nexthop_adata, nh)) @@ -2453,14 +2479,14 @@ rta_apply_hostentry(rta *a, struct hostentry *he) NEXTHOP_WALK(nh, nhad) { - if (nh->labels + mls_cnt > MPLS_MAX_LABEL_STACK) + if (nh->labels + lnum > MPLS_MAX_LABEL_STACK) continue; memcpy(dest, nh, NEXTHOP_SIZE(nh)); - if (mls_cnt) + if (lnum) { - memcpy(&(dest->label[dest->labels]), mls->data, mls->length); - dest->labels += mls_cnt; + memcpy(&(dest->label[dest->labels]), labels, lnum * sizeof labels[0]); + dest->labels += lnum; } if (ipa_nonzero(nh->gw)) @@ -2482,42 +2508,43 @@ rta_apply_hostentry(rta *a, struct hostentry *he) &ea_gen_nexthop, 0, &new->ad)); } -static inline int +static inline struct hostentry_adata * rta_next_hop_outdated(rta *a) { - struct hostentry *he = a->hostentry; + eattr *heea = ea_find(a->eattrs, &ea_gen_hostentry); + if (!heea) + return NULL; - if (!he) - return 0; + struct hostentry_adata *head = (struct hostentry_adata *) heea->u.ptr; - if (!he->src) - return a->dest != RTD_UNREACHABLE; + if (!head->he->src) + return (a->dest != RTD_UNREACHABLE) ? 
head : NULL; - eattr *he_nh_ea = ea_find(he->src->eattrs, &ea_gen_nexthop); + eattr *he_nh_ea = ea_find(head->he->src->eattrs, &ea_gen_nexthop); eattr *a_nh_ea = ea_find(a->eattrs, &ea_gen_nexthop); - return (a->dest != he->dest) || - (ea_get_int(a->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != he->igp_metric) || - (!he->nexthop_linkable) || - (!he_nh_ea != !a_nh_ea) || - (he_nh_ea && a_nh_ea && !adata_same(he_nh_ea->u.ptr, a_nh_ea->u.ptr)); + return ((a->dest != head->he->dest) || + (ea_get_int(a->eattrs, &ea_gen_igp_metric, IGP_METRIC_UNKNOWN) != head->he->igp_metric) || + (!head->he->nexthop_linkable) || + (!he_nh_ea != !a_nh_ea) || + (he_nh_ea && a_nh_ea && !adata_same(he_nh_ea->u.ptr, a_nh_ea->u.ptr))) + ? head : NULL; } static inline rte * rt_next_hop_update_rte(rtable *tab UNUSED, rte *old) { - if (!rta_next_hop_outdated(old->attrs)) + struct hostentry_adata *head = rta_next_hop_outdated(old->attrs); + if (!head) return NULL; - rta *a = alloca(RTA_MAX_SIZE); - memcpy(a, old->attrs, rta_size(old->attrs)); - - rta_apply_hostentry(a, old->attrs->hostentry); - a->cached = 0; + rta a = *old->attrs; + a.cached = 0; + rta_apply_hostentry(&a, head); rte *e = sl_alloc(rte_slab); memcpy(e, old, sizeof(rte)); - e->attrs = rta_lookup(a); + e->attrs = rta_lookup(&a); rt_lock_source(e->src); return e; @@ -3510,7 +3537,7 @@ rt_update_hostentry(rtable *tab, struct hostentry *he) rta *a = e->attrs; pxlen = n->n.addr->pxlen; - if (a->hostentry) + if (ea_find(a->eattrs, &ea_gen_hostentry)) { /* Recursive route should not depend on another recursive route */ log(L_WARN "Next hop address %I resolvable through recursive route for %N", @@ -3583,7 +3610,7 @@ rt_update_hostcache(rtable *tab) tab->hcu_scheduled = 0; } -struct hostentry * +static struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep) { struct hostentry *he; diff --git a/nest/rt.h b/nest/rt.h index 50be7e0b..eb3f8454 100644 --- a/nest/rt.h +++ b/nest/rt.h @@ -280,39 +280,27 @@ struct rt_show_data_rtable * rt_show_add_table(struct rt_show_data *d, rtable *t #define RSEM_NOEXPORT 3 /* Routes rejected by export filter */ #define RSEM_EXPORTED 4 /* Routes marked in export map */ +/* Host entry: Resolve hook for recursive nexthops */ +extern struct ea_class ea_gen_hostentry; +struct hostentry_adata { + adata ad; + struct hostentry *he; + u32 labels[0]; +}; + +void +ea_set_hostentry(ea_list **to, struct rtable *dep, struct rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]); + +/* struct hostentry * rt_get_hostentry(rtable *tab, ip_addr a, ip_addr ll, rtable *dep); -void rta_apply_hostentry(rta *a, struct hostentry *he); +void rta_apply_hostentry(rta *a, struct hostentry *he, u32 lnum, u32 labels[lnum]); static inline void -rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll) +rta_set_recursive_next_hop(rtable *dep, rta *a, rtable *tab, ip_addr gw, ip_addr ll, u32 lnum, u32 labels[lnum]) { - rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep)); + rta_apply_hostentry(a, rt_get_hostentry(tab, gw, ll, dep), lnum, labels); } - -/* - * rta_set_recursive_next_hop() acquires hostentry from hostcache and fills - * rta->hostentry field. New hostentry has zero use count. Cached rta locks its - * hostentry (increases its use count), uncached rta does not lock it. Hostentry - * with zero use count is removed asynchronously during host cache update, - * therefore it is safe to hold such hostentry temorarily. 
Hostentry holds a - * lock for a 'source' rta, mainly to share multipath nexthops. - * - * There is no need to hold a lock for hostentry->dep table, because that table - * contains routes responsible for that hostentry, and therefore is non-empty if - * given hostentry has non-zero use count. If the hostentry has zero use count, - * the entry is removed before dep is referenced. - * - * The protocol responsible for routes with recursive next hops should hold a - * lock for a 'source' table governing that routes (argument tab to - * rta_set_recursive_next_hop()), because its routes reference hostentries - * (through rta) related to the governing table. When all such routes are - * removed, rtas are immediately removed achieving zero uc. Then the 'source' - * table lock could be immediately released, although hostentries may still - * exist - they will be freed together with the 'source' table. - */ - -static inline void rt_lock_hostentry(struct hostentry *he) { if (he) he->uc++; } -static inline void rt_unlock_hostentry(struct hostentry *he) { if (he) he->uc--; } +*/ int rt_flowspec_check(rtable *tab_ip, rtable *tab_flow, const net_addr *n, rta *a, int interior); diff --git a/proto/bgp/bgp.h b/proto/bgp/bgp.h index e04e3bd0..6abb7870 100644 --- a/proto/bgp/bgp.h +++ b/proto/bgp/bgp.h @@ -456,7 +456,6 @@ struct bgp_parse_state { uint err_subcode; jmp_buf err_jmpbuf; - struct hostentry *hostentry; adata *mpls_labels; /* Cached state for bgp_rte_update() */ diff --git a/proto/bgp/packets.c b/proto/bgp/packets.c index fd0a1be4..b07320aa 100644 --- a/proto/bgp/packets.c +++ b/proto/bgp/packets.c @@ -986,27 +986,24 @@ bgp_apply_next_hop(struct bgp_parse_state *s, rta *a, ip_addr gw, ip_addr ll) WITHDRAW(BAD_NEXT_HOP " - zero address"); rtable *tab = ipa_is_ip4(gw) ? 
c->igp_table_ip4 : c->igp_table_ip6; - s->hostentry = rt_get_hostentry(tab, gw, ll, c->c.table); - - if (!s->mpls) - rta_apply_hostentry(a, s->hostentry); - - /* With MPLS, hostentry is applied later in bgp_apply_mpls_labels() */ + if (s->mpls) + { + u32 labels[BGP_MPLS_MAX]; + ea_set_hostentry(&a->eattrs, c->c.table, tab, gw, ll, BGP_MPLS_MAX, labels); + } + else + ea_set_hostentry(&a->eattrs, c->c.table, tab, gw, ll, 0, NULL); } } static void -bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a) +bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a, u32 lnum, u32 labels[lnum]) { - u32 *labels = (u32 *) s->mpls_labels->data; - u32 lnum = s->mpls_labels->length / sizeof(u32); - if (lnum > MPLS_MAX_LABEL_STACK) { REPORT("Too many MPLS labels ($u)", lnum); a->dest = RTD_UNREACHABLE; - a->hostentry = NULL; ea_unset_attr(&a->eattrs, 0, &ea_gen_nexthop); return; } @@ -1029,7 +1026,13 @@ bgp_apply_mpls_labels(struct bgp_parse_state *s, rta *a) nh.nhad.ad.length = sizeof nh.nhad + lnum * sizeof(u32); } else /* GW_RECURSIVE */ - rta_apply_hostentry(a, s->hostentry); + { + eattr *e = ea_find(a->eattrs, &ea_gen_hostentry); + ASSERT_DIE(e); + struct hostentry_adata *head = (void *) e->u.ptr; + memcpy(&head->labels, labels, lnum * sizeof(u32)); + head->ad.length = (void *)(&head->labels[lnum]) - (void *) head->ad.data; + } } static void @@ -1445,12 +1448,7 @@ bgp_encode_mpls_labels(struct bgp_write_state *s UNUSED, const adata *mpls, byte static void bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *pxlen, rta *a) { - struct { - struct adata ad; - u32 labels[BGP_MPLS_MAX]; - } labels_adata; - - u32 *labels = labels_adata.labels; + u32 labels[BGP_MPLS_MAX]; u32 label; uint lnum = 0; @@ -1473,19 +1471,8 @@ bgp_decode_mpls_labels(struct bgp_parse_state *s, byte **pos, uint *len, uint *p if (!a) return; - labels_adata.ad.length = lnum * sizeof(u32); - - /* Attach MPLS attribute unless we already have one */ - if (!s->mpls_labels) - ea_set_attr(&(a->eattrs), - EA_LITERAL_DIRECT_ADATA(&ea_mpls_labels, 0, - (s->mpls_labels = tmp_store_adata(labels, BGP_MPLS_MAX * sizeof(u32))))); - else - /* Overwrite data in the attribute */ - memcpy(s->mpls_labels, &labels_adata, sizeof labels_adata); - /* Update next hop entry in rta */ - bgp_apply_mpls_labels(s, a); + bgp_apply_mpls_labels(s, a, lnum, labels); /* Attributes were changed, invalidate cached entry */ rta_free(s->cached_rta); diff --git a/proto/pipe/pipe.c b/proto/pipe/pipe.c index 7a39beff..e458a238 100644 --- a/proto/pipe/pipe.c +++ b/proto/pipe/pipe.c @@ -75,7 +75,8 @@ pipe_rt_notify(struct proto *P, struct channel *src_ch, net *n, rte *new, rte *o memcpy(a, new->attrs, rta_size(new->attrs)); a->cached = 0; - a->hostentry = NULL; + ea_unset_attr(&a->eattrs, 0, &ea_gen_hostentry); + e = rte_get_temp(a, src); e->pflags = new->pflags; diff --git a/proto/static/static.c b/proto/static/static.c index 2e4a46a6..5102617f 100644 --- a/proto/static/static.c +++ b/proto/static/static.c @@ -100,11 +100,11 @@ static_announce_rte(struct static_proto *p, struct static_route *r) if (r->dest == RTDX_RECURSIVE) { rtable *tab = ipa_is_ip4(r->via) ? p->igp_table_ip4 : p->igp_table_ip6; - if (r->mls) - ea_set_attr(&a->eattrs, - EA_LITERAL_DIRECT_ADATA(&ea_mpls_labels, 0, r->mls)); + u32 *labels = r->mls ? (void *) r->mls->data : NULL; + u32 lnum = r->mls ? 
r->mls->length / sizeof(u32) : 0; - rta_set_recursive_next_hop(p->p.main_channel->table, a, tab, r->via, IPA_NONE); + ea_set_hostentry(&a->eattrs, p->p.main_channel->table, tab, + r->via, IPA_NONE, lnum, labels); } /* Already announced */ -- cgit v1.2.3
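A recurring pattern in the last two commits is packing a variable-length payload (the hostentry pointer plus an MPLS label stack) into a single adata blob and recovering the label count from ad.length alone, as ea_set_hostentry() and rta_apply_hostentry() do. The sketch below is not from the BIRD tree: it substitutes malloc() for tmp_alloc_adata(), a dummy hostentry, and the GNU zero-length-array idiom BIRD itself uses, and isolates just the length arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef uint32_t u32;
typedef unsigned char byte;

/* Simplified stand-ins for BIRD's types */
struct adata { u32 length; byte data[0]; };   /* length counts data[] only */
struct hostentry { int dummy; };

/* Same shape as struct hostentry_adata in nest/rt.h:
 * adata header, then the hostentry pointer, then the label stack. */
struct hostentry_adata {
  struct adata ad;
  struct hostentry *he;
  u32 labels[0];
};

/* Build the payload the way ea_set_hostentry() does: ad.length is set so that
 * ad.data + ad.length points exactly one past the last label, which also
 * covers any padding between the adata header and the hostentry pointer. */
static struct hostentry_adata *
make_hostentry_adata(struct hostentry *he, const u32 *labels, u32 lnum)
{
  struct hostentry_adata *head = malloc(sizeof *head + lnum * sizeof(u32));

  head->he = he;
  memcpy(head->labels, labels, lnum * sizeof(u32));
  head->ad.length = (u32) ((byte *) &head->labels[lnum] - head->ad.data);
  return head;
}

/* Recover the label count from the stored length, mirroring
 * rta_apply_hostentry(): the labels end exactly at ad.data + ad.length. */
static u32
hostentry_label_count(const struct hostentry_adata *head)
{
  return (const u32 *) (head->ad.data + head->ad.length) - head->labels;
}

int main(void)
{
  struct hostentry he = { 0 };
  u32 stack[] = { 100, 200, 300 };

  struct hostentry_adata *head = make_hostentry_adata(&he, stack, 3);
  printf("labels stored: %u\n", hostentry_label_count(head));   /* 3 */

  free(head);
  return 0;
}

The same arithmetic is what lets bgp_apply_mpls_labels() in the last commit shrink or extend the stored stack simply by rewriting ad.length.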