Diffstat (limited to 'sysdep')
-rw-r--r--   sysdep/bsd/krt-sock.c        |   36
-rw-r--r--   sysdep/bsd/sysio.h           |    6
-rw-r--r--   sysdep/config.h              |    2
-rw-r--r--   sysdep/linux/krt-sys.h       |    1
-rw-r--r--   sysdep/linux/netlink.Y       |    4
-rw-r--r--   sysdep/linux/netlink.c       |  190
-rw-r--r--   sysdep/linux/netlink.c.orig  | 2179
-rw-r--r--   sysdep/linux/sysio.h         |   19
-rw-r--r--   sysdep/unix/alloc.c          |  191
-rw-r--r--   sysdep/unix/io.c             |   27
-rw-r--r--   sysdep/unix/main.c           |   32
11 files changed, 2565 insertions, 122 deletions
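Among the changes below, netlink.Y, krt-sys.h and netlink.c add a per-protocol "netlink rx buffer" option to the kernel protocol; the grammar rule and the nl_set_rcvbuf()/nl_update_async_bufsize() plumbing appear in the netlink diffs further down. A minimal configuration sketch, assuming BIRD 2.x syntax and an arbitrary example buffer size (the value and channel setup are illustrative, not taken from this changeset):

    protocol kernel {
            # Hypothetical size; per the diff below, this is applied with
            # SO_RCVBUFFORCE to the asynchronous netlink socket.
            netlink rx buffer 1048576;
            ipv4 { export all; };
    }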
diff --git a/sysdep/bsd/krt-sock.c b/sysdep/bsd/krt-sock.c index 1b0384ce..efdb18a3 100644 --- a/sysdep/bsd/krt-sock.c +++ b/sysdep/bsd/krt-sock.c @@ -366,6 +366,30 @@ krt_replace_rte(struct krt_proto *p, net *n, rte *new, rte *old) } } +/** + * krt_assume_onlink - check if routes on interface are considered onlink + * @iface: The interface of the next hop + * @ipv6: Switch to only consider IPv6 or IPv4 addresses. + * + * The BSD kernel does not support an onlink flag. If the interface has only + * host addresses configured, all routes should be considered as onlink and + * the function returns 1. + */ +static int +krt_assume_onlink(struct iface *iface, int ipv6) +{ + const u8 type = ipv6 ? NET_IP6 : NET_IP4; + + struct ifa *ifa; + WALK_LIST(ifa, iface->addrs) + { + if ((ifa->prefix.type == type) && !(ifa->flags & IA_HOST)) + return 0; + } + + return 1; +} + #define SKIP(ARG...) do { DBG("KRT: Ignoring route - " ARG); return; } while(0) static void @@ -528,15 +552,21 @@ krt_read_route(struct ks_msg *msg, struct krt_proto *p, int scan) a.dest = RTD_UNICAST; if (flags & RTF_GATEWAY) { - neighbor *ng; a.nh.gw = igate; /* Clean up embedded interface ID returned in link-local address */ if (ipa_is_link_local(a.nh.gw)) _I0(a.nh.gw) = 0xfe800000; - ng = neigh_find(&p->p, a.nh.gw, a.nh.iface, 0); - if (!ng || (ng->scope == SCOPE_HOST)) + /* The BSD kernel does not support an onlink flag. We heuristically + set the onlink flag, if the iface has only host addresses. */ + if (krt_assume_onlink(a.nh.iface, ipv6)) + a.nh.flags |= RNF_ONLINK; + + neighbor *nbr; + nbr = neigh_find(&p->p, a.nh.gw, a.nh.iface, + (a.nh.flags & RNF_ONLINK) ? NEF_ONLINK : 0); + if (!nbr || (nbr->scope == SCOPE_HOST)) { /* Ignore routes with next-hop 127.0.0.1, host routes with such next-hop appear on OpenBSD for address aliases. 
*/ diff --git a/sysdep/bsd/sysio.h b/sysdep/bsd/sysio.h index c757960a..f1887fb4 100644 --- a/sysdep/bsd/sysio.h +++ b/sysdep/bsd/sysio.h @@ -271,3 +271,9 @@ sk_set_priority(sock *s, int prio UNUSED) { ERR_MSG("Socket priority not supported"); } + +static inline int +sk_set_freebind(sock *s) +{ + ERR_MSG("Freebind is not supported"); +} diff --git a/sysdep/config.h b/sysdep/config.h index 55be90f0..b0531844 100644 --- a/sysdep/config.h +++ b/sysdep/config.h @@ -13,7 +13,7 @@ #ifdef GIT_LABEL #define BIRD_VERSION XSTR1(GIT_LABEL) #else -#define BIRD_VERSION "2.0.8" +#define BIRD_VERSION "2.0.9" #endif /* Include parameters determined by configure script */ diff --git a/sysdep/linux/krt-sys.h b/sysdep/linux/krt-sys.h index a8af4c95..8897f889 100644 --- a/sysdep/linux/krt-sys.h +++ b/sysdep/linux/krt-sys.h @@ -69,6 +69,7 @@ static inline struct ifa * kif_get_primary_ip(struct iface *i UNUSED) { return N struct krt_params { u32 table_id; /* Kernel table ID we sync with */ u32 metric; /* Kernel metric used for all routes */ + uint netlink_rx_buffer; /* Rx buffer size for the netlink socket */ }; struct krt_state { diff --git a/sysdep/linux/netlink.Y b/sysdep/linux/netlink.Y index 7097f577..487ad1d8 100644 --- a/sysdep/linux/netlink.Y +++ b/sysdep/linux/netlink.Y @@ -10,7 +10,8 @@ CF_HDR CF_DECLS -CF_KEYWORDS(KERNEL, TABLE, METRIC, KRT_PREFSRC, KRT_REALM, KRT_SCOPE, KRT_MTU, KRT_WINDOW, +CF_KEYWORDS(KERNEL, TABLE, METRIC, NETLINK, RX, BUFFER, + KRT_PREFSRC, KRT_REALM, KRT_SCOPE, KRT_MTU, KRT_WINDOW, KRT_RTT, KRT_RTTVAR, KRT_SSTRESH, KRT_CWND, KRT_ADVMSS, KRT_REORDERING, KRT_HOPLIMIT, KRT_INITCWND, KRT_RTO_MIN, KRT_INITRWND, KRT_QUICKACK, KRT_LOCK_MTU, KRT_LOCK_WINDOW, KRT_LOCK_RTT, KRT_LOCK_RTTVAR, @@ -24,6 +25,7 @@ kern_proto: kern_proto kern_sys_item ';' ; kern_sys_item: KERNEL TABLE expr { THIS_KRT->sys.table_id = $3; } | METRIC expr { THIS_KRT->sys.metric = $2; } + | NETLINK RX BUFFER expr { THIS_KRT->sys.netlink_rx_buffer = $4; } ; dynamic_attr: KRT_PREFSRC { $$ = f_new_dynamic_attr(EAF_TYPE_IP_ADDRESS, T_IP, EA_KRT_PREFSRC); } ; diff --git a/sysdep/linux/netlink.c b/sysdep/linux/netlink.c index 4ecd8878..e103c8ef 100644 --- a/sysdep/linux/netlink.c +++ b/sysdep/linux/netlink.c @@ -69,6 +69,10 @@ #define RTA_ENCAP 22 #endif +#ifndef NETLINK_GET_STRICT_CHK +#define NETLINK_GET_STRICT_CHK 12 +#endif + #define krt_ipv4(p) ((p)->af == AF_INET) #define krt_ecmp6(p) ((p)->af == AF_INET6) @@ -130,7 +134,7 @@ struct nl_sock uint last_size; }; -#define NL_RX_SIZE 8192 +#define NL_RX_SIZE 32768 #define NL_OP_DELETE 0 #define NL_OP_ADD (NLM_F_CREATE|NLM_F_EXCL) @@ -158,10 +162,46 @@ nl_open_sock(struct nl_sock *nl) } static void +nl_set_strict_dump(struct nl_sock *nl UNUSED, int strict UNUSED) +{ + /* + * Strict checking is not necessary, it improves behavior on newer kernels. + * If it is not available (missing SOL_NETLINK compile-time, or ENOPROTOOPT + * run-time), we can just ignore it. 
+ */ +#ifdef SOL_NETLINK + setsockopt(nl->fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &strict, sizeof(strict)); +#endif +} + +static void +nl_set_rcvbuf(int fd, uint val) +{ + if (setsockopt(fd, SOL_SOCKET, SO_RCVBUFFORCE, &val, sizeof(val)) < 0) + log(L_WARN "KRT: Cannot set netlink rx buffer size to %u: %m", val); +} + +static uint +nl_cfg_rx_buffer_size(struct config *cfg) +{ + uint bufsize = 0; + + struct proto_config *pc; + WALK_LIST(pc, cfg->protos) + if ((pc->protocol == &proto_unix_kernel) && !pc->disabled) + bufsize = MAX(bufsize, ((struct krt_config *) pc)->sys.netlink_rx_buffer); + + return bufsize; +} + + +static void nl_open(void) { nl_open_sock(&nl_scan); nl_open_sock(&nl_req); + + nl_set_strict_dump(&nl_scan, 1); } static void @@ -180,20 +220,60 @@ nl_send(struct nl_sock *nl, struct nlmsghdr *nh) } static void -nl_request_dump(int af, int cmd) +nl_request_dump_link(void) { struct { struct nlmsghdr nh; - struct rtgenmsg g; + struct ifinfomsg ifi; } req = { - .nh.nlmsg_type = cmd, - .nh.nlmsg_len = sizeof(req), + .nh.nlmsg_type = RTM_GETLINK, + .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP, - .g.rtgen_family = af + .nh.nlmsg_seq = ++(nl_scan.seq), + .ifi.ifi_family = AF_UNSPEC, }; - nl_send(&nl_scan, &req.nh); + + send(nl_scan.fd, &req, sizeof(req), 0); + nl_scan.last_hdr = NULL; } +static void +nl_request_dump_addr(int af) +{ + struct { + struct nlmsghdr nh; + struct ifaddrmsg ifa; + } req = { + .nh.nlmsg_type = RTM_GETADDR, + .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg)), + .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP, + .nh.nlmsg_seq = ++(nl_scan.seq), + .ifa.ifa_family = af, + }; + + send(nl_scan.fd, &req, sizeof(req), 0); + nl_scan.last_hdr = NULL; +} + +static void +nl_request_dump_route(int af) +{ + struct { + struct nlmsghdr nh; + struct rtmsg rtm; + } req = { + .nh.nlmsg_type = RTM_GETROUTE, + .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)), + .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP, + .nh.nlmsg_seq = ++(nl_scan.seq), + .rtm.rtm_family = af, + }; + + send(nl_scan.fd, &req, sizeof(req), 0); + nl_scan.last_hdr = NULL; +} + + static struct nlmsghdr * nl_get_reply(struct nl_sock *nl) { @@ -681,7 +761,7 @@ nl_add_multipath(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af, e } static struct nexthop * -nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr *ra, int af) +nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, const net_addr *n, struct rtattr *ra, int af, int krt_src) { struct rtattr *a[BIRD_RTA_MAX]; struct rtnexthop *nh = RTA_DATA(ra); @@ -695,9 +775,9 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr { /* Use RTNH_OK(nh,len) ?? */ if ((len < sizeof(*nh)) || (len < nh->rtnh_len)) - return NULL; + goto err; - if (nh->rtnh_flags & RTNH_F_DEAD) + if ((nh->rtnh_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD)) goto next; *last = rv = lp_allocz(s->pool, NEXTHOP_MAX_SIZE); @@ -706,7 +786,10 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr rv->weight = nh->rtnh_hops; rv->iface = if_find_by_index(nh->rtnh_ifindex); if (!rv->iface) - return NULL; + { + log(L_ERR "KRT: Received route %N with unknown ifindex %u", n, nh->rtnh_ifindex); + return NULL; + } /* Nonexistent RTNH_PAYLOAD ?? 
*/ nl_attr_len = nh->rtnh_len - RTNH_LENGTH(0); @@ -714,18 +797,18 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr { case AF_INET: if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want4, a, sizeof(a))) - return NULL; + goto err; break; case AF_INET6: if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want6, a, sizeof(a))) - return NULL; + goto err; break; #ifdef HAVE_MPLS_KERNEL case AF_MPLS: if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want_mpls, a, sizeof(a))) - return NULL; + goto err; if (a[RTA_NEWDST]) rv->labels = rta_get_mpls(a[RTA_NEWDST], rv->label); @@ -734,7 +817,7 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr #endif default: - return NULL; + goto err; } if (a[RTA_GATEWAY]) @@ -757,14 +840,19 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr nbr = neigh_find(&p->p, rv->gw, rv->iface, (rv->flags & RNF_ONLINK) ? NEF_ONLINK : 0); if (!nbr || (nbr->scope == SCOPE_HOST)) - return NULL; + { + log(L_ERR "KRT: Received route %N with strange next-hop %I", n, rv->gw); + return NULL; + } } #ifdef HAVE_MPLS_KERNEL if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE]) { - if (rta_get_u16(a[RTA_ENCAP_TYPE]) != LWTUNNEL_ENCAP_MPLS) { - log(L_WARN "KRT: Unknown encapsulation method %d in multipath", rta_get_u16(a[RTA_ENCAP_TYPE])); + if (rta_get_u16(a[RTA_ENCAP_TYPE]) != LWTUNNEL_ENCAP_MPLS) + { + log(L_WARN "KRT: Received route %N with unknown encapsulation method %d", + n, rta_get_u16(a[RTA_ENCAP_TYPE])); return NULL; } @@ -785,6 +873,10 @@ nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, struct rtattr first = nexthop_sort(first); return first; + +err: + log(L_ERR "KRT: Received strange multipath route %N", n); + return NULL; } static void @@ -1139,7 +1231,7 @@ kif_do_scan(struct kif_proto *p UNUSED) if_start_update(); - nl_request_dump(AF_UNSPEC, RTM_GETLINK); + nl_request_dump_link(); while (h = nl_get_scan()) if (h->nlmsg_type == RTM_NEWLINK || h->nlmsg_type == RTM_DELLINK) nl_parse_link(h, 1); @@ -1166,14 +1258,14 @@ kif_do_scan(struct kif_proto *p UNUSED) } } - nl_request_dump(AF_INET, RTM_GETADDR); + nl_request_dump_addr(AF_INET); while (h = nl_get_scan()) if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR) nl_parse_addr(h, 1); else log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type); - nl_request_dump(AF_INET6, RTM_GETADDR); + nl_request_dump_addr(AF_INET6); while (h = nl_get_scan()) if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR) nl_parse_addr(h, 1); @@ -1531,7 +1623,8 @@ nl_parse_end(struct nl_parse_state *s) } -#define SKIP(ARG...) do { DBG("KRT: Ignoring route - " ARG); return; } while(0) +#define SKIP0(ARG, ...) do { DBG("KRT: Ignoring route - " ARG, ##__VA_ARGS__); return; } while(0) +#define SKIP(ARG, ...) 
do { DBG("KRT: Ignoring route %N - " ARG, &dst, ##__VA_ARGS__); return; } while(0) static void nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h) @@ -1584,10 +1677,10 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h) return; if (!a[RTA_DST]) - SKIP("MPLS route without RTA_DST"); + SKIP0("MPLS route without RTA_DST\n"); if (rta_get_mpls(a[RTA_DST], rta_mpls_stack) != 1) - SKIP("MPLS route with multi-label RTA_DST"); + SKIP0("MPLS route with multi-label RTA_DST\n"); net_fill_mpls(&dst, rta_mpls_stack[0]); break; @@ -1605,6 +1698,9 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h) else table_id = i->rtm_table; + if (i->rtm_flags & RTM_F_CLONED) + SKIP("cloned\n"); + /* Do we know this table? */ p = HASH_FIND(nl_table_map, RTH, i->rtm_family, table_id); if (!p) @@ -1682,19 +1778,16 @@ nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h) if (a[RTA_MULTIPATH]) { - struct nexthop *nh = nl_parse_multipath(s, p, a[RTA_MULTIPATH], i->rtm_family); + struct nexthop *nh = nl_parse_multipath(s, p, n, a[RTA_MULTIPATH], i->rtm_family, krt_src); if (!nh) - { - log(L_ERR "KRT: Received strange multipath route %N", net->n.addr); - return; - } + SKIP("strange RTA_MULTIPATH\n"); nexthop_link(ra, nh); break; } - if (i->rtm_flags & RTNH_F_DEAD) - return; + if ((i->rtm_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD)) + SKIP("ignore RTNH_F_DEAD\n"); ra->nh.iface = if_find_by_index(oif); if (!ra->nh.iface) @@ -1896,7 +1989,7 @@ krt_do_scan(struct krt_proto *p UNUSED) /* CONFIG_ALL_TABLES_AT_ONCE => p is NUL struct nl_parse_state s; nl_parse_begin(&s, 1); - nl_request_dump(AF_UNSPEC, RTM_GETROUTE); + nl_request_dump_route(AF_UNSPEC); while (h = nl_get_scan()) if (h->nlmsg_type == RTM_NEWROUTE || h->nlmsg_type == RTM_DELROUTE) nl_parse_route(&s, h); @@ -1911,6 +2004,8 @@ krt_do_scan(struct krt_proto *p UNUSED) /* CONFIG_ALL_TABLES_AT_ONCE => p is NUL static sock *nl_async_sk; /* BIRD socket for asynchronous notifications */ static byte *nl_async_rx_buffer; /* Receive buffer */ +static uint nl_async_bufsize; /* Kernel rx buffer size for the netlink socket */ +static struct config *nl_last_config; /* For tracking changes to nl_async_bufsize */ static void nl_async_msg(struct nlmsghdr *h) @@ -2046,6 +2141,32 @@ nl_open_async(void) bug("Netlink: sk_open failed"); } +static void +nl_update_async_bufsize(void) +{ + /* No async socket */ + if (!nl_async_sk) + return; + + /* Already reconfigured */ + if (nl_last_config == config) + return; + + /* Update netlink buffer size */ + uint bufsize = nl_cfg_rx_buffer_size(config); + if (bufsize && (bufsize != nl_async_bufsize)) + { + /* Log message for reconfigurations only */ + if (nl_last_config) + log(L_INFO "KRT: Changing netlink rx buffer size to %u", bufsize); + + nl_set_rcvbuf(nl_async_sk->fd, bufsize); + nl_async_bufsize = bufsize; + } + + nl_last_config = config; +} + /* * Interface to the UNIX krt module @@ -2074,6 +2195,7 @@ krt_sys_start(struct krt_proto *p) nl_open(); nl_open_async(); + nl_update_async_bufsize(); return 1; } @@ -2081,12 +2203,16 @@ krt_sys_start(struct krt_proto *p) void krt_sys_shutdown(struct krt_proto *p) { + nl_update_async_bufsize(); + HASH_REMOVE2(nl_table_map, RTH, krt_pool, p); } int krt_sys_reconfigure(struct krt_proto *p UNUSED, struct krt_config *n, struct krt_config *o) { + nl_update_async_bufsize(); + return (n->sys.table_id == o->sys.table_id) && (n->sys.metric == o->sys.metric); } diff --git a/sysdep/linux/netlink.c.orig b/sysdep/linux/netlink.c.orig new file mode 100644 index 
00000000..7cea5322 --- /dev/null +++ b/sysdep/linux/netlink.c.orig @@ -0,0 +1,2179 @@ +/* + * BIRD -- Linux Netlink Interface + * + * (c) 1999--2000 Martin Mares <mj@ucw.cz> + * + * Can be freely distributed and used under the terms of the GNU GPL. + */ + +#include <alloca.h> +#include <stdio.h> +#include <unistd.h> +#include <fcntl.h> +#include <sys/socket.h> +#include <sys/uio.h> +#include <errno.h> + +#undef LOCAL_DEBUG + +#include "nest/bird.h" +#include "nest/route.h" +#include "nest/protocol.h" +#include "nest/iface.h" +#include "lib/alloca.h" +#include "sysdep/unix/unix.h" +#include "sysdep/unix/krt.h" +#include "lib/socket.h" +#include "lib/string.h" +#include "lib/hash.h" +#include "conf/conf.h" + +#include <asm/types.h> +#include <linux/if.h> +#include <linux/netlink.h> +#include <linux/rtnetlink.h> + +#ifdef HAVE_MPLS_KERNEL +#include <linux/lwtunnel.h> +#endif + +#ifndef MSG_TRUNC /* Hack: Several versions of glibc miss this one :( */ +#define MSG_TRUNC 0x20 +#endif + +#ifndef IFA_FLAGS +#define IFA_FLAGS 8 +#endif + +#ifndef IFF_LOWER_UP +#define IFF_LOWER_UP 0x10000 +#endif + +#ifndef RTA_TABLE +#define RTA_TABLE 15 +#endif + +#ifndef RTA_VIA +#define RTA_VIA 18 +#endif + +#ifndef RTA_NEWDST +#define RTA_NEWDST 19 +#endif + +#ifndef RTA_ENCAP_TYPE +#define RTA_ENCAP_TYPE 21 +#endif + +#ifndef RTA_ENCAP +#define RTA_ENCAP 22 +#endif + +#define krt_ipv4(p) ((p)->af == AF_INET) +#define krt_ecmp6(p) ((p)->af == AF_INET6) + +const int rt_default_ecmp = 16; + +/* + * Structure nl_parse_state keeps state of received route processing. Ideally, + * we could just independently parse received Netlink messages and immediately + * propagate received routes to the rest of BIRD, but older Linux kernel (before + * version 4.11) represents and announces IPv6 ECMP routes not as one route with + * multiple next hops (like RTA_MULTIPATH in IPv4 ECMP), but as a sequence of + * routes with the same prefix. More recent kernels work as with IPv4. + * + * Therefore, BIRD keeps currently processed route in nl_parse_state structure + * and postpones its propagation until we expect it to be final; i.e., when + * non-matching route is received or when the scan ends. When another matching + * route is received, it is merged with the already processed route to form an + * ECMP route. Note that merging is done only for IPv6 (merge == 1), but the + * postponing is done in both cases (for simplicity). All IPv4 routes or IPv6 + * routes with RTA_MULTIPATH set are just considered non-matching. + * + * This is ignored for asynchronous notifications (every notification is handled + * as a separate route). It is not an issue for our routes, as we ignore such + * notifications anyways. But importing alien IPv6 ECMP routes does not work + * properly with older kernels. + * + * Whatever the kernel version is, IPv6 ECMP routes are sent as multiple routes + * for the same prefix. 
+ */ + +struct nl_parse_state +{ + struct linpool *pool; + int scan; + int merge; + + net *net; + rta *attrs; + struct krt_proto *proto; + s8 new; + s8 krt_src; + u8 krt_type; + u8 krt_proto; + u32 krt_metric; + + u32 rta_flow; /* Used during parsing */ +}; + +/* + * Synchronous Netlink interface + */ + +struct nl_sock +{ + int fd; + u32 seq; + byte *rx_buffer; /* Receive buffer */ + struct nlmsghdr *last_hdr; /* Recently received packet */ + uint last_size; +}; + +#define NL_RX_SIZE 8192 + +#define NL_OP_DELETE 0 +#define NL_OP_ADD (NLM_F_CREATE|NLM_F_EXCL) +#define NL_OP_REPLACE (NLM_F_CREATE|NLM_F_REPLACE) +#define NL_OP_APPEND (NLM_F_CREATE|NLM_F_APPEND) + +static linpool *nl_linpool; + +static struct nl_sock nl_scan = {.fd = -1}; /* Netlink socket for synchronous scan */ +static struct nl_sock nl_req = {.fd = -1}; /* Netlink socket for requests */ + +static void +nl_open_sock(struct nl_sock *nl) +{ + if (nl->fd < 0) + { + nl->fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (nl->fd < 0) + die("Unable to open rtnetlink socket: %m"); + nl->seq = (u32) (current_time() TO_S); /* Or perhaps random_u32() ? */ + nl->rx_buffer = xmalloc(NL_RX_SIZE); + nl->last_hdr = NULL; + nl->last_size = 0; + } +} + +static void +nl_open(void) +{ + nl_open_sock(&nl_scan); + nl_open_sock(&nl_req); +} + +static void +nl_send(struct nl_sock *nl, struct nlmsghdr *nh) +{ + struct sockaddr_nl sa; + + memset(&sa, 0, sizeof(sa)); + sa.nl_family = AF_NETLINK; + nh->nlmsg_pid = 0; + nh->nlmsg_seq = ++(nl->seq); + nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len); + if (sendto(nl->fd, nh, nh->nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa)) < 0) + die("rtnetlink sendto: %m"); + nl->last_hdr = NULL; +} + +static void +nl_request_dump(int af, int cmd) +{ + struct { + struct nlmsghdr nh; + struct rtgenmsg g; + } req = { + .nh.nlmsg_type = cmd, + .nh.nlmsg_len = sizeof(req), + .nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP, + .g.rtgen_family = af + }; + nl_send(&nl_scan, &req.nh); +} + +static struct nlmsghdr * +nl_get_reply(struct nl_sock *nl) +{ + for(;;) + { + if (!nl->last_hdr) + { + struct iovec iov = { nl->rx_buffer, NL_RX_SIZE }; + struct sockaddr_nl sa; + struct msghdr m = { + .msg_name = &sa, + .msg_namelen = sizeof(sa), + .msg_iov = &iov, + .msg_iovlen = 1, + }; + int x = recvmsg(nl->fd, &m, 0); + if (x < 0) + die("nl_get_reply: %m"); + if (sa.nl_pid) /* It isn't from the kernel */ + { + DBG("Non-kernel packet\n"); + continue; + } + nl->last_size = x; + nl->last_hdr = (void *) nl->rx_buffer; + if (m.msg_flags & MSG_TRUNC) + bug("nl_get_reply: got truncated reply which should be impossible"); + } + if (NLMSG_OK(nl->last_hdr, nl->last_size)) + { + struct nlmsghdr *h = nl->last_hdr; + nl->last_hdr = NLMSG_NEXT(h, nl->last_size); + if (h->nlmsg_seq != nl->seq) + { + log(L_WARN "nl_get_reply: Ignoring out of sequence netlink packet (%x != %x)", + h->nlmsg_seq, nl->seq); + continue; + } + return h; + } + if (nl->last_size) + log(L_WARN "nl_get_reply: Found packet remnant of size %d", nl->last_size); + nl->last_hdr = NULL; + } +} + +static struct tbf rl_netlink_err = TBF_DEFAULT_LOG_LIMITS; + +static int +nl_error(struct nlmsghdr *h, int ignore_esrch) +{ + struct nlmsgerr *e; + int ec; + + if (h->nlmsg_len < NLMSG_LENGTH(sizeof(struct nlmsgerr))) + { + log(L_WARN "Netlink: Truncated error message received"); + return ENOBUFS; + } + e = (struct nlmsgerr *) NLMSG_DATA(h); + ec = -e->error; + if (ec && !(ignore_esrch && (ec == ESRCH))) + log_rl(&rl_netlink_err, L_WARN "Netlink: %s", strerror(ec)); + return ec; +} + +static 
struct nlmsghdr * +nl_get_scan(void) +{ + struct nlmsghdr *h = nl_get_reply(&nl_scan); + + if (h->nlmsg_type == NLMSG_DONE) + return NULL; + if (h->nlmsg_type == NLMSG_ERROR) + { + nl_error(h, 0); + return NULL; + } + return h; +} + +static int +nl_exchange(struct nlmsghdr *pkt, int ignore_esrch) +{ + struct nlmsghdr *h; + + nl_send(&nl_req, pkt); + for(;;) + { + h = nl_get_reply(&nl_req); + if (h->nlmsg_type == NLMSG_ERROR) + break; + log(L_WARN "nl_exchange: Unexpected reply received"); + } + return nl_error(h, ignore_esrch) ? -1 : 0; +} + +/* + * Netlink attributes + */ + +static int nl_attr_len; + +static void * +nl_checkin(struct nlmsghdr *h, int lsize) +{ + nl_attr_len = h->nlmsg_len - NLMSG_LENGTH(lsize); + if (nl_attr_len < 0) + { + log(L_ERR "nl_checkin: underrun by %d bytes", -nl_attr_len); + return NULL; + } + return NLMSG_DATA(h); +} + +struct nl_want_attrs { + u8 defined:1; + u8 checksize:1; + u8 size; +}; + + +#define BIRD_IFLA_MAX (IFLA_WIRELESS+1) + +static struct nl_want_attrs ifla_attr_want[BIRD_IFLA_MAX] = { + [IFLA_IFNAME] = { 1, 0, 0 }, + [IFLA_MTU] = { 1, 1, sizeof(u32) }, + [IFLA_MASTER] = { 1, 1, sizeof(u32) }, + [IFLA_WIRELESS] = { 1, 0, 0 }, +}; + + +#define BIRD_IFA_MAX (IFA_FLAGS+1) + +static struct nl_want_attrs ifa_attr_want4[BIRD_IFA_MAX] = { + [IFA_ADDRESS] = { 1, 1, sizeof(ip4_addr) }, + [IFA_LOCAL] = { 1, 1, sizeof(ip4_addr) }, + [IFA_BROADCAST] = { 1, 1, sizeof(ip4_addr) }, + [IFA_FLAGS] = { 1, 1, sizeof(u32) }, +}; + +static struct nl_want_attrs ifa_attr_want6[BIRD_IFA_MAX] = { + [IFA_ADDRESS] = { 1, 1, sizeof(ip6_addr) }, + [IFA_LOCAL] = { 1, 1, sizeof(ip6_addr) }, + [IFA_FLAGS] = { 1, 1, sizeof(u32) }, +}; + + +#define BIRD_RTA_MAX (RTA_ENCAP+1) + +static struct nl_want_attrs nexthop_attr_want4[BIRD_RTA_MAX] = { + [RTA_GATEWAY] = { 1, 1, sizeof(ip4_addr) }, + [RTA_VIA] = { 1, 0, 0 }, + [RTA_FLOW] = { 1, 1, sizeof(u32) }, + [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) }, + [RTA_ENCAP] = { 1, 0, 0 }, +}; + +static struct nl_want_attrs nexthop_attr_want6[BIRD_RTA_MAX] = { + [RTA_GATEWAY] = { 1, 1, sizeof(ip6_addr) }, + [RTA_VIA] = { 1, 0, 0 }, + [RTA_FLOW] = { 1, 1, sizeof(u32) }, + [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) }, + [RTA_ENCAP] = { 1, 0, 0 }, +}; + +#ifdef HAVE_MPLS_KERNEL +static struct nl_want_attrs nexthop_attr_want_mpls[BIRD_RTA_MAX] = { + [RTA_VIA] = { 1, 0, 0 }, + [RTA_NEWDST] = { 1, 0, 0 }, +}; + +static struct nl_want_attrs encap_mpls_want[BIRD_RTA_MAX] = { + [RTA_DST] = { 1, 0, 0 }, +}; +#endif + +static struct nl_want_attrs rtm_attr_want4[BIRD_RTA_MAX] = { + [RTA_DST] = { 1, 1, sizeof(ip4_addr) }, + [RTA_OIF] = { 1, 1, sizeof(u32) }, + [RTA_GATEWAY] = { 1, 1, sizeof(ip4_addr) }, + [RTA_PRIORITY] = { 1, 1, sizeof(u32) }, + [RTA_PREFSRC] = { 1, 1, sizeof(ip4_addr) }, + [RTA_METRICS] = { 1, 0, 0 }, + [RTA_MULTIPATH] = { 1, 0, 0 }, + [RTA_FLOW] = { 1, 1, sizeof(u32) }, + [RTA_TABLE] = { 1, 1, sizeof(u32) }, + [RTA_VIA] = { 1, 0, 0 }, + [RTA_ENCAP_TYPE]= { 1, 1, sizeof(u16) }, + [RTA_ENCAP] = { 1, 0, 0 }, +}; + +static struct nl_want_attrs rtm_attr_want6[BIRD_RTA_MAX] = { + [RTA_DST] = { 1, 1, sizeof(ip6_addr) }, + [RTA_SRC] = { 1, 1, sizeof(ip6_addr) }, + [RTA_IIF] = { 1, 1, sizeof(u32) }, + [RTA_OIF] = { 1, 1, sizeof(u32) }, + [RTA_GATEWAY] = { 1, 1, sizeof(ip6_addr) }, + [RTA_PRIORITY] = { 1, 1, sizeof(u32) }, + [RTA_PREFSRC] = { 1, 1, sizeof(ip6_addr) }, + [RTA_METRICS] = { 1, 0, 0 }, + [RTA_MULTIPATH] = { 1, 0, 0 }, + [RTA_FLOW] = { 1, 1, sizeof(u32) }, + [RTA_TABLE] = { 1, 1, sizeof(u32) }, + [RTA_VIA] = { 1, 0, 0 }, + [RTA_ENCAP_TYPE]= { 1, 
1, sizeof(u16) }, + [RTA_ENCAP] = { 1, 0, 0 }, +}; + +#ifdef HAVE_MPLS_KERNEL +static struct nl_want_attrs rtm_attr_want_mpls[BIRD_RTA_MAX] = { + [RTA_DST] = { 1, 1, sizeof(u32) }, + [RTA_IIF] = { 1, 1, sizeof(u32) }, + [RTA_OIF] = { 1, 1, sizeof(u32) }, + [RTA_PRIORITY] = { 1, 1, sizeof(u32) }, + [RTA_METRICS] = { 1, 0, 0 }, + [RTA_MULTIPATH] = { 1, 0, 0 }, + [RTA_FLOW] = { 1, 1, sizeof(u32) }, + [RTA_TABLE] = { 1, 1, sizeof(u32) }, + [RTA_VIA] = { 1, 0, 0 }, + [RTA_NEWDST] = { 1, 0, 0 }, +}; +#endif + + +static int +nl_parse_attrs(struct rtattr *a, struct nl_want_attrs *want, struct rtattr **k, int ksize) +{ + int max = ksize / sizeof(struct rtattr *); + bzero(k, ksize); + + for ( ; RTA_OK(a, nl_attr_len); a = RTA_NEXT(a, nl_attr_len)) + { + if ((a->rta_type >= max) || !want[a->rta_type].defined) + continue; + + if (want[a->rta_type].checksize && (RTA_PAYLOAD(a) != want[a->rta_type].size)) + { + log(L_ERR "nl_parse_attrs: Malformed attribute received"); + return 0; + } + + k[a->rta_type] = a; + } + + if (nl_attr_len) + { + log(L_ERR "nl_parse_attrs: remnant of size %d", nl_attr_len); + return 0; + } + + return 1; +} + +static inline u16 rta_get_u16(struct rtattr *a) +{ return *(u16 *) RTA_DATA(a); } + +static inline u32 rta_get_u32(struct rtattr *a) +{ return *(u32 *) RTA_DATA(a); } + +static inline ip4_addr rta_get_ip4(struct rtattr *a) +{ return ip4_ntoh(*(ip4_addr *) RTA_DATA(a)); } + +static inline ip6_addr rta_get_ip6(struct rtattr *a) +{ return ip6_ntoh(*(ip6_addr *) RTA_DATA(a)); } + +static inline ip_addr rta_get_ipa(struct rtattr *a) +{ + if (RTA_PAYLOAD(a) == sizeof(ip4_addr)) + return ipa_from_ip4(rta_get_ip4(a)); + else + return ipa_from_ip6(rta_get_ip6(a)); +} + +#ifdef HAVE_MPLS_KERNEL +static inline ip_addr rta_get_via(struct rtattr *a) +{ + struct rtvia *v = RTA_DATA(a); + switch(v->rtvia_family) { + case AF_INET: return ipa_from_ip4(ip4_ntoh(*(ip4_addr *) v->rtvia_addr)); + case AF_INET6: return ipa_from_ip6(ip6_ntoh(*(ip6_addr *) v->rtvia_addr)); + } + return IPA_NONE; +} + +static u32 rta_mpls_stack[MPLS_MAX_LABEL_STACK]; +static inline int rta_get_mpls(struct rtattr *a, u32 *stack) +{ + if (!a) + return 0; + + if (RTA_PAYLOAD(a) % 4) + log(L_WARN "KRT: Strange length of received MPLS stack: %u", RTA_PAYLOAD(a)); + + int labels = mpls_get(RTA_DATA(a), RTA_PAYLOAD(a) & ~0x3, stack); + + if (labels < 0) + { + log(L_WARN "KRT: Too long MPLS stack received, ignoring"); + labels = 0; + } + + return labels; +} +#endif + +struct rtattr * +nl_add_attr(struct nlmsghdr *h, uint bufsize, uint code, const void *data, uint dlen) +{ + uint pos = NLMSG_ALIGN(h->nlmsg_len); + uint len = RTA_LENGTH(dlen); + + if (pos + len > bufsize) + bug("nl_add_attr: packet buffer overflow"); + + struct rtattr *a = (struct rtattr *)((char *)h + pos); + a->rta_type = code; + a->rta_len = len; + h->nlmsg_len = pos + len; + + if (dlen > 0) + memcpy(RTA_DATA(a), data, dlen); + + return a; +} + +static inline struct rtattr * +nl_open_attr(struct nlmsghdr *h, uint bufsize, uint code) +{ + return nl_add_attr(h, bufsize, code, NULL, 0); +} + +static inline void +nl_close_attr(struct nlmsghdr *h, struct rtattr *a) +{ + a->rta_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)a; +} + +static inline void +nl_add_attr_u16(struct nlmsghdr *h, uint bufsize, int code, u16 data) +{ + nl_add_attr(h, bufsize, code, &data, 2); +} + +static inline void +nl_add_attr_u32(struct nlmsghdr *h, uint bufsize, int code, u32 data) +{ + nl_add_attr(h, bufsize, code, &data, 4); +} + +static inline void +nl_add_attr_ip4(struct 
nlmsghdr *h, uint bufsize, int code, ip4_addr ip4) +{ + ip4 = ip4_hton(ip4); + nl_add_attr(h, bufsize, code, &ip4, sizeof(ip4)); +} + +static inline void +nl_add_attr_ip6(struct nlmsghdr *h, uint bufsize, int code, ip6_addr ip6) +{ + ip6 = ip6_hton(ip6); + nl_add_attr(h, bufsize, code, &ip6, sizeof(ip6)); +} + +static inline void +nl_add_attr_ipa(struct nlmsghdr *h, uint bufsize, int code, ip_addr ipa) +{ + if (ipa_is_ip4(ipa)) + nl_add_attr_ip4(h, bufsize, code, ipa_to_ip4(ipa)); + else + nl_add_attr_ip6(h, bufsize, code, ipa_to_ip6(ipa)); +} + +#ifdef HAVE_MPLS_KERNEL +static inline void +nl_add_attr_mpls(struct nlmsghdr *h, uint bufsize, int code, int len, u32 *stack) +{ + char buf[len*4]; + mpls_put(buf, len, stack); + nl_add_attr(h, bufsize, code, buf, len*4); +} + +static inline void +nl_add_attr_mpls_encap(struct nlmsghdr *h, uint bufsize, int len, u32 *stack) +{ + nl_add_attr_u16(h, bufsize, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_MPLS); + + struct rtattr *nest = nl_open_attr(h, bufsize, RTA_ENCAP); + nl_add_attr_mpls(h, bufsize, RTA_DST, len, stack); + nl_close_attr(h, nest); +} + +static inline void +nl_add_attr_via(struct nlmsghdr *h, uint bufsize, ip_addr ipa) +{ + struct rtvia *via = alloca(sizeof(struct rtvia) + 16); + + if (ipa_is_ip4(ipa)) + { + via->rtvia_family = AF_INET; + put_ip4(via->rtvia_addr, ipa_to_ip4(ipa)); + nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 4); + } + else + { + via->rtvia_family = AF_INET6; + put_ip6(via->rtvia_addr, ipa_to_ip6(ipa)); + nl_add_attr(h, bufsize, RTA_VIA, via, sizeof(struct rtvia) + 16); + } +} +#endif + +static inline struct rtnexthop * +nl_open_nexthop(struct nlmsghdr *h, uint bufsize) +{ + uint pos = NLMSG_ALIGN(h->nlmsg_len); + uint len = RTNH_LENGTH(0); + + if (pos + len > bufsize) + bug("nl_open_nexthop: packet buffer overflow"); + + h->nlmsg_len = pos + len; + + return (void *)h + pos; +} + +static inline void +nl_close_nexthop(struct nlmsghdr *h, struct rtnexthop *nh) +{ + nh->rtnh_len = (void *)h + NLMSG_ALIGN(h->nlmsg_len) - (void *)nh; +} + +static inline void +nl_add_nexthop(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af UNUSED) +{ +#ifdef HAVE_MPLS_KERNEL + if (nh->labels > 0) + if (af == AF_MPLS) + nl_add_attr_mpls(h, bufsize, RTA_NEWDST, nh->labels, nh->label); + else + nl_add_attr_mpls_encap(h, bufsize, nh->labels, nh->label); + + if (ipa_nonzero(nh->gw)) + { + if (af == (ipa_is_ip4(nh->gw) ? AF_INET : AF_INET6)) + nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw); + else + nl_add_attr_via(h, bufsize, nh->gw); + } +#else + + if (ipa_nonzero(nh->gw)) + nl_add_attr_ipa(h, bufsize, RTA_GATEWAY, nh->gw); +#endif +} + +static void +nl_add_multipath(struct nlmsghdr *h, uint bufsize, struct nexthop *nh, int af, ea_list *eattrs) +{ + struct rtattr *a = nl_open_attr(h, bufsize, RTA_MULTIPATH); + eattr *flow = ea_find(eattrs, EA_KRT_REALM); + + for (; nh; nh = nh->next) + { + struct rtnexthop *rtnh = nl_open_nexthop(h, bufsize); + + rtnh->rtnh_flags = 0; + rtnh->rtnh_hops = nh->weight; + rtnh->rtnh_ifindex = nh->iface->index; + + nl_add_nexthop(h, bufsize, nh, af); + + if (nh->flags & RNF_ONLINK) + rtnh->rtnh_flags |= RTNH_F_ONLINK; + + /* Our KRT_REALM is per-route, but kernel RTA_FLOW is per-nexthop. + Therefore, we need to attach the same attribute to each nexthop. 
*/ + if (flow) + nl_add_attr_u32(h, bufsize, RTA_FLOW, flow->u.data); + + nl_close_nexthop(h, rtnh); + } + + nl_close_attr(h, a); +} + +static struct nexthop * +nl_parse_multipath(struct nl_parse_state *s, struct krt_proto *p, const net_addr *n, struct rtattr *ra, int af, int krt_src) +{ + struct rtattr *a[BIRD_RTA_MAX]; + struct rtnexthop *nh = RTA_DATA(ra); + struct nexthop *rv, *first, **last; + unsigned len = RTA_PAYLOAD(ra); + + first = NULL; + last = &first; + + while (len) + { + /* Use RTNH_OK(nh,len) ?? */ + if ((len < sizeof(*nh)) || (len < nh->rtnh_len)) + goto err; + + if ((nh->rtnh_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD)) + goto next; + + *last = rv = lp_allocz(s->pool, NEXTHOP_MAX_SIZE); + last = &(rv->next); + + rv->weight = nh->rtnh_hops; + rv->iface = if_find_by_index(nh->rtnh_ifindex); + if (!rv->iface) + { + log(L_ERR "KRT: Received route %N with unknown ifindex %u", n, nh->rtnh_ifindex); + return NULL; + } + + /* Nonexistent RTNH_PAYLOAD ?? */ + nl_attr_len = nh->rtnh_len - RTNH_LENGTH(0); + switch (af) + { + case AF_INET: + if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want4, a, sizeof(a))) + goto err; + break; + + case AF_INET6: + if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want6, a, sizeof(a))) + goto err; + break; + +#ifdef HAVE_MPLS_KERNEL + case AF_MPLS: + if (!nl_parse_attrs(RTNH_DATA(nh), nexthop_attr_want_mpls, a, sizeof(a))) + goto err; + + if (a[RTA_NEWDST]) + rv->labels = rta_get_mpls(a[RTA_NEWDST], rv->label); + + break; +#endif + + default: + goto err; + } + + if (a[RTA_GATEWAY]) + rv->gw = rta_get_ipa(a[RTA_GATEWAY]); + + if (a[RTA_FLOW]) + s->rta_flow = rta_get_u32(a[RTA_FLOW]); + +#ifdef HAVE_MPLS_KERNEL + if (a[RTA_VIA]) + rv->gw = rta_get_via(a[RTA_VIA]); +#endif + + if (ipa_nonzero(rv->gw)) + { + if (nh->rtnh_flags & RTNH_F_ONLINK) + rv->flags |= RNF_ONLINK; + + neighbor *nbr; + nbr = neigh_find(&p->p, rv->gw, rv->iface, + (rv->flags & RNF_ONLINK) ? 
NEF_ONLINK : 0); + if (!nbr || (nbr->scope == SCOPE_HOST)) + { + log(L_ERR "KRT: Received route %N with strange next-hop %I", n, rv->gw); + return NULL; + } + } + +#ifdef HAVE_MPLS_KERNEL + if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE]) + { + if (rta_get_u16(a[RTA_ENCAP_TYPE]) != LWTUNNEL_ENCAP_MPLS) + { + log(L_WARN "KRT: Received route %N with unknown encapsulation method %d", + n, rta_get_u16(a[RTA_ENCAP_TYPE])); + return NULL; + } + + struct rtattr *enca[BIRD_RTA_MAX]; + nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]); + nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca)); + rv->labels = rta_get_mpls(enca[RTA_DST], rv->label); + } +#endif + + next: + len -= NLMSG_ALIGN(nh->rtnh_len); + nh = RTNH_NEXT(nh); + } + + /* Ensure nexthops are sorted to satisfy nest invariant */ + if (!nexthop_is_sorted(first)) + first = nexthop_sort(first); + + return first; + +err: + log(L_ERR "KRT: Received strange multipath route %N", n); + return NULL; +} + +static void +nl_add_metrics(struct nlmsghdr *h, uint bufsize, u32 *metrics, int max) +{ + struct rtattr *a = nl_open_attr(h, bufsize, RTA_METRICS); + int t; + + for (t = 1; t < max; t++) + if (metrics[0] & (1 << t)) + nl_add_attr_u32(h, bufsize, t, metrics[t]); + + nl_close_attr(h, a); +} + +static int +nl_parse_metrics(struct rtattr *hdr, u32 *metrics, int max) +{ + struct rtattr *a = RTA_DATA(hdr); + int len = RTA_PAYLOAD(hdr); + + metrics[0] = 0; + for (; RTA_OK(a, len); a = RTA_NEXT(a, len)) + { + if (a->rta_type == RTA_UNSPEC) + continue; + + if (a->rta_type >= max) + continue; + + if (RTA_PAYLOAD(a) != 4) + return -1; + + metrics[0] |= 1 << a->rta_type; + metrics[a->rta_type] = rta_get_u32(a); + } + + if (len > 0) + return -1; + + return 0; +} + + +/* + * Scanning of interfaces + */ + +static void +nl_parse_link(struct nlmsghdr *h, int scan) +{ + struct ifinfomsg *i; + struct rtattr *a[BIRD_IFLA_MAX]; + int new = h->nlmsg_type == RTM_NEWLINK; + struct iface f = {}; + struct iface *ifi; + char *name; + u32 mtu, master = 0; + uint fl; + + if (!(i = nl_checkin(h, sizeof(*i))) || !nl_parse_attrs(IFLA_RTA(i), ifla_attr_want, a, sizeof(a))) + return; + if (!a[IFLA_IFNAME] || (RTA_PAYLOAD(a[IFLA_IFNAME]) < 2) || !a[IFLA_MTU]) + { + /* + * IFLA_IFNAME and IFLA_MTU are required, in fact, but there may also come + * a message with IFLA_WIRELESS set, where (e.g.) no IFLA_IFNAME exists. + * We simply ignore all such messages with IFLA_WIRELESS without notice. 
+ */ + + if (a[IFLA_WIRELESS]) + return; + + log(L_ERR "KIF: Malformed message received"); + return; + } + + name = RTA_DATA(a[IFLA_IFNAME]); + mtu = rta_get_u32(a[IFLA_MTU]); + + if (a[IFLA_MASTER]) + master = rta_get_u32(a[IFLA_MASTER]); + + ifi = if_find_by_index(i->ifi_index); + if (!new) + { + DBG("KIF: IF%d(%s) goes down\n", i->ifi_index, name); + if (!ifi) + return; + + if_delete(ifi); + } + else + { + DBG("KIF: IF%d(%s) goes up (mtu=%d,flg=%x)\n", i->ifi_index, name, mtu, i->ifi_flags); + if (ifi && strncmp(ifi->name, name, sizeof(ifi->name)-1)) + if_delete(ifi); + + strncpy(f.name, name, sizeof(f.name)-1); + f.index = i->ifi_index; + f.mtu = mtu; + + f.master_index = master; + f.master = if_find_by_index(master); + + fl = i->ifi_flags; + if (fl & IFF_UP) + f.flags |= IF_ADMIN_UP; + if (fl & IFF_LOWER_UP) + f.flags |= IF_LINK_UP; + if (fl & IFF_LOOPBACK) /* Loopback */ + f.flags |= IF_MULTIACCESS | IF_LOOPBACK | IF_IGNORE; + else if (fl & IFF_POINTOPOINT) /* PtP */ + f.flags |= IF_MULTICAST; + else if (fl & IFF_BROADCAST) /* Broadcast */ + f.flags |= IF_MULTIACCESS | IF_BROADCAST | IF_MULTICAST; + else + f.flags |= IF_MULTIACCESS; /* NBMA */ + + if (fl & IFF_MULTICAST) + f.flags |= IF_MULTICAST; + + ifi = if_update(&f); + + if (!scan) + if_end_partial_update(ifi); + } +} + +static void +nl_parse_addr4(struct ifaddrmsg *i, int scan, int new) +{ + struct rtattr *a[BIRD_IFA_MAX]; + struct iface *ifi; + u32 ifa_flags; + int scope; + + if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want4, a, sizeof(a))) + return; + + if (!a[IFA_LOCAL]) + { + log(L_ERR "KIF: Malformed message received (missing IFA_LOCAL)"); + return; + } + if (!a[IFA_ADDRESS]) + { + log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)"); + return; + } + + ifi = if_find_by_index(i->ifa_index); + if (!ifi) + { + log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index); + return; + } + + if (a[IFA_FLAGS]) + ifa_flags = rta_get_u32(a[IFA_FLAGS]); + else + ifa_flags = i->ifa_flags; + + struct ifa ifa; + bzero(&ifa, sizeof(ifa)); + ifa.iface = ifi; + if (ifa_flags & IFA_F_SECONDARY) + ifa.flags |= IA_SECONDARY; + + ifa.ip = rta_get_ipa(a[IFA_LOCAL]); + + if (i->ifa_prefixlen > IP4_MAX_PREFIX_LENGTH) + { + log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen); + new = 0; + } + if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH) + { + ifa.brd = rta_get_ipa(a[IFA_ADDRESS]); + net_fill_ip4(&ifa.prefix, rta_get_ip4(a[IFA_ADDRESS]), i->ifa_prefixlen); + + /* It is either a host address or a peer address */ + if (ipa_equal(ifa.ip, ifa.brd)) + ifa.flags |= IA_HOST; + else + { + ifa.flags |= IA_PEER; + ifa.opposite = ifa.brd; + } + } + else + { + net_fill_ip4(&ifa.prefix, ipa_to_ip4(ifa.ip), i->ifa_prefixlen); + net_normalize(&ifa.prefix); + + if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 1) + ifa.opposite = ipa_opposite_m1(ifa.ip); + + if (i->ifa_prefixlen == IP4_MAX_PREFIX_LENGTH - 2) + ifa.opposite = ipa_opposite_m2(ifa.ip); + + if (ifi->flags & IF_BROADCAST) + { + /* If kernel offers us a broadcast address, we trust it */ + if (a[IFA_BROADCAST]) + ifa.brd = ipa_from_ip4(rta_get_ip4(a[IFA_BROADCAST])); + /* Otherwise we create one (except for /31) */ + else if (i->ifa_prefixlen < (IP4_MAX_PREFIX_LENGTH - 1)) + ifa.brd = ipa_from_ip4(ip4_or(ipa_to_ip4(ifa.ip), + ip4_not(ip4_mkmask(i->ifa_prefixlen)))); + } + } + + scope = ipa_classify(ifa.ip); + if (scope < 0) + { + log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name); + return; + } + ifa.scope = 
scope & IADDR_SCOPE_MASK; + + DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n", + ifi->index, ifi->name, + new ? "added" : "removed", + ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite); + + if (new) + ifa_update(&ifa); + else + ifa_delete(&ifa); + + if (!scan) + if_end_partial_update(ifi); +} + +static void +nl_parse_addr6(struct ifaddrmsg *i, int scan, int new) +{ + struct rtattr *a[BIRD_IFA_MAX]; + struct iface *ifi; + u32 ifa_flags; + int scope; + + if (!nl_parse_attrs(IFA_RTA(i), ifa_attr_want6, a, sizeof(a))) + return; + + if (!a[IFA_ADDRESS]) + { + log(L_ERR "KIF: Malformed message received (missing IFA_ADDRESS)"); + return; + } + + ifi = if_find_by_index(i->ifa_index); + if (!ifi) + { + log(L_ERR "KIF: Received address message for unknown interface %d", i->ifa_index); + return; + } + + if (a[IFA_FLAGS]) + ifa_flags = rta_get_u32(a[IFA_FLAGS]); + else + ifa_flags = i->ifa_flags; + + struct ifa ifa; + bzero(&ifa, sizeof(ifa)); + ifa.iface = ifi; + if (ifa_flags & IFA_F_SECONDARY) + ifa.flags |= IA_SECONDARY; + + /* Ignore tentative addresses silently */ + if (ifa_flags & IFA_F_TENTATIVE) + return; + + /* IFA_LOCAL can be unset for IPv6 interfaces */ + ifa.ip = rta_get_ipa(a[IFA_LOCAL] ? : a[IFA_ADDRESS]); + + if (i->ifa_prefixlen > IP6_MAX_PREFIX_LENGTH) + { + log(L_ERR "KIF: Invalid prefix length for interface %s: %d", ifi->name, i->ifa_prefixlen); + new = 0; + } + if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH) + { + ifa.brd = rta_get_ipa(a[IFA_ADDRESS]); + net_fill_ip6(&ifa.prefix, rta_get_ip6(a[IFA_ADDRESS]), i->ifa_prefixlen); + + /* It is either a host address or a peer address */ + if (ipa_equal(ifa.ip, ifa.brd)) + ifa.flags |= IA_HOST; + else + { + ifa.flags |= IA_PEER; + ifa.opposite = ifa.brd; + } + } + else + { + net_fill_ip6(&ifa.prefix, ipa_to_ip6(ifa.ip), i->ifa_prefixlen); + net_normalize(&ifa.prefix); + + if (i->ifa_prefixlen == IP6_MAX_PREFIX_LENGTH - 1) + ifa.opposite = ipa_opposite_m1(ifa.ip); + } + + scope = ipa_classify(ifa.ip); + if (scope < 0) + { + log(L_ERR "KIF: Invalid interface address %I for %s", ifa.ip, ifi->name); + return; + } + ifa.scope = scope & IADDR_SCOPE_MASK; + + DBG("KIF: IF%d(%s): %s IPA %I, flg %x, net %N, brd %I, opp %I\n", + ifi->index, ifi->name, + new ? 
"added" : "removed", + ifa.ip, ifa.flags, &ifa.prefix, ifa.brd, ifa.opposite); + + if (new) + ifa_update(&ifa); + else + ifa_delete(&ifa); + + if (!scan) + if_end_partial_update(ifi); +} + +static void +nl_parse_addr(struct nlmsghdr *h, int scan) +{ + struct ifaddrmsg *i; + + if (!(i = nl_checkin(h, sizeof(*i)))) + return; + + int new = (h->nlmsg_type == RTM_NEWADDR); + + switch (i->ifa_family) + { + case AF_INET: + return nl_parse_addr4(i, scan, new); + + case AF_INET6: + return nl_parse_addr6(i, scan, new); + } +} + +void +kif_do_scan(struct kif_proto *p UNUSED) +{ + struct nlmsghdr *h; + + if_start_update(); + + nl_request_dump(AF_UNSPEC, RTM_GETLINK); + while (h = nl_get_scan()) + if (h->nlmsg_type == RTM_NEWLINK || h->nlmsg_type == RTM_DELLINK) + nl_parse_link(h, 1); + else + log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type); + + /* Re-resolve master interface for slaves */ + struct iface *i; + WALK_LIST(i, iface_list) + if (i->master_index) + { + struct iface f = { + .flags = i->flags, + .mtu = i->mtu, + .index = i->index, + .master_index = i->master_index, + .master = if_find_by_index(i->master_index) + }; + + if (f.master != i->master) + { + memcpy(f.name, i->name, sizeof(f.name)); + if_update(&f); + } + } + + nl_request_dump(AF_INET, RTM_GETADDR); + while (h = nl_get_scan()) + if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR) + nl_parse_addr(h, 1); + else + log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type); + + nl_request_dump(AF_INET6, RTM_GETADDR); + while (h = nl_get_scan()) + if (h->nlmsg_type == RTM_NEWADDR || h->nlmsg_type == RTM_DELADDR) + nl_parse_addr(h, 1); + else + log(L_DEBUG "nl_scan_ifaces: Unknown packet received (type=%d)", h->nlmsg_type); + + if_end_update(); +} + +/* + * Routes + */ + +static inline u32 +krt_table_id(struct krt_proto *p) +{ + return KRT_CF->sys.table_id; +} + +static HASH(struct krt_proto) nl_table_map; + +#define RTH_KEY(p) p->af, krt_table_id(p) +#define RTH_NEXT(p) p->sys.hash_next +#define RTH_EQ(a1,i1,a2,i2) a1 == a2 && i1 == i2 +#define RTH_FN(a,i) a ^ u32_hash(i) + +#define RTH_REHASH rth_rehash +#define RTH_PARAMS /8, *2, 2, 2, 6, 20 + +HASH_DEFINE_REHASH_FN(RTH, struct krt_proto) + +int +krt_capable(rte *e) +{ + rta *a = e->attrs; + + switch (a->dest) + { + case RTD_UNICAST: + case RTD_BLACKHOLE: + case RTD_UNREACHABLE: + case RTD_PROHIBIT: + return 1; + + default: + return 0; + } +} + +static inline int +nh_bufsize(struct nexthop *nh) +{ + int rv = 0; + for (; nh != NULL; nh = nh->next) + rv += RTNH_LENGTH(RTA_LENGTH(sizeof(ip_addr))); + return rv; +} + +static int +nl_send_route(struct krt_proto *p, rte *e, int op, int dest, struct nexthop *nh) +{ + eattr *ea; + net *net = e->net; + rta *a = e->attrs; + ea_list *eattrs = a->eattrs; + int bufsize = 128 + KRT_METRICS_MAX*8 + nh_bufsize(&(a->nh)); + u32 priority = 0; + + struct { + struct nlmsghdr h; + struct rtmsg r; + char buf[0]; + } *r; + + int rsize = sizeof(*r) + bufsize; + r = alloca(rsize); + + DBG("nl_send_route(%N,op=%x)\n", net->n.addr, op); + + bzero(&r->h, sizeof(r->h)); + bzero(&r->r, sizeof(r->r)); + r->h.nlmsg_type = op ? RTM_NEWROUTE : RTM_DELROUTE; + r->h.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)); + r->h.nlmsg_flags = op | NLM_F_REQUEST | NLM_F_ACK; + + r->r.rtm_family = p->af; + r->r.rtm_dst_len = net_pxlen(net->n.addr); + r->r.rtm_protocol = RTPROT_BIRD; + r->r.rtm_scope = RT_SCOPE_NOWHERE; +#ifdef HAVE_MPLS_KERNEL + if (p->af == AF_MPLS) + { + /* + * Kernel MPLS code is a bit picky. 
We must: + * 1) Always set RT_SCOPE_UNIVERSE and RTN_UNICAST (even for RTM_DELROUTE) + * 2) Never use RTA_PRIORITY + */ + + u32 label = net_mpls(net->n.addr); + nl_add_attr_mpls(&r->h, rsize, RTA_DST, 1, &label); + r->r.rtm_scope = RT_SCOPE_UNIVERSE; + r->r.rtm_type = RTN_UNICAST; + } + else +#endif + { + nl_add_attr_ipa(&r->h, rsize, RTA_DST, net_prefix(net->n.addr)); + + /* Add source address for IPv6 SADR routes */ + if (net->n.addr->type == NET_IP6_SADR) + { + net_addr_ip6_sadr *a = (void *) &net->n.addr; + nl_add_attr_ip6(&r->h, rsize, RTA_SRC, a->src_prefix); + r->r.rtm_src_len = a->src_pxlen; + } + } + + /* + * Strange behavior for RTM_DELROUTE: + * 1) rtm_family is ignored in IPv6, works for IPv4 + * 2) not setting RTA_PRIORITY is different from setting default value (on IPv6) + * 3) not setting RTA_PRIORITY is equivalent to setting 0, which is wildcard + */ + + if (krt_table_id(p) < 256) + r->r.rtm_table = krt_table_id(p); + else + nl_add_attr_u32(&r->h, rsize, RTA_TABLE, krt_table_id(p)); + + if (p->af == AF_MPLS) + priority = 0; + else if (a->source == RTS_DUMMY) + priority = e->u.krt.metric; + else if (KRT_CF->sys.metric) + priority = KRT_CF->sys.metric; + else if ((op != NL_OP_DELETE) && (ea = ea_find(eattrs, EA_KRT_METRIC))) + priority = ea->u.data; + + if (priority) + nl_add_attr_u32(&r->h, rsize, RTA_PRIORITY, priority); + + /* For route delete, we do not specify remaining route attributes */ + if (op == NL_OP_DELETE) + goto dest; + + /* Default scope is LINK for device routes, UNIVERSE otherwise */ + if (p->af == AF_MPLS) + r->r.rtm_scope = RT_SCOPE_UNIVERSE; + else if (ea = ea_find(eattrs, EA_KRT_SCOPE)) + r->r.rtm_scope = ea->u.data; + else + r->r.rtm_scope = (dest == RTD_UNICAST && ipa_zero(nh->gw)) ? RT_SCOPE_LINK : RT_SCOPE_UNIVERSE; + + if (ea = ea_find(eattrs, EA_KRT_PREFSRC)) + nl_add_attr_ipa(&r->h, rsize, RTA_PREFSRC, *(ip_addr *)ea->u.ptr->data); + + if (ea = ea_find(eattrs, EA_KRT_REALM)) + nl_add_attr_u32(&r->h, rsize, RTA_FLOW, ea->u.data); + + + u32 metrics[KRT_METRICS_MAX]; + metrics[0] = 0; + + struct ea_walk_state ews = { .eattrs = eattrs }; + while (ea = ea_walk(&ews, EA_KRT_METRICS, KRT_METRICS_MAX)) + { + int id = ea->id - EA_KRT_METRICS; + metrics[0] |= 1 << id; + metrics[id] = ea->u.data; + } + + if (metrics[0]) + nl_add_metrics(&r->h, rsize, metrics, KRT_METRICS_MAX); + + +dest: + switch (dest) + { + case RTD_UNICAST: + r->r.rtm_type = RTN_UNICAST; + if (nh->next && !krt_ecmp6(p)) + nl_add_multipath(&r->h, rsize, nh, p->af, eattrs); + else + { + nl_add_attr_u32(&r->h, rsize, RTA_OIF, nh->iface->index); + nl_add_nexthop(&r->h, rsize, nh, p->af); + + if (nh->flags & RNF_ONLINK) + r->r.rtm_flags |= RTNH_F_ONLINK; + } + break; + case RTD_BLACKHOLE: + r->r.rtm_type = RTN_BLACKHOLE; + break; + case RTD_UNREACHABLE: + r->r.rtm_type = RTN_UNREACHABLE; + break; + case RTD_PROHIBIT: + r->r.rtm_type = RTN_PROHIBIT; + break; + case RTD_NONE: + break; + default: + bug("krt_capable inconsistent with nl_send_route"); + } + + /* Ignore missing for DELETE */ + return nl_exchange(&r->h, (op == NL_OP_DELETE)); +} + +static inline int +nl_add_rte(struct krt_proto *p, rte *e) +{ + rta *a = e->attrs; + int err = 0; + + if (krt_ecmp6(p) && a->nh.next) + { + struct nexthop *nh = &(a->nh); + + err = nl_send_route(p, e, NL_OP_ADD, RTD_UNICAST, nh); + if (err < 0) + return err; + + for (nh = nh->next; nh; nh = nh->next) + err += nl_send_route(p, e, NL_OP_APPEND, RTD_UNICAST, nh); + + return err; + } + + return nl_send_route(p, e, NL_OP_ADD, a->dest, &(a->nh)); +} + +static 
inline int +nl_delete_rte(struct krt_proto *p, rte *e) +{ + int err = 0; + + /* For IPv6, we just repeatedly request DELETE until we get error */ + do + err = nl_send_route(p, e, NL_OP_DELETE, RTD_NONE, NULL); + while (krt_ecmp6(p) && !err); + + return err; +} + +static inline int +nl_replace_rte(struct krt_proto *p, rte *e) +{ + rta *a = e->attrs; + return nl_send_route(p, e, NL_OP_REPLACE, a->dest, &(a->nh)); +} + + +void +krt_replace_rte(struct krt_proto *p, net *n UNUSED, rte *new, rte *old) +{ + int err = 0; + + /* + * We use NL_OP_REPLACE for IPv4, it has an issue with not checking for + * matching rtm_protocol, but that is OK when dedicated priority is used. + * + * We do not use NL_OP_REPLACE for IPv6, as it has broken semantics for ECMP + * and with some kernel versions ECMP replace crashes kernel. Would need more + * testing and checks for kernel versions. + * + * For IPv6, we use NL_OP_DELETE and then NL_OP_ADD. We also do not trust the + * old route value, so we do not try to optimize IPv6 ECMP reconfigurations. + */ + + if (krt_ipv4(p) && old && new) + { + err = nl_replace_rte(p, new); + } + else + { + if (old) + nl_delete_rte(p, old); + + if (new) + err = nl_add_rte(p, new); + } + + if (new) + { + if (err < 0) + bmap_clear(&p->sync_map, new->id); + else + bmap_set(&p->sync_map, new->id); + } +} + +static int +nl_mergable_route(struct nl_parse_state *s, net *net, struct krt_proto *p, uint priority, uint krt_type, uint rtm_family) +{ + /* Route merging is used for IPv6 scans */ + if (!s->scan || (rtm_family != AF_INET6)) + return 0; + + /* Saved and new route must have same network, proto/table, and priority */ + if ((s->net != net) || (s->proto != p) || (s->krt_metric != priority)) + return 0; + + /* Both must be regular unicast routes */ + if ((s->krt_type != RTN_UNICAST) || (krt_type != RTN_UNICAST)) + return 0; + + return 1; +} + +static void +nl_announce_route(struct nl_parse_state *s) +{ + rte *e = rte_get_temp(s->attrs); + e->net = s->net; + e->u.krt.src = s->krt_src; + e->u.krt.proto = s->krt_proto; + e->u.krt.seen = 0; + e->u.krt.best = 0; + e->u.krt.metric = s->krt_metric; + + if (s->scan) + krt_got_route(s->proto, e); + else + krt_got_route_async(s->proto, e, s->new); + + s->net = NULL; + s->attrs = NULL; + s->proto = NULL; + lp_flush(s->pool); +} + +static inline void +nl_parse_begin(struct nl_parse_state *s, int scan) +{ + memset(s, 0, sizeof (struct nl_parse_state)); + s->pool = nl_linpool; + s->scan = scan; +} + +static inline void +nl_parse_end(struct nl_parse_state *s) +{ + if (s->net) + nl_announce_route(s); +} + + +#define SKIP0(ARG, ...) do { DBG("KRT: Ignoring route - " ARG, ##__VA_ARGS__); return; } while(0) +#define SKIP(ARG, ...) 
do { DBG("KRT: Ignoring route %N - " ARG, &dst, ##__VA_ARGS__); return; } while(0) + +static void +nl_parse_route(struct nl_parse_state *s, struct nlmsghdr *h) +{ + struct krt_proto *p; + struct rtmsg *i; + struct rtattr *a[BIRD_RTA_MAX]; + int new = h->nlmsg_type == RTM_NEWROUTE; + + net_addr dst, src = {}; + u32 oif = ~0; + u32 table_id; + u32 priority = 0; + u32 def_scope = RT_SCOPE_UNIVERSE; + int krt_src; + + if (!(i = nl_checkin(h, sizeof(*i)))) + return; + + switch (i->rtm_family) + { + case AF_INET: + if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want4, a, sizeof(a))) + return; + + if (a[RTA_DST]) + net_fill_ip4(&dst, rta_get_ip4(a[RTA_DST]), i->rtm_dst_len); + else + net_fill_ip4(&dst, IP4_NONE, 0); + break; + + case AF_INET6: + if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want6, a, sizeof(a))) + return; + + if (a[RTA_DST]) + net_fill_ip6(&dst, rta_get_ip6(a[RTA_DST]), i->rtm_dst_len); + else + net_fill_ip6(&dst, IP6_NONE, 0); + + if (a[RTA_SRC]) + net_fill_ip6(&src, rta_get_ip6(a[RTA_SRC]), i->rtm_src_len); + else + net_fill_ip6(&src, IP6_NONE, 0); + break; + +#ifdef HAVE_MPLS_KERNEL + case AF_MPLS: + if (!nl_parse_attrs(RTM_RTA(i), rtm_attr_want_mpls, a, sizeof(a))) + return; + + if (!a[RTA_DST]) + SKIP0("MPLS route without RTA_DST\n"); + + if (rta_get_mpls(a[RTA_DST], rta_mpls_stack) != 1) + SKIP0("MPLS route with multi-label RTA_DST\n"); + + net_fill_mpls(&dst, rta_mpls_stack[0]); + break; +#endif + + default: + return; + } + + if (a[RTA_OIF]) + oif = rta_get_u32(a[RTA_OIF]); + + if (a[RTA_TABLE]) + table_id = rta_get_u32(a[RTA_TABLE]); + else + table_id = i->rtm_table; + + if (i->rtm_flags & RTM_F_CLONED) + SKIP("cloned\n"); + + /* Do we know this table? */ + p = HASH_FIND(nl_table_map, RTH, i->rtm_family, table_id); + if (!p) + SKIP("unknown table %u\n", table_id); + + if (a[RTA_SRC] && (p->p.net_type != NET_IP6_SADR)) + SKIP("src prefix for non-SADR channel\n"); + + if (a[RTA_IIF]) + SKIP("IIF set\n"); + + if (i->rtm_tos != 0) /* We don't support TOS */ + SKIP("TOS %02x\n", i->rtm_tos); + + if (s->scan && !new) + SKIP("RTM_DELROUTE in scan\n"); + + if (a[RTA_PRIORITY]) + priority = rta_get_u32(a[RTA_PRIORITY]); + + int c = net_classify(&dst); + if ((c < 0) || !(c & IADDR_HOST) || ((c & IADDR_SCOPE_MASK) <= SCOPE_LINK)) + SKIP("strange class/scope\n"); + + switch (i->rtm_protocol) + { + case RTPROT_UNSPEC: + SKIP("proto unspec\n"); + + case RTPROT_REDIRECT: + krt_src = KRT_SRC_REDIRECT; + break; + + case RTPROT_KERNEL: + krt_src = KRT_SRC_KERNEL; + return; + + case RTPROT_BIRD: + if (!s->scan) + SKIP("echo\n"); + krt_src = KRT_SRC_BIRD; + break; + + case RTPROT_BOOT: + default: + krt_src = KRT_SRC_ALIEN; + } + + net_addr *n = &dst; + if (p->p.net_type == NET_IP6_SADR) + { + n = alloca(sizeof(net_addr_ip6_sadr)); + net_fill_ip6_sadr(n, net6_prefix(&dst), net6_pxlen(&dst), + net6_prefix(&src), net6_pxlen(&src)); + } + + net *net = net_get(p->p.main_channel->table, n); + + if (s->net && !nl_mergable_route(s, net, p, priority, i->rtm_type, i->rtm_family)) + nl_announce_route(s); + + rta *ra = lp_allocz(s->pool, RTA_MAX_SIZE); + ra->src = p->p.main_source; + ra->source = RTS_INHERIT; + ra->scope = SCOPE_UNIVERSE; + + if (a[RTA_FLOW]) + s->rta_flow = rta_get_u32(a[RTA_FLOW]); + else + s->rta_flow = 0; + + switch (i->rtm_type) + { + case RTN_UNICAST: + ra->dest = RTD_UNICAST; + + if (a[RTA_MULTIPATH]) + { + struct nexthop *nh = nl_parse_multipath(s, p, n, a[RTA_MULTIPATH], i->rtm_family, krt_src); + if (!nh) + SKIP("strange RTA_MULTIPATH\n"); + + nexthop_link(ra, nh); + break; + } + + if 
((i->rtm_flags & RTNH_F_DEAD) && (krt_src != KRT_SRC_BIRD)) + SKIP("ignore RTNH_F_DEAD\n"); + + ra->nh.iface = if_find_by_index(oif); + if (!ra->nh.iface) + { + log(L_ERR "KRT: Received route %N with unknown ifindex %u", net->n.addr, oif); + return; + } + + if (a[RTA_GATEWAY]) + ra->nh.gw = rta_get_ipa(a[RTA_GATEWAY]); + +#ifdef HAVE_MPLS_KERNEL + if (a[RTA_VIA]) + ra->nh.gw = rta_get_via(a[RTA_VIA]); +#endif + + if (ipa_nonzero(ra->nh.gw)) + { + /* Silently skip strange 6to4 routes */ + const net_addr_ip6 sit = NET_ADDR_IP6(IP6_NONE, 96); + if ((i->rtm_family == AF_INET6) && ipa_in_netX(ra->nh.gw, (net_addr *) &sit)) + return; + + if (i->rtm_flags & RTNH_F_ONLINK) + ra->nh.flags |= RNF_ONLINK; + + neighbor *nbr; + nbr = neigh_find(&p->p, ra->nh.gw, ra->nh.iface, + (ra->nh.flags & RNF_ONLINK) ? NEF_ONLINK : 0); + if (!nbr || (nbr->scope == SCOPE_HOST)) + { + log(L_ERR "KRT: Received route %N with strange next-hop %I", net->n.addr, + ra->nh.gw); + return; + } + } + + break; + case RTN_BLACKHOLE: + ra->dest = RTD_BLACKHOLE; + break; + case RTN_UNREACHABLE: + ra->dest = RTD_UNREACHABLE; + break; + case RTN_PROHIBIT: + ra->dest = RTD_PROHIBIT; + break; + /* FIXME: What about RTN_THROW? */ + default: + SKIP("type %d\n", i->rtm_type); + return; + } + +#ifdef HAVE_MPLS_KERNEL + if ((i->rtm_family == AF_MPLS) && a[RTA_NEWDST] && !ra->nh.next) + ra->nh.labels = rta_get_mpls(a[RTA_NEWDST], ra->nh.label); + + if (a[RTA_ENCAP] && a[RTA_ENCAP_TYPE] && !ra->nh.next) + { + switch (rta_get_u16(a[RTA_ENCAP_TYPE])) + { + case LWTUNNEL_ENCAP_MPLS: + { + struct rtattr *enca[BIRD_RTA_MAX]; + nl_attr_len = RTA_PAYLOAD(a[RTA_ENCAP]); + nl_parse_attrs(RTA_DATA(a[RTA_ENCAP]), encap_mpls_want, enca, sizeof(enca)); + ra->nh.labels = rta_get_mpls(enca[RTA_DST], ra->nh.label); + break; + } + default: + SKIP("unknown encapsulation method %d\n", rta_get_u16(a[RTA_ENCAP_TYPE])); + break; + } + } +#endif + + if (i->rtm_scope != def_scope) + { + ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr)); + ea->next = ra->eattrs; + ra->eattrs = ea; + ea->flags = EALF_SORTED; + ea->count = 1; + ea->attrs[0].id = EA_KRT_SCOPE; + ea->attrs[0].flags = 0; + ea->attrs[0].type = EAF_TYPE_INT; + ea->attrs[0].u.data = i->rtm_scope; + } + + if (a[RTA_PREFSRC]) + { + ip_addr ps = rta_get_ipa(a[RTA_PREFSRC]); + + ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr)); + ea->next = ra->eattrs; + ra->eattrs = ea; + ea->flags = EALF_SORTED; + ea->count = 1; + ea->attrs[0].id = EA_KRT_PREFSRC; + ea->attrs[0].flags = 0; + ea->attrs[0].type = EAF_TYPE_IP_ADDRESS; + + struct adata *ad = lp_alloc(s->pool, sizeof(struct adata) + sizeof(ps)); + ad->length = sizeof(ps); + memcpy(ad->data, &ps, sizeof(ps)); + + ea->attrs[0].u.ptr = ad; + } + + /* Can be set per-route or per-nexthop */ + if (s->rta_flow) + { + ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + sizeof(eattr)); + ea->next = ra->eattrs; + ra->eattrs = ea; + ea->flags = EALF_SORTED; + ea->count = 1; + ea->attrs[0].id = EA_KRT_REALM; + ea->attrs[0].flags = 0; + ea->attrs[0].type = EAF_TYPE_INT; + ea->attrs[0].u.data = s->rta_flow; + } + + if (a[RTA_METRICS]) + { + u32 metrics[KRT_METRICS_MAX]; + ea_list *ea = lp_alloc(s->pool, sizeof(ea_list) + KRT_METRICS_MAX * sizeof(eattr)); + int t, n = 0; + + if (nl_parse_metrics(a[RTA_METRICS], metrics, ARRAY_SIZE(metrics)) < 0) + { + log(L_ERR "KRT: Received route %N with strange RTA_METRICS attribute", net->n.addr); + return; + } + + for (t = 1; t < KRT_METRICS_MAX; t++) + if (metrics[0] & (1 << t)) + { + ea->attrs[n].id = 
EA_CODE(PROTOCOL_KERNEL, KRT_METRICS_OFFSET + t); + ea->attrs[n].flags = 0; + ea->attrs[n].type = EAF_TYPE_INT; /* FIXME: Some are EAF_TYPE_BITFIELD */ + ea->attrs[n].u.data = metrics[t]; + n++; + } + + if (n > 0) + { + ea->next = ra->eattrs; + ea->flags = EALF_SORTED; + ea->count = n; + ra->eattrs = ea; + } + } + + /* + * Ideally, now we would send the received route to the rest of kernel code. + * But IPv6 ECMP routes before 4.11 are sent as a sequence of routes, so we + * postpone it and merge next hops until the end of the sequence. Note that + * when doing merging of next hops, we expect the new route to be unipath. + * Otherwise, we ignore additional next hops in nexthop_insert(). + */ + + if (!s->net) + { + /* Store the new route */ + s->net = net; + s->attrs = ra; + s->proto = p; + s->new = new; + s->krt_src = krt_src; + s->krt_type = i->rtm_type; + s->krt_proto = i->rtm_protocol; + s->krt_metric = priority; + } + else + { + /* Merge next hops with the stored route */ + rta *oa = s->attrs; + + struct nexthop *nhs = &oa->nh; + nexthop_insert(&nhs, &ra->nh); + + /* Perhaps new nexthop is inserted at the first position */ + if (nhs == &ra->nh) + { + /* Swap rtas */ + s->attrs = ra; + + /* Keep old eattrs */ + ra->eattrs = oa->eattrs; + } + } +} + +void +krt_do_scan(struct krt_proto *p UNUSED) /* CONFIG_ALL_TABLES_AT_ONCE => p is NULL */ +{ + struct nlmsghdr *h; + struct nl_parse_state s; + + nl_parse_begin(&s, 1); + nl_request_dump(AF_UNSPEC, RTM_GETROUTE); + while (h = nl_get_scan()) + if (h->nlmsg_type == RTM_NEWROUTE || h->nlmsg_type == RTM_DELROUTE) + nl_parse_route(&s, h); + else + log(L_DEBUG "nl_scan_fire: Unknown packet received (type=%d)", h->nlmsg_type); + nl_parse_end(&s); +} + +/* + * Asynchronous Netlink interface + */ + +static sock *nl_async_sk; /* BIRD socket for asynchronous notifications */ +static byte *nl_async_rx_buffer; /* Receive buffer */ + +static void +nl_async_msg(struct nlmsghdr *h) +{ + struct nl_parse_state s; + + switch (h->nlmsg_type) + { + case RTM_NEWROUTE: + case RTM_DELROUTE: + DBG("KRT: Received async route notification (%d)\n", h->nlmsg_type); + nl_parse_begin(&s, 0); + nl_parse_route(&s, h); + nl_parse_end(&s); + break; + case RTM_NEWLINK: + case RTM_DELLINK: + DBG("KRT: Received async link notification (%d)\n", h->nlmsg_type); + if (kif_proto) + nl_parse_link(h, 0); + break; + case RTM_NEWADDR: + case RTM_DELADDR: + DBG("KRT: Received async address notification (%d)\n", h->nlmsg_type); + if (kif_proto) + nl_parse_addr(h, 0); + break; + default: + DBG("KRT: Received unknown async notification (%d)\n", h->nlmsg_type); + } +} + +static int +nl_async_hook(sock *sk, uint size UNUSED) +{ + struct iovec iov = { nl_async_rx_buffer, NL_RX_SIZE }; + struct sockaddr_nl sa; + struct msghdr m = { + .msg_name = &sa, + .msg_namelen = sizeof(sa), + .msg_iov = &iov, + .msg_iovlen = 1, + }; + struct nlmsghdr *h; + int x; + uint len; + + x = recvmsg(sk->fd, &m, 0); + if (x < 0) + { + if (errno == ENOBUFS) + { + /* + * Netlink reports some packets have been thrown away. + * One day we might react to it by asking for route table + * scan in near future. 
+ */ + log(L_WARN "Kernel dropped some netlink messages, will resync on next scan."); + return 1; /* More data are likely to be ready */ + } + else if (errno != EWOULDBLOCK) + log(L_ERR "Netlink recvmsg: %m"); + return 0; + } + if (sa.nl_pid) /* It isn't from the kernel */ + { + DBG("Non-kernel packet\n"); + return 1; + } + h = (void *) nl_async_rx_buffer; + len = x; + if (m.msg_flags & MSG_TRUNC) + { + log(L_WARN "Netlink got truncated asynchronous message"); + return 1; + } + while (NLMSG_OK(h, len)) + { + nl_async_msg(h); + h = NLMSG_NEXT(h, len); + } + if (len) + log(L_WARN "nl_async_hook: Found packet remnant of size %d", len); + return 1; +} + +static void +nl_async_err_hook(sock *sk, int e UNUSED) +{ + nl_async_hook(sk, 0); +} + +static void +nl_open_async(void) +{ + sock *sk; + struct sockaddr_nl sa; + int fd; + + if (nl_async_sk) + return; + + DBG("KRT: Opening async netlink socket\n"); + + fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (fd < 0) + { + log(L_ERR "Unable to open asynchronous rtnetlink socket: %m"); + return; + } + + bzero(&sa, sizeof(sa)); + sa.nl_family = AF_NETLINK; + sa.nl_groups = RTMGRP_LINK | + RTMGRP_IPV4_IFADDR | RTMGRP_IPV4_ROUTE | + RTMGRP_IPV6_IFADDR | RTMGRP_IPV6_ROUTE; + + if (bind(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0) + { + log(L_ERR "Unable to bind asynchronous rtnetlink socket: %m"); + close(fd); + return; + } + + nl_async_rx_buffer = xmalloc(NL_RX_SIZE); + + sk = nl_async_sk = sk_new(krt_pool); + sk->type = SK_MAGIC; + sk->rx_hook = nl_async_hook; + sk->err_hook = nl_async_err_hook; + sk->fd = fd; + if (sk_open(sk) < 0) + bug("Netlink: sk_open failed"); +} + + +/* + * Interface to the UNIX krt module + */ + +void +krt_sys_io_init(void) +{ + nl_linpool = lp_new_default(krt_pool); + HASH_INIT(nl_table_map, krt_pool, 6); +} + +int +krt_sys_start(struct krt_proto *p) +{ + struct krt_proto *old = HASH_FIND(nl_table_map, RTH, p->af, krt_table_id(p)); + + if (old) + { + log(L_ERR "%s: Kernel table %u already registered by %s", + p->p.name, krt_table_id(p), old->p.name); + return 0; + } + + HASH_INSERT2(nl_table_map, RTH, krt_pool, p); + + nl_open(); + nl_open_async(); + + return 1; +} + +void +krt_sys_shutdown(struct krt_proto *p) +{ + HASH_REMOVE2(nl_table_map, RTH, krt_pool, p); +} + +int +krt_sys_reconfigure(struct krt_proto *p UNUSED, struct krt_config *n, struct krt_config *o) +{ + return (n->sys.table_id == o->sys.table_id) && (n->sys.metric == o->sys.metric); +} + +void +krt_sys_init_config(struct krt_config *cf) +{ + cf->sys.table_id = RT_TABLE_MAIN; + cf->sys.metric = 32; +} + +void +krt_sys_copy_config(struct krt_config *d, struct krt_config *s) +{ + d->sys.table_id = s->sys.table_id; + d->sys.metric = s->sys.metric; +} + +static const char *krt_metrics_names[KRT_METRICS_MAX] = { + NULL, "lock", "mtu", "window", "rtt", "rttvar", "sstresh", "cwnd", "advmss", + "reordering", "hoplimit", "initcwnd", "features", "rto_min", "initrwnd", "quickack" +}; + +static const char *krt_features_names[KRT_FEATURES_MAX] = { + "ecn", NULL, NULL, "allfrag" +}; + +int +krt_sys_get_attr(const eattr *a, byte *buf, int buflen UNUSED) +{ + switch (a->id) + { + case EA_KRT_PREFSRC: + bsprintf(buf, "prefsrc"); + return GA_NAME; + + case EA_KRT_REALM: + bsprintf(buf, "realm"); + return GA_NAME; + + case EA_KRT_SCOPE: + bsprintf(buf, "scope"); + return GA_NAME; + + case EA_KRT_LOCK: + buf += bsprintf(buf, "lock:"); + ea_format_bitfield(a, buf, buflen, krt_metrics_names, 2, KRT_METRICS_MAX); + return GA_FULL; + + case EA_KRT_FEATURES: + buf += bsprintf(buf, 
"features:"); + ea_format_bitfield(a, buf, buflen, krt_features_names, 0, KRT_FEATURES_MAX); + return GA_FULL; + + default:; + int id = (int)EA_ID(a->id) - KRT_METRICS_OFFSET; + if (id > 0 && id < KRT_METRICS_MAX) + { + bsprintf(buf, "%s", krt_metrics_names[id]); + return GA_NAME; + } + + return GA_UNKNOWN; + } +} + + + +void +kif_sys_start(struct kif_proto *p UNUSED) +{ + nl_open(); + nl_open_async(); +} + +void +kif_sys_shutdown(struct kif_proto *p UNUSED) +{ +} + +int +kif_update_sysdep_addr(struct iface *i UNUSED) +{ + return 0; +} diff --git a/sysdep/linux/sysio.h b/sysdep/linux/sysio.h index e21ff487..f13eda7c 100644 --- a/sysdep/linux/sysio.h +++ b/sysdep/linux/sysio.h @@ -10,6 +10,10 @@ #define IPV6_MINHOPCOUNT 73 #endif +#ifndef IPV6_FREEBIND +#define IPV6_FREEBIND 78 +#endif + #ifndef TCP_MD5SIG_EXT #define TCP_MD5SIG_EXT 32 #endif @@ -266,3 +270,18 @@ sk_set_priority(sock *s, int prio) return 0; } +static inline int +sk_set_freebind(sock *s) +{ + int y = 1; + + if (sk_is_ipv4(s)) + if (setsockopt(s->fd, SOL_IP, IP_FREEBIND, &y, sizeof(y)) < 0) + ERR("IP_FREEBIND"); + + if (sk_is_ipv6(s)) + if (setsockopt(s->fd, SOL_IPV6, IPV6_FREEBIND, &y, sizeof(y)) < 0) + ERR("IPV6_FREEBIND"); + + return 0; +} diff --git a/sysdep/unix/alloc.c b/sysdep/unix/alloc.c index 4c9d5eb5..755b5fa5 100644 --- a/sysdep/unix/alloc.c +++ b/sysdep/unix/alloc.c @@ -8,7 +8,10 @@ #include "nest/bird.h" #include "lib/resource.h" +#include "lib/lists.h" +#include "lib/event.h" +#include <errno.h> #include <stdlib.h> #include <unistd.h> @@ -17,82 +20,168 @@ #endif long page_size = 0; -_Bool alloc_multipage = 0; #ifdef HAVE_MMAP +#define KEEP_PAGES_MAIN_MAX 256 +#define KEEP_PAGES_MAIN_MIN 8 +#define CLEANUP_PAGES_BULK 256 + +_Static_assert(KEEP_PAGES_MAIN_MIN * 4 < KEEP_PAGES_MAIN_MAX); + static _Bool use_fake = 0; + +#if DEBUGGING +struct free_page { + node unused[42]; + node n; +}; #else -static _Bool use_fake = 1; +struct free_page { + node n; +}; +#endif + +struct free_pages { + list pages; + u16 min, max; /* Minimal and maximal number of free pages kept */ + uint cnt; /* Number of empty pages */ + event cleanup; +}; + +static void global_free_pages_cleanup_event(void *); + +static struct free_pages global_free_pages = { + .min = KEEP_PAGES_MAIN_MIN, + .max = KEEP_PAGES_MAIN_MAX, + .cleanup = { .hook = global_free_pages_cleanup_event }, +}; + +uint *pages_kept = &global_free_pages.cnt; + +static void * +alloc_sys_page(void) +{ + void *ptr = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (ptr == MAP_FAILED) + bug("mmap(%lu) failed: %m", page_size); + + return ptr; +} + +extern int shutting_down; /* Shutdown requested. */ + +#else // ! 
HAVE_MMAP +#define use_fake 1 #endif -void resource_sys_init(void) +void * +alloc_page(void) { + if (use_fake) + { + void *ptr = NULL; + int err = posix_memalign(&ptr, page_size, page_size); + + if (err || !ptr) + bug("posix_memalign(%lu) failed", (long unsigned int) page_size); + + return ptr; + } + #ifdef HAVE_MMAP - if (!(page_size = sysconf(_SC_PAGESIZE))) - die("System page size must be non-zero"); + struct free_pages *fps = &global_free_pages; - if ((u64_popcount(page_size) > 1) || (page_size > 16384)) + if (fps->cnt) { -#endif - /* Too big or strange page, use the aligned allocator instead */ - page_size = 4096; - use_fake = 1; + struct free_page *fp = SKIP_BACK(struct free_page, n, HEAD(fps->pages)); + rem_node(&fp->n); + if ((--fps->cnt < fps->min) && !shutting_down) + ev_schedule(&fps->cleanup); + + bzero(fp, page_size); + return fp; } + + return alloc_sys_page(); +#endif } -void * -alloc_sys_page(void) +void +free_page(void *ptr) { -#ifdef HAVE_MMAP - if (!use_fake) + if (use_fake) { - if (alloc_multipage) - { - void *big = mmap(NULL, page_size * 2, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (big == MAP_FAILED) - bug("mmap(%lu) failed: %m", page_size); - - uintptr_t offset = ((uintptr_t) big) % page_size; - if (offset) - { - void *ret = big + page_size - offset; - munmap(big, page_size - offset); - munmap(ret + page_size, offset); - return ret; - } - else - { - munmap(big + page_size, page_size); - return big; - } - } - - void *ret = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); - if (ret == MAP_FAILED) - bug("mmap(%lu) failed: %m", page_size); - - return ret; + free(ptr); + return; } - else + +#ifdef HAVE_MMAP + struct free_pages *fps = &global_free_pages; + struct free_page *fp = ptr; + + fp->n = (node) {}; + add_tail(&fps->pages, &fp->n); + + if ((++fps->cnt > fps->max) && !shutting_down) + ev_schedule(&fps->cleanup); #endif +} + +#ifdef HAVE_MMAP +static void +global_free_pages_cleanup_event(void *data UNUSED) +{ + if (shutting_down) + return; + + struct free_pages *fps = &global_free_pages; + + while (fps->cnt / 2 < fps->min) { - void *ret = aligned_alloc(page_size, page_size); - if (!ret) - bug("aligned_alloc(%lu) failed", page_size); - return ret; + struct free_page *fp = alloc_sys_page(); + fp->n = (node) {}; + add_tail(&fps->pages, &fp->n); + fps->cnt++; + } + + for (uint seen = 0; (seen < CLEANUP_PAGES_BULK) && (fps->cnt > fps->max / 2); seen++) + { + struct free_page *fp = SKIP_BACK(struct free_page, n, TAIL(fps->pages)); + rem_node(&fp->n); + + if (munmap(fp, page_size) == 0) + fps->cnt--; + else if (errno == ENOMEM) + add_head(&fps->pages, &fp->n); + else + bug("munmap(%p) failed: %m", fp); } } +#endif void -free_sys_page(void *ptr) +resource_sys_init(void) { #ifdef HAVE_MMAP - if (!use_fake) + ASSERT_DIE(global_free_pages.cnt == 0); + + if (!(page_size = sysconf(_SC_PAGESIZE))) + die("System page size must be non-zero"); + + if (u64_popcount(page_size) == 1) { - if (munmap(ptr, page_size) < 0) - bug("munmap(%p) failed: %m", ptr); + struct free_pages *fps = &global_free_pages; + + init_list(&fps->pages); + global_free_pages_cleanup_event(NULL); + return; } - else + + /* Too big or strange page, use the aligned allocator instead */ + log(L_WARN "Got strange memory page size (%lu), using the aligned allocator instead", page_size); + use_fake = 1; #endif - free(ptr); + + page_size = 4096; } diff --git a/sysdep/unix/io.c b/sysdep/unix/io.c index 3d67d0a7..8a116789 100644 --- a/sysdep/unix/io.c +++ 
b/sysdep/unix/io.c @@ -1436,6 +1436,10 @@ sk_open(sock *s) if (sk_set_high_port(s) < 0) log(L_WARN "Socket error: %s%#m", s->err); + if (s->flags & SKF_FREEBIND) + if (sk_set_freebind(s) < 0) + log(L_WARN "Socket error: %s%#m", s->err); + sockaddr_fill(&sa, s->af, bind_addr, s->iface, bind_port); if (bind(fd, &sa.sa, SA_LEN(sa)) < 0) ERR2("bind"); @@ -1850,8 +1854,8 @@ sk_read_ssh(sock *s) /* sk_read() and sk_write() are called from BFD's event loop */ -int -sk_read(sock *s, int revents) +static inline int +sk_read_noflush(sock *s, int revents) { switch (s->type) { @@ -1914,7 +1918,15 @@ sk_read(sock *s, int revents) } int -sk_write(sock *s) +sk_read(sock *s, int revents) +{ + int e = sk_read_noflush(s, revents); + tmp_flush(); + return e; +} + +static inline int +sk_write_noflush(sock *s) { switch (s->type) { @@ -1962,6 +1974,14 @@ sk_write(sock *s) } } +int +sk_write(sock *s) +{ + int e = sk_write_noflush(s); + tmp_flush(); + return e; +} + int sk_is_ipv4(sock *s) { return s->af == AF_INET; } @@ -1980,6 +2000,7 @@ sk_err(sock *s, int revents) } s->err_hook(s, se); + tmp_flush(); } void diff --git a/sysdep/unix/main.c b/sysdep/unix/main.c index 7e8ea0dc..cdf0a310 100644 --- a/sysdep/unix/main.c +++ b/sysdep/unix/main.c @@ -682,7 +682,7 @@ signal_init(void) * Parsing of command-line arguments */ -static char *opt_list = "B:c:dD:ps:P:u:g:flRh"; +static char *opt_list = "c:dD:ps:P:u:g:flRh"; int parse_and_exit; char *bird_name; static char *use_user; @@ -703,7 +703,6 @@ display_help(void) fprintf(stderr, "\n" "Options: \n" - " -B <block-size> Use 2^this number as memory allocation block size (default: 12)\n" " -c <config-file> Use given configuration file instead of\n" " " PATH_CONFIG_FILE "\n" " -d Enable debug messages and run bird in foreground\n" @@ -790,15 +789,12 @@ get_gid(const char *s) return gr->gr_gid; } -extern _Bool alloc_multipage; - static void parse_args(int argc, char **argv) { int config_changed = 0; int socket_changed = 0; int c; - int bp; bird_name = get_bird_name(argv[0], "bird"); if (argc == 2) @@ -811,29 +807,6 @@ parse_args(int argc, char **argv) while ((c = getopt(argc, argv, opt_list)) >= 0) switch (c) { - case 'B': - bp = atoi(optarg); - if (bp < 1) - { - fprintf(stderr, "Strange block size power %d\n\n", bp); - display_usage(); - exit(1); - } - - if ((1 << bp) < page_size) - { - fprintf(stderr, "Requested block size %ld is lesser than page size %ld\n\n", (1L<<bp), page_size); - display_usage(); - exit(1); - } - - if ((1L << bp) > page_size) - { - alloc_multipage = 1; - page_size = (1L << bp); - } - - break; case 'c': config_name = optarg; config_changed = 1; @@ -888,8 +861,6 @@ parse_args(int argc, char **argv) } } -void resource_sys_init(void); - /* * Hic Est main() */ @@ -902,7 +873,6 @@ main(int argc, char **argv) dmalloc_debug(0x2f03d00); #endif - resource_sys_init(); parse_args(argc, argv); log_switch(1, NULL, NULL);
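
The SKF_FREEBIND handling added to sk_open() above is driven entirely by the socket owner: a protocol that wants to bind to an address not (yet) configured on any interface sets the flag before opening the socket, and sk_set_freebind() then applies IP_FREEBIND or IPV6_FREEBIND as appropriate. A minimal sketch of such a caller follows; it assumes the usual BIRD socket API (sk_new(), sk_open(), the saddr/sport fields) and omits rx/tx hooks and buffer setup, and the helper name is purely illustrative, not part of this patch.

/* Sketch only: open a passive TCP socket that may bind to an address
 * which is not configured on any interface yet.  SKF_FREEBIND is the
 * flag tested in the sk_open() hunk above; the rest is ordinary BIRD
 * socket setup. */
static sock *
open_freebind_listener(pool *p, ip_addr local, uint port)
{
  sock *s = sk_new(p);

  s->type = SK_TCP_PASSIVE;
  s->saddr = local;             /* address may not exist yet */
  s->sport = port;
  s->flags |= SKF_FREEBIND;     /* sk_open() will call sk_set_freebind() */

  if (sk_open(s) < 0)
  {
    rfree(s);
    return NULL;
  }

  return s;
}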
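
Likewise, the reworked allocator in sysdep/unix/alloc.c is used only through the alloc_page()/free_page() pair; callers never touch the keep-list directly. Released pages return to the global list, and only the cleanup event munmap()s the surplus once the list exceeds its maximum. A rough usage sketch under that assumption; the driver function and its loop are illustrative and do not exist in BIRD.

/* Illustrative only: exercise the page keep-list added above.
 * alloc_page() and free_page() come from the patch; this function does not. */
static void
page_churn_example(void)
{
  void *pages[16];

  for (int i = 0; i < 16; i++)
    pages[i] = alloc_page();    /* reuses kept pages, mmap()s only on a miss */

  for (int i = 0; i < 16; i++)
    free_page(pages[i]);        /* returned to the keep-list, not munmap()ed */

  /* Once the list holds more than KEEP_PAGES_MAIN_MAX pages, the cleanup
   * event trims it back in CLEANUP_PAGES_BULK batches. */
}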