Diffstat (limited to 'filter')
-rw-r--r--    filter/config.Y        335
-rw-r--r--    filter/data.c          133
-rw-r--r--    filter/data.h          200
-rw-r--r--    filter/decl.m4          35
-rw-r--r--    filter/f-inst.c        733
-rw-r--r--    filter/f-inst.h         31
-rw-r--r--    filter/f-util.c        147
-rw-r--r--    filter/filter.c         57
-rw-r--r--    filter/filter.h         19
-rw-r--r--    filter/filter_test.c     4
-rw-r--r--    filter/test.conf       457
-rw-r--r--    filter/test.conf2        6
-rw-r--r--    filter/tree_test.c       5
-rw-r--r--    filter/trie.c          951
-rw-r--r--    filter/trie_test.c     851
15 files changed, 3028 insertions, 936 deletions
diff --git a/filter/config.Y b/filter/config.Y
index 8034b790..5ba4f7e6 100644
--- a/filter/config.Y
+++ b/filter/config.Y
@@ -22,6 +22,46 @@ static inline u32 pair_b(u32 p) { return p & 0xFFFF; }
#define f_generate_complex(fi_code, da, arg) \
f_new_inst(FI_EA_SET, f_new_inst(fi_code, f_new_inst(FI_EA_GET, da), arg), da)
+#define f_generate_complex_sym(fi_code, sym, arg) ({ \
+ if (sym->class != SYM_ATTRIBUTE) \
+ cf_error("Can't modify %s: not an attribute", sym->name); \
+ f_generate_complex(fi_code, sym->attribute, arg); \
+})
+
+#define f_generate_complex_default(fi_code, da, arg, def) \
+ f_new_inst(FI_EA_SET, f_new_inst(fi_code, f_new_inst(FI_DEFAULT, f_new_inst(FI_EA_GET, da), f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = def })), arg), da)
+
+
+static int
+f_new_var(struct sym_scope *s)
+{
+ /*
+ * - A variable is an offset on the vstack, relative to vbase.
+ * - Vbase is set on filter start / function call.
+ * - Scopes contain anonymous scopes (blocks) inside the filter/function scope.
+ * - Each scope knows the number of vars declared directly in it.
+ * - The offset is therefore a sum of 'slots' up to the named scope.
+ * - New variables are added on top of the vstk, so no intermediate values
+ *   may be there during FI_VAR_INIT, i.e. no 'var' inside 'term'.
+ * - Also, each f_line must always have its own scope, otherwise a variable
+ *   may be defined but not initialized if the relevant f_line is not executed.
+ */
+
+ int offset = s->slots++;
+
+ while (!s->name)
+ {
+ s = s->next;
+ ASSERT(s);
+ offset += s->slots;
+ }
+
+ if (offset >= 0xff)
+ cf_error("Too many variables, at most 255 allowed");
+
+ return offset;
+}
+
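A minimal standalone sketch (simplified types, not BIRD's real struct sym_scope) of the offset arithmetic f_new_var() performs above: the new slot index in the current block, plus the slot counts of all enclosing anonymous blocks up to the first named function/filter scope.

    #include <assert.h>
    #include <stddef.h>

    struct scope { struct scope *next; const char *name; int slots; };

    static int new_var(struct scope *s)
    {
      int offset = s->slots++;
      while (!s->name) { s = s->next; offset += s->slots; }
      return offset;
    }

    int main(void)
    {
      struct scope fn = { NULL, "f", 0 };    /* named function scope */
      struct scope b1 = { &fn, NULL, 0 };    /* anonymous block inside it */
      struct scope b2 = { &b1, NULL, 0 };    /* nested anonymous block */

      assert(new_var(&fn) == 0);             /* first var in the function body */
      assert(new_var(&b1) == 1);             /* var in the first block */
      assert(new_var(&b2) == 2);             /* var in the nested block */
      return 0;
    }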
/*
* Sets and their items are during parsing handled as lists, linked
* through left ptr. The first item in a list also contains a pointer
@@ -161,28 +201,32 @@ f_new_lc_item(u32 f1, u32 t1, u32 f2, u32 t2, u32 f3, u32 t3)
}
static inline struct f_inst *
-f_generate_empty(struct f_dynamic_attr dyn)
+f_generate_empty(const struct symbol *sym)
{
- struct f_val empty;
+ if (sym->class != SYM_ATTRIBUTE)
+ cf_error("Can't empty %s: not an attribute", sym->name);
- switch (dyn.type & EAF_TYPE_MASK) {
- case EAF_TYPE_AS_PATH:
- empty = f_const_empty_path;
- break;
- case EAF_TYPE_INT_SET:
- empty = f_const_empty_clist;
- break;
- case EAF_TYPE_EC_SET:
- empty = f_const_empty_eclist;
- break;
- case EAF_TYPE_LC_SET:
- empty = f_const_empty_lclist;
- break;
- default:
- cf_error("Can't empty that attribute");
- }
+ const struct ea_class *def = sym->attribute;
+ const struct f_val *empty = f_get_empty(def->type);
+ if (!empty)
+ cf_error("Can't empty attribute %s", def->name);
+
+ return f_new_inst(FI_EA_SET, f_new_inst(FI_CONSTANT, *empty), def);
+}
- return f_new_inst(FI_EA_SET, f_new_inst(FI_CONSTANT, empty), dyn);
+static inline struct f_inst *
+f_implicit_roa_check(struct rtable_config *tab)
+{
+ const struct ea_class *def = ea_class_find("bgp_path");
+ if (!def)
+ cf_error("Fatal: Couldn't find BGP path attribute definition.");
+
+ struct f_static_attr fsa = f_new_static_attr(T_NET, SA_NET, 1);
+
+ return f_new_inst(FI_ROA_CHECK,
+ f_new_inst(FI_RTA_GET, fsa),
+ f_new_inst(FI_AS_PATH_LAST, f_new_inst(FI_EA_GET, def)),
+ tab);
}
/*
@@ -262,7 +306,7 @@ assert_assign(struct f_lval *lval, struct f_inst *expr, const char *start, const
checker = f_new_inst(FI_EQ, expr, getter);
setter->next = checker;
-
+
return assert_done(setter, start, end);
}
@@ -273,15 +317,17 @@ CF_KEYWORDS(FUNCTION, PRINT, PRINTN, UNSET, RETURN,
INT, BOOL, IP, TYPE, PREFIX, RD, PAIR, QUAD, EC, LC,
SET, STRING, BGPMASK, BGPPATH, CLIST, ECLIST, LCLIST,
IF, THEN, ELSE, CASE,
+ FOR, IN, DO,
TRUE, FALSE, RT, RO, UNKNOWN, GENERIC,
- FROM, GW, NET, MASK, PROTO, SOURCE, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS,
- PREFERENCE,
+ FROM, GW, NET, MASK, PROTO, SCOPE, DEST, IFNAME, IFINDEX, WEIGHT, GW_MPLS,
ROA_CHECK, ASN, SRC, DST,
IS_V4, IS_V6,
LEN, MAXLEN,
+ DATA, DATA1, DATA2,
DEFINED,
- ADD, DELETE, CONTAINS, RESET,
- PREPEND, FIRST, LAST, LAST_NONAGGREGATED, MATCH,
+ ADD, DELETE, RESET,
+ PREPEND, FIRST, LAST, LAST_NONAGGREGATED,
+ MIN, MAX,
EMPTY,
FILTER, WHERE, EVAL, ATTRIBUTE,
BT_ASSERT, BT_TEST_SUITE, BT_CHECK_ASSIGN, BT_TEST_SAME, FORMAT, STACKS)
@@ -290,21 +336,23 @@ CF_KEYWORDS(FUNCTION, PRINT, PRINTN, UNSET, RETURN,
%nonassoc ELSE
%type <xp> cmds_int cmd_prep
-%type <x> term block cmd cmds constant constructor print_list var_list function_call symbol_value bgp_path_expr bgp_path bgp_path_tail
-%type <fda> dynamic_attr
+%type <x> term cmd cmd_var cmds cmds_scoped constant constructor print_list var var_init var_list function_call symbol_value bgp_path_expr bgp_path bgp_path_tail
%type <fsa> static_attr
+%type <fab> attr_bit
%type <f> filter where_filter
%type <fl> filter_body function_body
%type <flv> lvalue
-%type <i> type function_args function_vars
+%type <i> type function_vars
+%type <fa> function_argsn function_args
%type <ecs> ec_kind
-%type <fret> break_command
+%type <fret> break_command
%type <i32> cnum
%type <e> pair_item ec_item lc_item set_item switch_item set_items switch_items switch_body
%type <trie> fprefix_set
%type <v> set_atom switch_atom fipa
%type <px> fprefix
%type <t> get_cf_position
+%type <s> for_var
CF_GRAMMAR
@@ -328,17 +376,24 @@ filter_def:
conf: filter_eval ;
filter_eval:
- EVAL term { f_eval_int(f_linearize($2)); }
+ EVAL term { f_eval_int(f_linearize($2, 1)); }
;
conf: custom_attr ;
custom_attr: ATTRIBUTE type symbol ';' {
- cf_define_symbol($3, SYM_ATTRIBUTE, attribute, ca_lookup(new_config->pool, $3->name, $2)->fda);
+ if (($3->class == SYM_ATTRIBUTE) && ($3->scope == new_config->root_scope))
+ cf_error("Duplicate attribute %s definition", $3->name);
+
+ cf_define_symbol($3, SYM_ATTRIBUTE, attribute,
+ ea_register_alloc(new_config->pool, (struct ea_class) {
+ .name = $3->name,
+ .type = $2,
+ })->class);
};
conf: bt_test_suite ;
bt_test_suite:
- BT_TEST_SUITE '(' CF_SYM_KNOWN ',' text ')' {
+ BT_TEST_SUITE '(' symbol_known ',' text ')' {
cf_assert_symbol($3, SYM_FUNCTION);
struct f_bt_test_suite *t = cfg_allocz(sizeof(struct f_bt_test_suite));
t->fn = $3->function;
@@ -351,7 +406,7 @@ bt_test_suite:
conf: bt_test_same ;
bt_test_same:
- BT_TEST_SAME '(' CF_SYM_KNOWN ',' CF_SYM_KNOWN ',' NUM ')' {
+ BT_TEST_SAME '(' symbol_known ',' symbol_known ',' NUM ')' {
cf_assert_symbol($3, SYM_FUNCTION);
cf_assert_symbol($5, SYM_FUNCTION);
struct f_bt_test_suite *t = cfg_allocz(sizeof(struct f_bt_test_suite));
@@ -403,25 +458,28 @@ type:
;
function_argsn:
- /* EMPTY */
+ /* EMPTY */ { $$ = NULL; }
| function_argsn type symbol ';' {
if ($3->scope->slots >= 0xfe) cf_error("Too many declarations, at most 255 allowed");
- cf_define_symbol($3, SYM_VARIABLE | $2, offset, $3->scope->slots++);
+ $$ = cfg_alloc(sizeof(struct f_arg));
+ $$->arg = cf_define_symbol($3, SYM_VARIABLE | $2, offset, sym_->scope->slots++);
+ $$->next = $1;
}
;
function_args:
- '(' ')' { $$ = 0; }
+ '(' ')' { $$ = NULL; }
| '(' function_argsn type symbol ')' {
- cf_define_symbol($4, SYM_VARIABLE | $3, offset, $4->scope->slots++);
- $$ = $4->scope->slots;
+ $$ = cfg_alloc(sizeof(struct f_arg));
+ $$->arg = cf_define_symbol($4, SYM_VARIABLE | $3, offset, sym_->scope->slots++);
+ $$->next = $2;
}
;
function_vars:
/* EMPTY */ { $$ = 0; }
| function_vars type symbol ';' {
- cf_define_symbol($3, SYM_VARIABLE | $2, offset, $3->scope->slots++);
+ cf_define_symbol($3, SYM_VARIABLE | $2, offset, f_new_var(sym_->scope));
$$ = $1 + 1;
}
;
@@ -429,7 +487,7 @@ function_vars:
filter_body: function_body ;
filter:
- CF_SYM_KNOWN {
+ symbol_known {
cf_assert_symbol($1, SYM_FILTER);
$$ = $1->filter;
}
@@ -449,20 +507,35 @@ where_filter:
function_body:
function_vars '{' cmds '}' {
- $$ = f_linearize($3);
+ $$ = f_linearize($3, 0);
$$->vars = $1;
}
;
conf: function_def ;
function_def:
- FUNCTION symbol { DBG( "Beginning of function %s\n", $2->name );
+ FUNCTION symbol {
+ DBG( "Beginning of function %s\n", $2->name );
$2 = cf_define_symbol($2, SYM_FUNCTION, function, NULL);
cf_push_scope($2);
- } function_args function_body {
- DBG("Definition of function %s with %u args and %u local vars.\n", $2->name, $4, $5->vars);
- $5->args = $4;
- $2->function = $5;
+ } function_args {
+ /* Make dummy f_line for storing function prototype */
+ struct f_line *dummy = cfg_allocz(sizeof(struct f_line));
+ $2->function = dummy;
+
+ /* Revert the args */
+ while ($4) {
+ struct f_arg *tmp = $4;
+ $4 = $4->next;
+
+ tmp->next = dummy->arg_list;
+ dummy->arg_list = tmp;
+ dummy->args++;
+ }
+ } function_body {
+ $6->args = $2->function->args;
+ $6->arg_list = $2->function->arg_list;
+ $2->function = $6;
cf_pop_scope();
}
;
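Two things happen above: a dummy f_line stores the function prototype, so the argument count and list are already available while the body is parsed, and the argument list, which the parser builds by prepending and therefore produces in reverse, is put back into source order. The same reversal reappears below for call arguments in function_call. A standalone sketch of the pattern (minimal struct, not BIRD's struct f_arg):

    struct item { struct item *next; };

    /* Reverse a singly-linked list built by prepending, restoring source order. */
    static struct item *revert(struct item *list)
    {
      struct item *out = NULL;
      while (list) {
        struct item *tmp = list;
        list = list->next;
        tmp->next = out;
        out = tmp;
      }
      return out;
    }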
@@ -473,7 +546,11 @@ cmds: /* EMPTY */ { $$ = NULL; }
| cmds_int { $$ = $1.begin; }
;
-cmd_prep: cmd {
+cmds_scoped: { cf_push_soft_scope(); } cmds { cf_pop_soft_scope(); $$ = $2; } ;
+
+cmd_var: var | cmd ;
+
+cmd_prep: cmd_var {
$$.begin = $$.end = $1;
if ($1)
while ($$.end->next)
@@ -495,15 +572,6 @@ cmds_int: cmd_prep
}
;
-block:
- cmd {
- $$=$1;
- }
- | '{' cmds '}' {
- $$=$2;
- }
- ;
-
/*
* Complex types, their bison value is struct f_val
*/
@@ -527,10 +595,10 @@ set_atom:
| VPN_RD { $$.type = T_RD; $$.val.ec = $1; }
| ENUM { $$.type = pair_a($1); $$.val.i = pair_b($1); }
| '(' term ')' {
- if (f_eval(f_linearize($2), cfg_mem, &($$)) > F_RETURN) cf_error("Runtime error");
+ if (f_eval(f_linearize($2, 1), &($$)) > F_RETURN) cf_error("Runtime error");
if (!f_valid_set_type($$.type)) cf_error("Set-incompatible type");
}
- | CF_SYM_KNOWN {
+ | symbol_known {
cf_assert_symbol($1, SYM_CONSTANT);
if (!f_valid_set_type(SYM_TYPE($1))) cf_error("%s: set-incompatible type", $1->name);
$$ = *$1->val;
@@ -539,13 +607,13 @@ set_atom:
switch_atom:
NUM { $$.type = T_INT; $$.val.i = $1; }
- | '(' term ')' { $$.type = T_INT; $$.val.i = f_eval_int(f_linearize($2)); }
+ | '(' term ')' { $$.type = T_INT; $$.val.i = f_eval_int(f_linearize($2, 1)); }
| fipa { $$ = $1; }
| ENUM { $$.type = pair_a($1); $$.val.i = pair_b($1); }
;
cnum:
- term { $$ = f_eval_int(f_linearize($1)); }
+ term { $$ = f_eval_int(f_linearize($1, 1)); }
pair_item:
'(' cnum ',' cnum ')' { $$ = f_new_pair_item($2, $2, $4, $4); }
@@ -629,19 +697,19 @@ fprefix_set:
;
switch_body: /* EMPTY */ { $$ = NULL; }
- | switch_body switch_items ':' cmds {
+ | switch_body switch_items ':' cmds_scoped {
/* Fill data fields */
struct f_tree *t;
- struct f_line *line = f_linearize($4);
+ struct f_line *line = f_linearize($4, 0);
for (t = $2; t; t = t->left)
t->data = line;
$$ = f_merge_items($1, $2);
}
- | switch_body ELSECOL cmds {
+ | switch_body ELSECOL cmds_scoped {
struct f_tree *t = f_new_tree();
t->from.type = t->to.type = T_VOID;
t->right = t;
- t->data = f_linearize($3);
+ t->data = f_linearize($3, 0);
$$ = f_merge_items($1, t);
}
;
@@ -658,6 +726,7 @@ bgp_path:
bgp_path_tail:
NUM bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .asn = $1, .kind = PM_ASN, }, }); $$->next = $2; }
| NUM DDOT NUM bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .from = $1, .to = $3, .kind = PM_ASN_RANGE }, }); $$->next = $4; }
+ | '[' ']' bgp_path_tail { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .set = NULL, .kind = PM_ASN_SET }, }); $$->next = $3; }
| '[' set_items ']' bgp_path_tail {
if ($2->from.type != T_INT) cf_error("Only integer sets allowed in path mask");
$$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_PATH_MASK_ITEM, .val.pmi = { .set = build_tree($2), .kind = PM_ASN_SET }, }); $$->next = $4;
@@ -677,6 +746,7 @@ constant:
| fipa { $$ = f_new_inst(FI_CONSTANT, $1); }
| VPN_RD { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_RD, .val.ec = $1, }); }
| net_ { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_NET, .val.net = $1, }); }
+ | '[' ']' { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_SET, .val.t = NULL, }); }
| '[' set_items ']' {
DBG( "We've got a set here..." );
$$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_SET, .val.t = build_tree($2), });
@@ -700,31 +770,26 @@ var_list: /* EMPTY */ { $$ = NULL; }
| var_list ',' term { $$ = $3; $$->next = $1; }
function_call:
- CF_SYM_KNOWN '(' var_list ')' {
+ symbol_known '(' var_list ')'
+ {
if ($1->class != SYM_FUNCTION)
cf_error("You can't call something which is not a function. Really.");
- struct f_inst *fc = f_new_inst(FI_CALL, $1);
- uint args = 0;
+ /* Revert the var_list */
+ struct f_inst *args = NULL;
while ($3) {
- args++;
- struct f_inst *tmp = $3->next;
- $3->next = fc;
+ struct f_inst *tmp = $3;
+ $3 = $3->next;
- fc = $3;
- $3 = tmp;
+ tmp->next = args;
+ args = tmp;
}
- if (args != $1->function->args)
- cf_error("Function call '%s' got %u arguments, need %u arguments.",
- $1->name, args, $1->function->args);
-
- $$ = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_VOID });
- $$->next = fc;
+ $$ = f_new_inst(FI_CALL, args, $1);
}
;
-symbol_value: CF_SYM_KNOWN
+symbol_value: symbol_known
{
switch ($1->class) {
case SYM_CONSTANT_RANGE:
@@ -734,7 +799,7 @@ symbol_value: CF_SYM_KNOWN
$$ = f_new_inst(FI_VAR_GET, $1);
break;
case SYM_ATTRIBUTE:
- $$ = f_new_inst(FI_EA_GET, *$1->attribute);
+ $$ = f_new_inst(FI_EA_GET, $1->attribute);
break;
default:
cf_error("Can't get value of symbol %s", $1->name);
@@ -743,17 +808,13 @@ symbol_value: CF_SYM_KNOWN
;
static_attr:
- FROM { $$ = f_new_static_attr(T_IP, SA_FROM, 0); }
- | GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
+ GW { $$ = f_new_static_attr(T_IP, SA_GW, 0); }
| NET { $$ = f_new_static_attr(T_NET, SA_NET, 1); }
| PROTO { $$ = f_new_static_attr(T_STRING, SA_PROTO, 1); }
- | SOURCE { $$ = f_new_static_attr(T_ENUM_RTS, SA_SOURCE, 1); }
- | SCOPE { $$ = f_new_static_attr(T_ENUM_SCOPE, SA_SCOPE, 0); }
| DEST { $$ = f_new_static_attr(T_ENUM_RTD, SA_DEST, 0); }
| IFNAME { $$ = f_new_static_attr(T_STRING, SA_IFNAME, 0); }
| IFINDEX { $$ = f_new_static_attr(T_INT, SA_IFINDEX, 1); }
| WEIGHT { $$ = f_new_static_attr(T_INT, SA_WEIGHT, 0); }
- | PREFERENCE { $$ = f_new_static_attr(T_INT, SA_PREF, 0); }
| GW_MPLS { $$ = f_new_static_attr(T_INT, SA_GW_MPLS, 0); }
;
@@ -763,6 +824,8 @@ term:
| term '-' term { $$ = f_new_inst(FI_SUBTRACT, $1, $3); }
| term '*' term { $$ = f_new_inst(FI_MULTIPLY, $1, $3); }
| term '/' term { $$ = f_new_inst(FI_DIVIDE, $1, $3); }
+ | term '&' term { $$ = f_new_inst(FI_BITAND, $1, $3); }
+ | term '|' term { $$ = f_new_inst(FI_BITOR, $1, $3); }
| term AND term { $$ = f_new_inst(FI_AND, $1, $3); }
| term OR term { $$ = f_new_inst(FI_OR, $1, $3); }
| term '=' term { $$ = f_new_inst(FI_EQ, $1, $3); }
@@ -781,8 +844,10 @@ term:
| constructor { $$ = $1; }
| static_attr { $$ = f_new_inst(FI_RTA_GET, $1); }
-
- | dynamic_attr { $$ = f_new_inst(FI_EA_GET, $1); }
+ | attr_bit {
+ struct f_inst *c = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << $1.bit)});
+ $$ = f_new_inst(FI_EQ, c, f_new_inst(FI_BITAND, f_new_inst(FI_EA_GET, $1.class), c));
+ }
| term '.' IS_V4 { $$ = f_new_inst(FI_IS_V4, $1); }
| term '.' TYPE { $$ = f_new_inst(FI_TYPE, $1); }
@@ -790,13 +855,18 @@ term:
| term '.' RD { $$ = f_new_inst(FI_ROUTE_DISTINGUISHER, $1); }
| term '.' LEN { $$ = f_new_inst(FI_LENGTH, $1); }
| term '.' MAXLEN { $$ = f_new_inst(FI_ROA_MAXLEN, $1); }
- | term '.' ASN { $$ = f_new_inst(FI_ROA_ASN, $1); }
+ | term '.' ASN { $$ = f_new_inst(FI_ASN, $1); }
| term '.' SRC { $$ = f_new_inst(FI_NET_SRC, $1); }
| term '.' DST { $$ = f_new_inst(FI_NET_DST, $1); }
| term '.' MASK '(' term ')' { $$ = f_new_inst(FI_IP_MASK, $1, $5); }
| term '.' FIRST { $$ = f_new_inst(FI_AS_PATH_FIRST, $1); }
| term '.' LAST { $$ = f_new_inst(FI_AS_PATH_LAST, $1); }
| term '.' LAST_NONAGGREGATED { $$ = f_new_inst(FI_AS_PATH_LAST_NAG, $1); }
+ | term '.' DATA { $$ = f_new_inst(FI_PAIR_DATA, $1); }
+ | term '.' DATA1 { $$ = f_new_inst(FI_LC_DATA1, $1); }
+ | term '.' DATA2 { $$ = f_new_inst(FI_LC_DATA2, $1); }
+ | term '.' MIN { $$ = f_new_inst(FI_MIN, $1); }
+ | term '.' MAX { $$ = f_new_inst(FI_MAX, $1); }
/* Communities */
/* This causes one shift/reduce conflict
@@ -815,13 +885,11 @@ term:
| DELETE '(' term ',' term ')' { $$ = f_new_inst(FI_CLIST_DEL, $3, $5); }
| FILTER '(' term ',' term ')' { $$ = f_new_inst(FI_CLIST_FILTER, $3, $5); }
- | ROA_CHECK '(' rtable ')' { $$ = f_new_inst(FI_ROA_CHECK_IMPLICIT, $3); }
- | ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK_EXPLICIT, $5, $7, $3); }
+ | ROA_CHECK '(' rtable ')' { $$ = f_implicit_roa_check($3); }
+ | ROA_CHECK '(' rtable ',' term ',' term ')' { $$ = f_new_inst(FI_ROA_CHECK, $5, $7, $3); }
| FORMAT '(' term ')' { $$ = f_new_inst(FI_FORMAT, $3); }
-/* | term '.' LEN { $$->code = P('P','l'); } */
-
| function_call
;
@@ -841,20 +909,53 @@ print_list: /* EMPTY */ { $$ = NULL; }
}
;
+var_init:
+ /* empty */ { $$ = f_new_inst(FI_CONSTANT, (struct f_val) { }); }
+ | '=' term { $$ = $2; }
+ ;
+
+var:
+ type symbol var_init ';' {
+ struct symbol *sym = cf_define_symbol($2, SYM_VARIABLE | $1, offset, f_new_var(sym_->scope));
+ $$ = f_new_inst(FI_VAR_INIT, $3, sym);
+ }
+
+for_var:
+ type symbol { $$ = cf_define_symbol($2, SYM_VARIABLE | $1, offset, f_new_var(sym_->scope)); }
+ | CF_SYM_KNOWN { $$ = $1; cf_assert_symbol($1, SYM_VARIABLE); }
+ ;
+
cmd:
- IF term THEN block {
+ '{' cmds_scoped '}' {
+ $$ = $2;
+ }
+ | IF term THEN cmd {
$$ = f_new_inst(FI_CONDITION, $2, $4, NULL);
}
- | IF term THEN block ELSE block {
+ | IF term THEN cmd ELSE cmd {
$$ = f_new_inst(FI_CONDITION, $2, $4, $6);
}
- | CF_SYM_KNOWN '=' term ';' {
+ | FOR {
+ /* Reserve space for walk data on stack */
+ cf_push_scope(NULL);
+ conf_this_scope->slots += 2;
+ } for_var IN
+ /* Parse term in the parent scope */
+ { conf_this_scope->active = 0; } term { conf_this_scope->active = 1; }
+ DO cmd {
+ cf_pop_scope();
+ $$ = f_new_inst(FI_FOR_INIT, $6, $3);
+ $$->next = f_new_inst(FI_FOR_NEXT, $3, $9);
+ }
+ | symbol_known '=' term ';' {
switch ($1->class) {
case SYM_VARIABLE_RANGE:
$$ = f_new_inst(FI_VAR_SET, $3, $1);
break;
case SYM_ATTRIBUTE:
- $$ = f_new_inst(FI_EA_SET, $3, *$1->attribute);
+ if ($1->attribute->readonly)
+ cf_error("Attribute %s is read-only", $1->attribute->name);
+ $$ = f_new_inst(FI_EA_SET, $3, $1->attribute);
break;
default:
cf_error("Can't assign to symbol %s", $1->name);
@@ -864,16 +965,25 @@ cmd:
DBG( "Ook, we'll return the value\n" );
$$ = f_new_inst(FI_RETURN, $2);
}
- | dynamic_attr '=' term ';' {
- $$ = f_new_inst(FI_EA_SET, $3, $1);
- }
| static_attr '=' term ';' {
if ($1.readonly)
cf_error( "This static attribute is read-only.");
$$ = f_new_inst(FI_RTA_SET, $3, $1);
}
- | UNSET '(' dynamic_attr ')' ';' {
- $$ = f_new_inst(FI_EA_UNSET, $3);
+ | UNSET '(' symbol_known ')' ';' {
+ if ($3->class != SYM_ATTRIBUTE)
+ cf_error("Can't unset %s", $3->name);
+ if ($3->attribute->readonly)
+ cf_error("Attribute %s is read-only", $3->attribute->name);
+ $$ = f_new_inst(FI_EA_UNSET, $3->attribute);
+ }
+ | attr_bit '=' term ';' {
+ $$ = f_new_inst(FI_CONDITION, $3,
+ f_generate_complex_default(FI_BITOR, $1.class,
+ f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = (1U << $1.bit)}), 0),
+ f_generate_complex_default(FI_BITAND, $1.class,
+ f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_INT, .val.i = ~(1U << $1.bit)}), 0)
+ );
}
| break_command print_list ';' {
struct f_inst *breaker = f_new_inst(FI_DIE, $1);
@@ -893,16 +1003,16 @@ cmd:
| PRINTN print_list ';' {
$$ = f_new_inst(FI_PRINT, $2);
}
- | function_call ';' { $$ = f_new_inst(FI_DROP_RESULT, $1); }
+ | function_call ';' { $$ = f_new_inst(FI_DROP_RESULT, $1); }
| CASE term '{' switch_body '}' {
$$ = f_new_inst(FI_SWITCH, $2, build_tree($4));
}
- | dynamic_attr '.' EMPTY ';' { $$ = f_generate_empty($1); }
- | dynamic_attr '.' PREPEND '(' term ')' ';' { $$ = f_generate_complex( FI_PATH_PREPEND, $1, $5 ); }
- | dynamic_attr '.' ADD '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_ADD, $1, $5 ); }
- | dynamic_attr '.' DELETE '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_DEL, $1, $5 ); }
- | dynamic_attr '.' FILTER '(' term ')' ';' { $$ = f_generate_complex( FI_CLIST_FILTER, $1, $5 ); }
+ | symbol_known '.' EMPTY ';' { $$ = f_generate_empty($1); }
+ | symbol_known '.' PREPEND '(' term ')' ';' { $$ = f_generate_complex_sym( FI_PATH_PREPEND, $1, $5 ); }
+ | symbol_known '.' ADD '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_ADD, $1, $5 ); }
+ | symbol_known '.' DELETE '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_DEL, $1, $5 ); }
+ | symbol_known '.' FILTER '(' term ')' ';' { $$ = f_generate_complex_sym( FI_CLIST_FILTER, $1, $5 ); }
| BT_ASSERT '(' get_cf_position term get_cf_position ')' ';' { $$ = assert_done($4, $3 + 1, $5 - 1); }
| BT_CHECK_ASSIGN '(' get_cf_position lvalue get_cf_position ',' term ')' ';' { $$ = assert_assign(&$4, $7, $3 + 1, $5 - 1); }
;
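The attr_bit assignment above is dense because of the nested macros. Expanding f_generate_complex_default(FI_BITOR, ...) one level gives the tree below; a sketch assuming BIRD headers, with 'cls' and 'bit' as hypothetical parameters. An unset attribute first reads as 0 through FI_DEFAULT, then the requested flag bit is OR-ed in; the ELSE branch AND-s it out with the inverted mask the same way.

    static struct f_inst *attr_bit_set_sketch(const struct ea_class *cls, uint bit)
    {
      struct f_inst *mask = f_new_inst(FI_CONSTANT,
          (struct f_val) { .type = T_INT, .val.i = (1U << bit) });
      struct f_inst *zero = f_new_inst(FI_CONSTANT,
          (struct f_val) { .type = T_INT, .val.i = 0 });

      /* FI_EA_SET( FI_BITOR( FI_DEFAULT( FI_EA_GET(cls), 0 ), 1U << bit ), cls ) */
      return f_new_inst(FI_EA_SET,
          f_new_inst(FI_BITOR,
              f_new_inst(FI_DEFAULT, f_new_inst(FI_EA_GET, cls), zero),
              mask),
          cls);
    }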
@@ -913,8 +1023,17 @@ get_cf_position:
};
lvalue:
- CF_SYM_KNOWN { cf_assert_symbol($1, SYM_VARIABLE); $$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1 }; }
+ symbol_known {
+ switch ($1->class) {
+ case SYM_VARIABLE_RANGE:
+ $$ = (struct f_lval) { .type = F_LVAL_VARIABLE, .sym = $1 };
+ break;
+ case SYM_ATTRIBUTE:
+ $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1->attribute };
+ break;
+ }
+ }
| static_attr { $$ = (struct f_lval) { .type = F_LVAL_SA, .sa = $1 }; }
- | dynamic_attr { $$ = (struct f_lval) { .type = F_LVAL_EA, .da = $1 }; };
+ ;
CF_END
diff --git a/filter/data.c b/filter/data.c
index 7c33d2cb..d26b07f5 100644
--- a/filter/data.c
+++ b/filter/data.c
@@ -16,10 +16,10 @@
#include "lib/unaligned.h"
#include "lib/net.h"
#include "lib/ip.h"
-#include "nest/route.h"
+#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
-#include "nest/attrs.h"
+#include "lib/attrs.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
@@ -27,6 +27,8 @@
static const char * const f_type_str[] = {
[T_VOID] = "void",
+ [T_OPAQUE] = "opaque byte string",
+ [T_IFACE] = "interface",
[T_INT] = "int",
[T_BOOL] = "bool",
@@ -36,7 +38,6 @@ static const char * const f_type_str[] = {
[T_ENUM_RTS] = "enum rts",
[T_ENUM_BGP_ORIGIN] = "enum bgp_origin",
[T_ENUM_SCOPE] = "enum scope",
- [T_ENUM_RTC] = "enum rtc",
[T_ENUM_RTD] = "enum rtd",
[T_ENUM_ROA] = "enum roa",
[T_ENUM_NETTYPE] = "enum nettype",
@@ -47,6 +48,7 @@ static const char * const f_type_str[] = {
[T_NET] = "prefix",
[T_STRING] = "string",
[T_PATH_MASK] = "bgpmask",
+ [T_PATH_MASK_ITEM] = "bgpmask item",
[T_PATH] = "bgppath",
[T_CLIST] = "clist",
[T_EC] = "ec",
@@ -54,20 +56,31 @@ static const char * const f_type_str[] = {
[T_LC] = "lc",
[T_LCLIST] = "lclist",
[T_RD] = "rd",
+
+ [T_SET] = "set",
+ [T_PREFIX_SET] = "prefix set",
};
const char *
-f_type_name(enum f_type t)
+f_type_name(btype t)
{
- if (t < ARRAY_SIZE(f_type_str))
- return f_type_str[t] ?: "?";
-
- if ((t == T_SET) || (t == T_PREFIX_SET))
- return "set";
+ return (t < ARRAY_SIZE(f_type_str)) ? (f_type_str[t] ?: "?") : "?";
+}
- return "?";
+btype
+f_type_element_type(btype t)
+{
+ switch(t) {
+ case T_PATH: return T_INT;
+ case T_CLIST: return T_PAIR;
+ case T_ECLIST: return T_EC;
+ case T_LCLIST: return T_LC;
+ default: return T_VOID;
+ };
}
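A quick check of the mapping (a sketch assuming BIRD headers): the element type is what the new FOR loop iterates with, and FI_FOR_INIT in f-inst.c uses it for both the static and the dynamic type check.

    static void check_element_types(void)
    {
      ASSERT(f_type_element_type(T_PATH)   == T_INT);   /* bgppath walks ASNs */
      ASSERT(f_type_element_type(T_CLIST)  == T_PAIR);  /* clist walks (asn, value) pairs */
      ASSERT(f_type_element_type(T_ECLIST) == T_EC);
      ASSERT(f_type_element_type(T_LCLIST) == T_LC);
      ASSERT(f_type_element_type(T_NET)    == T_VOID);  /* not iterable */
    }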
+const struct f_trie f_const_empty_trie = { .ipv4 = -1, };
+
const struct f_val f_const_empty_path = {
.type = T_PATH,
.val.ad = &null_adata,
@@ -80,16 +93,11 @@ const struct f_val f_const_empty_path = {
}, f_const_empty_lclist = {
.type = T_LCLIST,
.val.ad = &null_adata,
+}, f_const_empty_prefix_set = {
+ .type = T_PREFIX_SET,
+ .val.ti = &f_const_empty_trie,
};
-static struct adata *
-adata_empty(struct linpool *pool, int l)
-{
- struct adata *res = lp_alloc(pool, sizeof(struct adata) + l);
- res->length = l;
- return res;
-}
-
static void
pm_format(const struct f_path_mask *p, buffer *buf)
{
@@ -176,7 +184,7 @@ val_compare(const struct f_val *v1, const struct f_val *v2)
if (val_is_ip4(v1) && (v2->type == T_QUAD))
return uint_cmp(ipa_to_u32(v1->val.ip), v2->val.i);
- debug( "Types do not match in val_compare\n" );
+ DBG( "Types do not match in val_compare\n" );
return F_CMP_ERROR;
}
@@ -290,6 +298,12 @@ val_same(const struct f_val *v1, const struct f_val *v2)
int
clist_set_type(const struct f_tree *set, struct f_val *v)
{
+ if (!set)
+ {
+ v->type = T_VOID;
+ return 1;
+ }
+
switch (set->from.type)
{
case T_PAIR:
@@ -412,7 +426,7 @@ clist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
- struct adata *res = adata_empty(pool, nl);
+ struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@@ -446,7 +460,7 @@ eclist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
- struct adata *res = adata_empty(pool, nl);
+ struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@@ -478,7 +492,7 @@ lclist_filter(struct linpool *pool, const struct adata *list, const struct f_val
if (nl == list->length)
return list;
- struct adata *res = adata_empty(pool, nl);
+ struct adata *res = lp_alloc_adata(pool, nl);
memcpy(res->data, tmp, nl);
return res;
}
@@ -526,6 +540,9 @@ val_in_range(const struct f_val *v1, const struct f_val *v2)
if (v2->type != T_SET)
return F_CMP_ERROR;
+ if (!v2->val.t)
+ return 0;
+
/* With integrated Quad<->IP implicit conversion */
if ((v1->type == v2->val.t->from.type) ||
((v1->type == T_QUAD) && val_is_ip4(&(v2->val.t->from)) && val_is_ip4(&(v2->val.t->to))))
@@ -599,3 +616,75 @@ val_dump(const struct f_val *v) {
return val_dump_buffer;
}
+
+struct f_val *
+lp_val_copy(struct linpool *lp, const struct f_val *v)
+{
+ switch (v->type)
+ {
+ case T_VOID:
+ case T_BOOL:
+ case T_INT:
+ case T_IP:
+ case T_PAIR:
+ case T_QUAD:
+ case T_EC:
+ case T_LC:
+ case T_RD:
+ case T_ENUM:
+ case T_PATH_MASK_ITEM:
+ /* These aren't embedded but there is no need to copy them */
+ case T_SET:
+ case T_PREFIX_SET:
+ case T_PATH_MASK:
+ case T_IFACE:
+ {
+ struct f_val *out = lp_alloc(lp, sizeof(*out));
+ *out = *v;
+ return out;
+ }
+
+ case T_NET:
+ {
+ struct {
+ struct f_val val;
+ net_addr net[0];
+ } *out = lp_alloc(lp, sizeof(*out) + v->val.net->length);
+ out->val = *v;
+ out->val.val.net = out->net;
+ net_copy(out->net, v->val.net);
+ return &out->val;
+ }
+
+ case T_STRING:
+ {
+ uint len = strlen(v->val.s);
+ struct {
+ struct f_val val;
+ char buf[0];
+ } *out = lp_alloc(lp, sizeof(*out) + len + 1);
+ out->val = *v;
+ out->val.val.s = out->buf;
+ memcpy(out->buf, v->val.s, len+1);
+ return &out->val;
+ }
+
+ case T_PATH:
+ case T_CLIST:
+ case T_ECLIST:
+ case T_LCLIST:
+ {
+ struct {
+ struct f_val val;
+ struct adata ad;
+ } *out = lp_alloc(lp, sizeof(*out) + v->val.ad->length);
+ out->val = *v;
+ out->val.val.ad = &out->ad;
+ memcpy(&out->ad, v->val.ad, v->val.ad->length);
+ return &out->val;
+ }
+
+ default:
+ bug("Unknown type in value copy: %d", v->type);
+ }
+}
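Usage sketch for lp_val_copy() (assuming BIRD headers and an existing linpool): values carrying out-of-band data get that data copied into the same linpool allocation, so the copy stays valid independently of the original value.

    static const char *copy_string_val(struct linpool *lp)
    {
      struct f_val v = { .type = T_STRING, .val.s = "example" };
      struct f_val *cp = lp_val_copy(lp, &v);

      /* cp->val.s now points into the linpool allocation, not at the literal */
      return cp->val.s;
    }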
diff --git a/filter/data.h b/filter/data.h
index 45246f9f..c1e7c736 100644
--- a/filter/data.h
+++ b/filter/data.h
@@ -11,110 +11,37 @@
#define _BIRD_FILTER_DATA_H_
#include "nest/bird.h"
-
-/* Type numbers must be in 0..0xff range */
-#define T_MASK 0xff
-
-/* Internal types */
-enum f_type {
-/* Nothing. Simply nothing. */
- T_VOID = 0,
-
-/* User visible types, which fit in int */
- T_INT = 0x10,
- T_BOOL = 0x11,
- T_PAIR = 0x12, /* Notice that pair is stored as integer: first << 16 | second */
- T_QUAD = 0x13,
-
-/* Put enumerational types in 0x30..0x3f range */
- T_ENUM_LO = 0x30,
- T_ENUM_HI = 0x3f,
-
- T_ENUM_RTS = 0x30,
- T_ENUM_BGP_ORIGIN = 0x31,
- T_ENUM_SCOPE = 0x32,
- T_ENUM_RTC = 0x33,
- T_ENUM_RTD = 0x34,
- T_ENUM_ROA = 0x35,
- T_ENUM_NETTYPE = 0x36,
- T_ENUM_RA_PREFERENCE = 0x37,
- T_ENUM_AF = 0x38,
-
-/* new enums go here */
- T_ENUM_EMPTY = 0x3f, /* Special hack for atomic_aggr */
-
-#define T_ENUM T_ENUM_LO ... T_ENUM_HI
-
-/* Bigger ones */
- T_IP = 0x20,
- T_NET = 0x21,
- T_STRING = 0x22,
- T_PATH_MASK = 0x23, /* mask for BGP path */
- T_PATH = 0x24, /* BGP path */
- T_CLIST = 0x25, /* Community list */
- T_EC = 0x26, /* Extended community value, u64 */
- T_ECLIST = 0x27, /* Extended community list */
- T_LC = 0x28, /* Large community value, lcomm */
- T_LCLIST = 0x29, /* Large community list */
- T_RD = 0x2a, /* Route distinguisher for VPN addresses */
- T_PATH_MASK_ITEM = 0x2b, /* Path mask item for path mask constructors */
-
- T_SET = 0x80,
- T_PREFIX_SET = 0x81,
-} PACKED;
+#include "lib/type.h"
/* Filter value; size of this affects filter memory consumption */
struct f_val {
- enum f_type type; /* T_* */
- union {
- uint i;
- u64 ec;
- lcomm lc;
- ip_addr ip;
- const net_addr *net;
- const char *s;
- const struct f_tree *t;
- const struct f_trie *ti;
- const struct adata *ad;
- const struct f_path_mask *path_mask;
- struct f_path_mask_item pmi;
- } val;
+ btype type; /* T_* */
+ union bval_long val;
};

-/* Dynamic attribute definition (eattrs) */
-struct f_dynamic_attr {
- u8 type; /* EA type (EAF_*) */
- u8 bit; /* For bitfield accessors */
- enum f_type f_type; /* Filter type */
- uint ea_code; /* EA code */
-};
+#define fputip(a) ({ ip_addr *ax = falloc(sizeof(*ax)); *ax = (a); ax; })
enum f_sa_code {
- SA_FROM = 1,
- SA_GW,
+ SA_GW = 1,
SA_NET,
SA_PROTO,
- SA_SOURCE,
- SA_SCOPE,
SA_DEST,
SA_IFNAME,
SA_IFINDEX,
SA_WEIGHT,
- SA_PREF,
SA_GW_MPLS,
} PACKED;
/* Static attribute definition (members of struct rta) */
struct f_static_attr {
- enum f_type f_type; /* Filter type */
+ btype type; /* Data type */
enum f_sa_code sa_code; /* Static attribute id */
- int readonly:1; /* Don't allow writing */
+ int readonly:1; /* Don't allow writing */
};
/* Filter l-value type */
enum f_lval_type {
F_LVAL_VARIABLE,
- F_LVAL_PREFERENCE,
F_LVAL_SA,
F_LVAL_EA,
};
@@ -124,7 +51,7 @@ struct f_lval {
enum f_lval_type type;
union {
struct symbol *sym;
- struct f_dynamic_attr da;
+ const struct ea_class *da;
struct f_static_attr sa;
};
};
@@ -141,18 +68,23 @@ struct f_tree {
void *data;
};
+#define TRIE_STEP 4
+#define TRIE_STACK_LENGTH 33
+
struct f_trie_node4
{
ip4_addr addr, mask, accept;
- uint plen;
- struct f_trie_node4 *c[2];
+ u16 plen;
+ u16 local;
+ struct f_trie_node4 *c[1 << TRIE_STEP];
};
struct f_trie_node6
{
ip6_addr addr, mask, accept;
- uint plen;
- struct f_trie_node6 *c[2];
+ u16 plen;
+ u16 local;
+ struct f_trie_node6 *c[1 << TRIE_STEP];
};
struct f_trie_node
@@ -169,9 +101,20 @@ struct f_trie
u8 zero;
s8 ipv4; /* -1 for undefined / empty */
u16 data_size; /* Additional data for each trie node */
+ u32 prefix_count; /* Works only for restricted tries (pxlen == l == h) */
struct f_trie_node root; /* Root trie node */
};
+struct f_trie_walk_state
+{
+ u8 ipv4;
+ u8 accept_length; /* Current inter-node prefix position */
+ u8 start_pos; /* Initial prefix position in stack[0] */
+ u8 local_pos; /* Current intra-node prefix position */
+ u8 stack_pos; /* Current node in stack below */
+ const struct f_trie_node *stack[TRIE_STACK_LENGTH];
+};
+
struct f_tree *f_new_tree(void);
struct f_tree *build_tree(struct f_tree *);
const struct f_tree *find_tree(const struct f_tree *t, const struct f_val *val);
@@ -182,12 +125,75 @@ void tree_walk(const struct f_tree *t, void (*hook)(const struct f_tree *, void
struct f_trie *f_new_trie(linpool *lp, uint data_size);
void *trie_add_prefix(struct f_trie *t, const net_addr *n, uint l, uint h);
int trie_match_net(const struct f_trie *t, const net_addr *n);
+int trie_match_longest_ip4(const struct f_trie *t, const net_addr_ip4 *net, net_addr_ip4 *dst, ip4_addr *found0);
+int trie_match_longest_ip6(const struct f_trie *t, const net_addr_ip6 *net, net_addr_ip6 *dst, ip6_addr *found0);
+void trie_walk_init(struct f_trie_walk_state *s, const struct f_trie *t, const net_addr *from);
+int trie_walk_next(struct f_trie_walk_state *s, net_addr *net);
int trie_same(const struct f_trie *t1, const struct f_trie *t2);
void trie_format(const struct f_trie *t, buffer *buf);
+static inline int
+trie_match_next_longest_ip4(net_addr_ip4 *n, ip4_addr *found)
+{
+ while (n->pxlen)
+ {
+ n->pxlen--;
+ ip4_clrbit(&n->prefix, n->pxlen);
+
+ if (ip4_getbit(*found, n->pxlen))
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int
+trie_match_next_longest_ip6(net_addr_ip6 *n, ip6_addr *found)
+{
+ while (n->pxlen)
+ {
+ n->pxlen--;
+ ip6_clrbit(&n->prefix, n->pxlen);
+
+ if (ip6_getbit(*found, n->pxlen))
+ return 1;
+ }
+
+ return 0;
+}
+
+
+#define TRIE_WALK_TO_ROOT_IP4(trie, net, dst) ({ \
+ net_addr_ip4 dst; \
+ ip4_addr _found; \
+ for (int _n = trie_match_longest_ip4(trie, net, &dst, &_found); \
+ _n; \
+ _n = trie_match_next_longest_ip4(&dst, &_found))
+
+#define TRIE_WALK_TO_ROOT_IP6(trie, net, dst) ({ \
+ net_addr_ip6 dst; \
+ ip6_addr _found; \
+ for (int _n = trie_match_longest_ip6(trie, net, &dst, &_found); \
+ _n; \
+ _n = trie_match_next_longest_ip6(&dst, &_found))
+
+#define TRIE_WALK_TO_ROOT_END })
+
+
+#define TRIE_WALK(trie, net, from) ({ \
+ net_addr net; \
+ struct f_trie_walk_state tws_; \
+ trie_walk_init(&tws_, trie, from); \
+ while (trie_walk_next(&tws_, &net))
+
+#define TRIE_WALK_END })
+
+
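Usage sketch for the walk macros (assuming BIRD headers; 'trie' and 'addr' are hypothetical inputs): enumerate the prefixes stored in a trie that cover a query prefix, from the longest match towards the root. The macro opens a statement-expression block of its own, hence the separate _END marker.

    static uint count_covering(const struct f_trie *trie, const net_addr_ip4 *addr)
    {
      uint cnt = 0;

      TRIE_WALK_TO_ROOT_IP4(trie, addr, px)
      {
        cnt++;        /* px is a net_addr_ip4 for each stored prefix covering *addr */
      }
      TRIE_WALK_TO_ROOT_END;

      return cnt;
    }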
#define F_CMP_ERROR 999
-const char *f_type_name(enum f_type t);
+const char *f_type_name(btype t);
+
+enum btype f_type_element_type(btype t);
int val_same(const struct f_val *v1, const struct f_val *v2);
int val_compare(const struct f_val *v1, const struct f_val *v2);
@@ -195,15 +201,19 @@ void val_format(const struct f_val *v, buffer *buf);
char *val_format_str(struct linpool *lp, const struct f_val *v);
const char *val_dump(const struct f_val *v);
+struct f_val *lp_val_copy(struct linpool *lp, const struct f_val *v);
+
static inline int val_is_ip4(const struct f_val *v)
{ return (v->type == T_IP) && ipa_is_ip4(v->val.ip); }
int val_in_range(const struct f_val *v1, const struct f_val *v2);
int clist_set_type(const struct f_tree *set, struct f_val *v);
static inline int eclist_set_type(const struct f_tree *set)
-{ return set->from.type == T_EC; }
+{ return !set || set->from.type == T_EC; }
static inline int lclist_set_type(const struct f_tree *set)
-{ return set->from.type == T_LC; }
+{ return !set || set->from.type == T_LC; }
+static inline int path_set_type(const struct f_tree *set)
+{ return !set || set->from.type == T_INT; }
const struct adata *clist_filter(struct linpool *pool, const struct adata *list, const struct f_val *set, int pos);
const struct adata *eclist_filter(struct linpool *pool, const struct adata *list, const struct f_val *set, int pos);
@@ -219,8 +229,18 @@ undef_value(struct f_val v)
(v.val.ad == &null_adata);
}
-extern const struct f_val f_const_empty_path, f_const_empty_clist, f_const_empty_eclist, f_const_empty_lclist;
+extern const struct f_val f_const_empty_path, f_const_empty_clist, f_const_empty_eclist, f_const_empty_lclist, f_const_empty_prefix_set;
+static inline const struct f_val *f_get_empty(btype t)
+{
+ switch (t) {
+ case T_PATH: return &f_const_empty_path;
+ case T_CLIST: return &f_const_empty_clist;
+ case T_ECLIST: return &f_const_empty_eclist;
+ case T_LCLIST: return &f_const_empty_lclist;
+ default: return NULL;
+ }
+}
-enum filter_return f_eval(const struct f_line *expr, struct linpool *tmp_pool, struct f_val *pres);
+enum filter_return f_eval(const struct f_line *expr, struct f_val *pres);
#endif
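A small sketch (assuming BIRD headers) of how the new f_get_empty() is meant to be used: list-like types have a canonical empty value, everything else falls back to undefined. FI_EA_GET in f-inst.c later in this patch uses exactly this fallback for attributes that are not present on a route.

    static struct f_val read_missing_attr(btype t)
    {
      const struct f_val *empty = f_get_empty(t);
      return empty ? *empty : (struct f_val) { .type = T_VOID };
    }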
diff --git a/filter/decl.m4 b/filter/decl.m4
index 44537aaa..e2472127 100644
--- a/filter/decl.m4
+++ b/filter/decl.m4
@@ -94,7 +94,7 @@ FID_DUMP_BODY()m4_dnl
debug("%s" $4 "\n", INDENT, $5);
]])
FID_INTERPRET_EXEC()m4_dnl
-const $1 $2 = whati->$2
+$1 $2 = whati->$2
FID_INTERPRET_BODY')
# Instruction arguments are needed only until linearization is done.
@@ -191,6 +191,12 @@ if (f$1->type && f$2->type && (f$1->type != f$2->type) &&
cf_error("Arguments $1 and $2 of %s must be of the same type", f_instruction_name(what->fi_code));
FID_INTERPRET_BODY()')
+m4_define(ARG_PREFER_SAME_TYPE, `
+FID_NEW_BODY()m4_dnl
+if (f$1->type && f$2->type && (f$1->type != f$2->type))
+ (void) (f_const_promotion(f$2, f$1->type) || f_const_promotion(f$1, f$2->type));
+FID_INTERPRET_BODY()')
+
# Executing another filter line. This replaces the recursion
# that was needed in the former implementation.
m4_define(LINEX, `FID_INTERPRET_EXEC()LINEX_($1)FID_INTERPRET_NEW()return $1 FID_INTERPRET_BODY()')
@@ -216,7 +222,7 @@ whati->f$1 = f$1;
FID_DUMP_BODY()m4_dnl
f_dump_line(item->fl$1, indent + 1);
FID_LINEARIZE_BODY()m4_dnl
-item->fl$1 = f_linearize(whati->f$1);
+item->fl$1 = f_linearize(whati->f$1, $2);
FID_SAME_BODY()m4_dnl
if (!f_same(f1->fl$1, f2->fl$1)) return 0;
FID_ITERATE_BODY()m4_dnl
@@ -244,9 +250,13 @@ m4_define(ERROR,
# This macro specifies result type and makes there are no conflicting definitions
m4_define(RESULT_TYPE,
`m4_ifdef([[INST_RESULT_TYPE]],
- [[m4_ifelse(INST_RESULT_TYPE,$1,,[[ERROR([[Multiple type definitons]])]])]],
+ [[m4_ifelse(INST_RESULT_TYPE,$1,,[[ERROR([[Multiple type definitions in]] INST_NAME)]])]],
[[m4_define(INST_RESULT_TYPE,$1) RESULT_TYPE_($1)]])')
+m4_define(RESULT_TYPE_CHECK,
+ `m4_ifelse(INST_OUTVAL,0,,
+ [[m4_ifdef([[INST_RESULT_TYPE]],,[[ERROR([[Missing type definition in]] INST_NAME)]])]])')
+
m4_define(RESULT_TYPE_, `
FID_NEW_BODY()m4_dnl
what->type = $1;
@@ -256,7 +266,7 @@ FID_INTERPRET_BODY()')
m4_define(SYMBOL, `FID_MEMBER(struct symbol *, sym, [[strcmp(f1->sym->name, f2->sym->name) || (f1->sym->class != f2->sym->class)]], "symbol %s", item->sym->name)')
m4_define(RTC, `FID_MEMBER(struct rtable_config *, rtc, [[strcmp(f1->rtc->name, f2->rtc->name)]], "route table %s", item->rtc->name)')
m4_define(STATIC_ATTR, `FID_MEMBER(struct f_static_attr, sa, f1->sa.sa_code != f2->sa.sa_code,,)')
-m4_define(DYNAMIC_ATTR, `FID_MEMBER(struct f_dynamic_attr, da, f1->da.ea_code != f2->da.ea_code,,)')
+m4_define(DYNAMIC_ATTR, `FID_MEMBER(const struct ea_class *, da, f1->da != f2->da,,)')
m4_define(ACCESS_RTE, `FID_HIC(,[[do { if (!fs->rte) runtime("No route to access"); } while (0)]],NEVER_CONSTANT())')
# 2) Code wrapping
@@ -300,6 +310,7 @@ m4_define(FID_ITERATE, `FID_ZONE(10, Iteration)')
# This macro does all the code wrapping. See inline comments.
m4_define(INST_FLUSH, `m4_ifdef([[INST_NAME]], [[
+RESULT_TYPE_CHECK()m4_dnl Check for defined RESULT_TYPE()
FID_ENUM()m4_dnl Contents of enum fi_code { ... }
INST_NAME(),
FID_ENUM_STR()m4_dnl Contents of const char * indexed by enum fi_code
@@ -375,6 +386,7 @@ case INST_NAME(): {
#undef whati
#undef item
dest->items[pos].fi_code = what->fi_code;
+ dest->items[pos].flags = what->flags;
dest->items[pos].lineno = what->lineno;
break;
}
@@ -402,6 +414,7 @@ m4_define(INST, `m4_dnl This macro is called on beginning of each instruction
INST_FLUSH()m4_dnl First, old data is flushed
m4_define([[INST_NAME]], [[$1]])m4_dnl Then we store instruction name,
m4_define([[INST_INVAL]], [[$2]])m4_dnl instruction input value count,
+m4_define([[INST_OUTVAL]], [[$3]])m4_dnl instruction output value count,
m4_undefine([[INST_NEVER_CONSTANT]])m4_dnl reset NEVER_CONSTANT trigger,
m4_undefine([[INST_RESULT_TYPE]])m4_dnl and reset RESULT_TYPE value.
FID_INTERPRET_BODY()m4_dnl By default, every code is interpreter code.
@@ -490,7 +503,7 @@ fi_constant(struct f_inst *what, struct f_val val)
}
static int
-f_const_promotion(struct f_inst *arg, enum f_type want)
+f_const_promotion(struct f_inst *arg, btype want)
{
if (arg->fi_code != FI_CONSTANT)
return 0;
@@ -505,6 +518,11 @@ f_const_promotion(struct f_inst *arg, enum f_type want)
return 1;
}
+ else if ((c->type == T_SET) && (!c->val.t) && (want == T_PREFIX_SET)) {
+ *c = f_const_empty_prefix_set;
+ return 1;
+ }
+
return 0;
}
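Effect sketch of the new branch (hypothetical constant): an untyped empty-set literal asked to act as a prefix set is rewritten in place, so it then compares and matches as a T_PREFIX_SET value instead of tripping a type mismatch.

    static void promote_empty_set_sketch(void)
    {
      struct f_val c = { .type = T_SET, .val.t = NULL };   /* the "[ ]" literal */

      /* inside f_const_promotion(arg, T_PREFIX_SET) the constant becomes: */
      c = f_const_empty_prefix_set;
      ASSERT(c.type == T_PREFIX_SET && c.val.ti == &f_const_empty_trie);
    }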
@@ -560,7 +578,7 @@ FID_WR_PUT(8)
}
struct f_line *
-f_linearize_concat(const struct f_inst * const inst[], uint count)
+f_linearize_concat(const struct f_inst * const inst[], uint count, uint results)
{
uint len = 0;
for (uint i=0; i<count; i++)
@@ -572,6 +590,8 @@ f_linearize_concat(const struct f_inst * const inst[], uint count)
for (uint i=0; i<count; i++)
out->len = linearize(out, inst[i], out->len);
+ out->results = results;
+
#ifdef LOCAL_DEBUG
f_dump_line(out, 0);
#endif
@@ -640,7 +660,8 @@ FID_WR_PUT(4)m4_dnl
struct f_inst {
struct f_inst *next; /* Next instruction */
enum f_instruction_code fi_code; /* Instruction code */
- enum f_type type; /* Type of returned value, if known */
+ enum f_instruction_flags flags; /* Flags, instruction-specific */
+ btype type; /* Type of returned value, if known */
int size; /* How many instructions are underneath */
int lineno; /* Line number */
union {
diff --git a/filter/f-inst.c b/filter/f-inst.c
index 00e22383..fff93517 100644
--- a/filter/f-inst.c
+++ b/filter/f-inst.c
@@ -62,8 +62,9 @@
* m4_dnl INST(FI_NOP, in, out) { enum value, input args, output args
* m4_dnl ARG(num, type); argument, its id (in data fields) and type accessible by v1, v2, v3
* m4_dnl ARG_ANY(num); argument with no type check accessible by v1, v2, v3
+ * m4_dnl ARG_TYPE(num, type); just declare the type of argument
* m4_dnl VARARG; variable-length argument list; accessible by vv(i) and whati->varcount
- * m4_dnl LINE(num, unused); this argument has to be converted to its own f_line
+ * m4_dnl LINE(num, out); this argument has to be converted to its own f_line
* m4_dnl SYMBOL; symbol handed from config
* m4_dnl STATIC_ATTR; static attribute definition
* m4_dnl DYNAMIC_ATTR; dynamic attribute definition
@@ -80,10 +81,17 @@
* m4_dnl )
*
* m4_dnl RESULT(type, union-field, value); putting this on value stack
+ * m4_dnl RESULT_(type, union-field, value); like RESULT(), but do not declare the type
* m4_dnl RESULT_VAL(value-struct); pass the struct f_val directly
+ * m4_dnl RESULT_TYPE(type); just declare the type of result value
* m4_dnl RESULT_VOID; return undef
* m4_dnl }
*
+ * Note that runtime arguments m4_dnl (ARG*, VARARG) must be defined before
+ * parse-time arguments m4_dnl (LINE, SYMBOL, ...). During linearization,
+ * the former move the position in the f_line by linearizing their arguments
+ * first, while the latter store their data at the current position.
+ *
* Also note that the { ... } blocks are not respected by M4 at all.
* If you get weird unmatched-brace-pair errors, check what it generated and why.
* What is really considered as one instruction is not the { ... } block
@@ -91,6 +99,24 @@
*
* Other code is just copied into the interpreter part.
*
+ * The filter language uses a simple type system, where values have types
+ * (constants T_*) and also terms (instructions) are statically typed. Our
+ * static typing is partial (some terms do not declare types of arguments
+ * or results), therefore it can detect most but not all type errors and
+ * therefore we still have runtime type checks.
+ *
+ * m4_dnl Types of arguments are declared by macros ARG() and ARG_TYPE(),
+ * m4_dnl types of results are declared by RESULT() and RESULT_TYPE().
+ * m4_dnl Macros ARG_ANY(), RESULT_() and RESULT_VAL() do not declare types
+ * m4_dnl themselves, but can be combined with ARG_TYPE() / RESULT_TYPE().
+ *
+ * m4_dnl Note that types should be declared only once. If there are
+ * m4_dnl multiple RESULT() macros in an instruction definition, they must
+ * m4_dnl use the exact same expression for type, or they should be replaced
+ * m4_dnl by multiple RESULT_() macros and a common RESULT_TYPE() macro.
+ * m4_dnl See e.g. FI_EA_GET or FI_MIN instructions.
+ *
+ *
* If you are satisfied with this, you don't need to read the following
* detailed description of what is really done with the instruction definitions.
*
@@ -212,10 +238,40 @@
* m4_dnl NEVER_CONSTANT-> don't generate pre-interpretation code at all
* m4_dnl ACCESS_RTE -> check that route is available, also NEVER_CONSTANT
* m4_dnl ACCESS_EATTRS -> pre-cache the eattrs; use only with ACCESS_RTE
- * m4_dnl f_rta_cow(fs) -> function to call before any change to route should be done
*
* m4_dnl If you are stymied, see FI_CALL or FI_CONSTANT or just search for
* m4_dnl the mentioned macros in this file to see what is happening there in wild.
+ *
+ *
+ * A note about soundness of the type system:
+ *
+ * A type system is sound when the types of expressions are consistent with
+ * the types of values resulting from evaluating those expressions. Untyped
+ * expressions are ok, but badly typed expressions are not sound. So is
+ * the type system of the BIRD filtering code sound? A few points:
+ *
+ * All cases of a (single) m4_dnl RESULT() macro are obviously ok, as the macro
+ * both declares a type and returns a value. One has to check instructions
+ * that use the m4_dnl RESULT_TYPE() macro. There are two issues:
+ *
+ * FI_AND, FI_OR - the second argument is statically checked to be T_BOOL and
+ * passed as the result without a dynamic typecheck, declared to be T_BOOL. If
+ * an untyped non-bool expression is used as the second argument, then
+ * a value of the mismatched type is returned.
+ *
+ * FI_VAR_GET - soundness depends on the consistency of declared symbol types
+ * and stored values. This is maintained when values are stored by
+ * FI_VAR_SET, but when they are stored by FI_CALL, only static checking is
+ * used, so when an untyped expression returning a mismatched value is used
+ * as a function argument, an inconsistent value is stored and a subsequent
+ * FI_VAR_GET would be unsound.
+ *
+ * Both of these issues are inconsequential, as mismatched values from
+ * unsound expressions will be caught by dynamic typechecks, just like
+ * mismatched values from untyped expressions.
+ *
+ * Also note that FI_CALL is the only expression without properly declared
+ * result type.
*/
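A tiny illustration of the FI_AND caveat above (a sketch assuming BIRD headers; 'cond' and 'u' are hypothetical subterms): when 'u' is untyped (u->type == 0), the static T_BOOL check on the second argument cannot fire, so a non-bool value produced by 'u' at run time is passed through under a declared T_BOOL type and only a later dynamic typecheck catches it.

    static struct f_inst *and_with_untyped(struct f_inst *cond, struct f_inst *u)
    {
      /* cond is known to be T_BOOL; u carries no static type information */
      return f_new_inst(FI_AND, cond, u);
    }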
/* Binary operators */
@@ -240,13 +296,23 @@
if (v2.val.i == 0) runtime( "Mother told me not to divide by 0" );
RESULT(T_INT, i, v1.val.i / v2.val.i);
}
+ INST(FI_BITOR, 2, 1) {
+ ARG(1,T_INT);
+ ARG(2,T_INT);
+ RESULT(T_INT, i, v1.val.i | v2.val.i);
+ }
+ INST(FI_BITAND, 2, 1) {
+ ARG(1,T_INT);
+ ARG(2,T_INT);
+ RESULT(T_INT, i, v1.val.i & v2.val.i);
+ }
INST(FI_AND, 1, 1) {
ARG(1,T_BOOL);
ARG_TYPE_STATIC(2,T_BOOL);
RESULT_TYPE(T_BOOL);
if (v1.val.i)
- LINE(2,0);
+ LINE(2,1);
else
RESULT_VAL(v1);
}
@@ -256,7 +322,7 @@
RESULT_TYPE(T_BOOL);
if (!v1.val.i)
- LINE(2,0);
+ LINE(2,1);
else
RESULT_VAL(v1);
}
@@ -349,7 +415,7 @@
break;
case T_SET:
- if (vv(i).val.t->from.type != T_INT)
+ if (!path_set_type(vv(i).val.t))
runtime("Only integer sets allowed in path mask");
pm->item[i] = (struct f_path_mask_item) {
@@ -371,12 +437,14 @@
INST(FI_NEQ, 2, 1) {
ARG_ANY(1);
ARG_ANY(2);
+ ARG_PREFER_SAME_TYPE(1, 2);
RESULT(T_BOOL, i, !val_same(&v1, &v2));
}
INST(FI_EQ, 2, 1) {
ARG_ANY(1);
ARG_ANY(2);
+ ARG_PREFER_SAME_TYPE(1, 2);
RESULT(T_BOOL, i, val_same(&v1, &v2));
}
@@ -447,6 +515,18 @@
RESULT(T_BOOL, i, ipa_is_ip4(v1.val.ip));
}
+ INST(FI_VAR_INIT, 1, 0) {
+ NEVER_CONSTANT;
+ ARG_ANY(1);
+ SYMBOL;
+ ARG_TYPE(1, sym->class & 0xff);
+
+ /* New variable is always the last on stack */
+ uint pos = curline.vbase + sym->offset;
+ fstk->vstk[pos] = v1;
+ fstk->vcnt = pos + 1;
+ }
+
/* Set to indirect value prepared in v1 */
INST(FI_VAR_SET, 1, 0) {
NEVER_CONSTANT;
@@ -477,12 +557,100 @@
RESULT_VAL(val);
}
+ INST(FI_FOR_INIT, 1, 0) {
+ NEVER_CONSTANT;
+ ARG_ANY(1);
+ SYMBOL;
+
+ FID_NEW_BODY()
+ ASSERT((sym->class & ~0xff) == SYM_VARIABLE);
+
+ /* Static type check */
+ if (f1->type)
+ {
+ enum btype t_var = (sym->class & 0xff);
+ enum btype t_arg = f_type_element_type(f1->type);
+ if (!t_arg)
+ cf_error("Value of expression in FOR must be iterable, got %s",
+ f_type_name(f1->type));
+ if (t_var != t_arg)
+ cf_error("Loop variable '%s' in FOR must be %s, is %s",
+ sym->name, f_type_name(t_arg), f_type_name(t_var));
+ }
+
+ FID_INTERPRET_BODY()
+
+ /* Dynamic type check */
+ if ((sym->class & 0xff) != f_type_element_type(v1.type))
+ runtime("Mismatched argument and variable type");
+
+ /* Setup the index */
+ v2 = (struct f_val) { .type = T_INT, .val.i = 0 };
+
+ /* Keep v1 and v2 on the stack */
+ fstk->vcnt += 2;
+ }
+
+ INST(FI_FOR_NEXT, 2, 0) {
+ NEVER_CONSTANT;
+ SYMBOL;
+
+ /* Type checks are done in FI_FOR_INIT */
+
+ /* Loop variable */
+ struct f_val *var = &fstk->vstk[curline.vbase + sym->offset];
+ int step = 0;
+
+ switch(v1.type)
+ {
+ case T_PATH:
+ var->type = T_INT;
+ step = as_path_walk(v1.val.ad, &v2.val.i, &var->val.i);
+ break;
+
+ case T_CLIST:
+ var->type = T_PAIR;
+ step = int_set_walk(v1.val.ad, &v2.val.i, &var->val.i);
+ break;
+
+ case T_ECLIST:
+ var->type = T_EC;
+ step = ec_set_walk(v1.val.ad, &v2.val.i, &var->val.ec);
+ break;
+
+ case T_LCLIST:
+ var->type = T_LC;
+ step = lc_set_walk(v1.val.ad, &v2.val.i, &var->val.lc);
+ break;
+
+ default:
+      runtime( "Path, clist, eclist or lclist expected" );
+ }
+
+ if (step)
+ {
+ /* Keep v1 and v2 on the stack */
+ fstk->vcnt += 2;
+
+ /* Repeat this instruction */
+ curline.pos--;
+
+ /* Execute the loop body */
+ LINE(1, 0);
+
+ /* Space for loop variable, may be unused */
+ fstk->vcnt += 1;
+ }
+ else
+ var->type = T_VOID;
+ }
+
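The FI_FOR_NEXT step above relies on the *_walk() helpers sharing one protocol: an opaque position cursor advanced by the callee, one element written per call, and a zero return when the container is exhausted. A standalone-ish sketch (assuming BIRD headers) for the AS-path case; int_set_walk/ec_set_walk/lc_set_walk have the same shape with their respective element types.

    static uint count_path_asns(const struct adata *path)
    {
      uint pos = 0, asn, n = 0;

      while (as_path_walk(path, &pos, &asn))
        n++;                 /* asn holds the next AS number on each iteration */

      return n;
    }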
INST(FI_CONDITION, 1, 0) {
ARG(1, T_BOOL);
if (v1.val.i)
LINE(2,0);
else
- LINE(3,1);
+ LINE(3,0);
}
INST(FI_PRINT, 0, 0) {
@@ -519,78 +687,98 @@
{
STATIC_ATTR;
ACCESS_RTE;
- struct rta *rta = fs->rte->attrs;
+ ACCESS_EATTRS;
switch (sa.sa_code)
{
- case SA_FROM: RESULT(sa.f_type, ip, rta->from); break;
- case SA_GW: RESULT(sa.f_type, ip, rta->nh.gw); break;
- case SA_NET: RESULT(sa.f_type, net, fs->rte->net); break;
- case SA_PROTO: RESULT(sa.f_type, s, fs->rte->src->proto->name); break;
- case SA_SOURCE: RESULT(sa.f_type, i, rta->source); break;
- case SA_SCOPE: RESULT(sa.f_type, i, rta->scope); break;
- case SA_DEST: RESULT(sa.f_type, i, rta->dest); break;
- case SA_IFNAME: RESULT(sa.f_type, s, rta->nh.iface ? rta->nh.iface->name : ""); break;
- case SA_IFINDEX: RESULT(sa.f_type, i, rta->nh.iface ? rta->nh.iface->index : 0); break;
- case SA_WEIGHT: RESULT(sa.f_type, i, rta->nh.weight + 1); break;
- case SA_PREF: RESULT(sa.f_type, i, rta->pref); break;
- case SA_GW_MPLS: RESULT(sa.f_type, i, rta->nh.labels ? rta->nh.label[0] : MPLS_NULL); break;
-
+ case SA_NET: RESULT(sa.type, net, fs->rte->net); break;
+ case SA_PROTO: RESULT(sa.type, s, fs->rte->src->proto->name); break;
default:
- bug("Invalid static attribute access (%u/%u)", sa.f_type, sa.sa_code);
+ {
+ struct eattr *nhea = ea_find(*fs->eattrs, &ea_gen_nexthop);
+ struct nexthop_adata *nhad = nhea ? (struct nexthop_adata *) nhea->u.ptr : NULL;
+ struct nexthop *nh = nhad ? &nhad->nh : NULL;
+
+ switch (sa.sa_code)
+ {
+ case SA_DEST:
+ RESULT(sa.type, i, nhad ?
+ (NEXTHOP_IS_REACHABLE(nhad) ? RTD_UNICAST : nhad->dest)
+ : RTD_NONE);
+ break;
+ case SA_GW:
+ RESULT(sa.type, ip, nh ? nh->gw : IPA_NONE);
+ break;
+ case SA_IFNAME:
+ RESULT(sa.type, s, (nh && nh->iface) ? nh->iface->name : "");
+ break;
+ case SA_IFINDEX:
+ RESULT(sa.type, i, (nh && nh->iface) ? nh->iface->index : 0);
+ break;
+ case SA_WEIGHT:
+ RESULT(sa.type, i, (nh ? nh->weight : 0) + 1);
+ break;
+ case SA_GW_MPLS:
+ RESULT(sa.type, i, (nh && nh->labels) ? nh->label[0] : MPLS_NULL);
+ break;
+ default:
+ bug("Invalid static attribute access (%u/%u)", sa.type, sa.sa_code);
+ }
+ }
}
}
}
INST(FI_RTA_SET, 1, 0) {
ACCESS_RTE;
+ ACCESS_EATTRS;
ARG_ANY(1);
STATIC_ATTR;
- ARG_TYPE(1, sa.f_type);
-
- f_rta_cow(fs);
+ ARG_TYPE(1, sa.type);
{
- struct rta *rta = fs->rte->attrs;
+ union {
+ struct nexthop_adata nha;
+ struct {
+ struct adata ad;
+ struct nexthop nh;
+ u32 label;
+ };
+ } nha;
+
+ nha.ad = (struct adata) {
+ .length = sizeof (struct nexthop_adata) - sizeof (struct adata),
+ };
+
+ eattr *a = NULL;
switch (sa.sa_code)
{
- case SA_FROM:
- rta->from = v1.val.ip;
- break;
+ case SA_DEST:
+ {
+ int i = v1.val.i;
+ if ((i != RTD_BLACKHOLE) && (i != RTD_UNREACHABLE) && (i != RTD_PROHIBIT))
+ runtime( "Destination can be changed only to blackhole, unreachable or prohibit" );
+ nha.nha.dest = i;
+ nha.ad.length = NEXTHOP_DEST_SIZE;
+ break;
+ }
case SA_GW:
{
+ struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
+
ip_addr ip = v1.val.ip;
- struct iface *ifa = ipa_is_link_local(ip) ? rta->nh.iface : NULL;
+ struct iface *ifa = (ipa_is_link_local(ip) && nh_ea) ?
+ ((struct nexthop_adata *) nh_ea->u.ptr)->nh.iface : NULL;
+
neighbor *n = neigh_find(fs->rte->src->proto, ip, ifa, 0);
if (!n || (n->scope == SCOPE_HOST))
runtime( "Invalid gw address" );
- rta->dest = RTD_UNICAST;
- rta->nh.gw = ip;
- rta->nh.iface = n->iface;
- rta->nh.next = NULL;
- rta->hostentry = NULL;
- rta->nh.labels = 0;
- }
- break;
-
- case SA_SCOPE:
- rta->scope = v1.val.i;
- break;
-
- case SA_DEST:
- {
- int i = v1.val.i;
- if ((i != RTD_BLACKHOLE) && (i != RTD_UNREACHABLE) && (i != RTD_PROHIBIT))
- runtime( "Destination can be changed only to blackhole, unreachable or prohibit" );
-
- rta->dest = i;
- rta->nh.gw = IPA_NONE;
- rta->nh.iface = NULL;
- rta->nh.next = NULL;
- rta->hostentry = NULL;
- rta->nh.labels = 0;
+ nha.nh = (struct nexthop) {
+ .gw = ip,
+ .iface = n->iface,
+ };
}
break;
@@ -600,12 +788,9 @@
if (!ifa)
runtime( "Invalid iface name" );
- rta->dest = RTD_UNICAST;
- rta->nh.gw = IPA_NONE;
- rta->nh.iface = ifa;
- rta->nh.next = NULL;
- rta->hostentry = NULL;
- rta->nh.labels = 0;
+ nha.nh = (struct nexthop) {
+ .iface = ifa,
+ };
}
break;
@@ -614,13 +799,20 @@
if (v1.val.i >= 0x100000)
runtime( "Invalid MPLS label" );
+ struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
+ if (!nh_ea)
+ runtime( "No nexthop to add a MPLS label to" );
+
+ nha.nh = ((struct nexthop_adata *) nh_ea->u.ptr)->nh;
+
if (v1.val.i != MPLS_NULL)
{
- rta->nh.label[0] = v1.val.i;
- rta->nh.labels = 1;
+ nha.nh.label[0] = v1.val.i;
+ nha.nh.labels = 1;
+ nha.ad.length = sizeof nha - sizeof (struct adata);
}
else
- rta->nh.labels = 0;
+ nha.nh.labels = 0;
}
break;
@@ -629,22 +821,36 @@
int i = v1.val.i;
if (i < 1 || i > 256)
runtime( "Setting weight value out of bounds" );
- if (rta->dest != RTD_UNICAST)
+
+ struct eattr *nh_ea = ea_find(*fs->eattrs, &ea_gen_nexthop);
+ if (!nh_ea)
+ runtime( "No nexthop to set weight on" );
+
+ struct nexthop_adata *nhad = (struct nexthop_adata *) nh_ea->u.ptr;
+ if (!NEXTHOP_IS_REACHABLE(nhad))
runtime( "Setting weight needs regular nexthop " );
+ struct nexthop_adata *nhax = (struct nexthop_adata *) tmp_copy_adata(&nhad->ad);
+
/* Set weight on all next hops */
- for (struct nexthop *nh = &rta->nh; nh; nh = nh->next)
+ NEXTHOP_WALK(nh, nhax)
nh->weight = i - 1;
- }
- break;
- case SA_PREF:
- rta->pref = v1.val.i;
+ a = ea_set_attr(fs->eattrs,
+ EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, &nhax->ad));
+ }
break;
default:
- bug("Invalid static attribute access (%u/%u)", sa.f_type, sa.sa_code);
+ bug("Invalid static attribute access (%u/%u)", sa.type, sa.sa_code);
}
+
+ if (!a)
+ a = ea_set_attr(fs->eattrs,
+ EA_LITERAL_DIRECT_ADATA(&ea_gen_nexthop, 0, tmp_copy_adata(&nha.ad)));
+
+ a->originated = 1;
+ a->fresh = 1;
}
}
@@ -652,74 +858,30 @@
DYNAMIC_ATTR;
ACCESS_RTE;
ACCESS_EATTRS;
- RESULT_TYPE(da.f_type);
+ RESULT_TYPE(da->type);
{
- eattr *e = ea_find(*fs->eattrs, da.ea_code);
-
- if (!e) {
- /* A special case: undefined as_path looks like empty as_path */
- if (da.type == EAF_TYPE_AS_PATH) {
- RESULT_(T_PATH, ad, &null_adata);
- break;
- }
+ const struct f_val *empty;
+ const eattr *e = ea_find(*fs->eattrs, da->id);
- /* The same special case for int_set */
- if (da.type == EAF_TYPE_INT_SET) {
- RESULT_(T_CLIST, ad, &null_adata);
- break;
- }
-
- /* The same special case for ec_set */
- if (da.type == EAF_TYPE_EC_SET) {
- RESULT_(T_ECLIST, ad, &null_adata);
- break;
- }
+ if (e)
+ {
+ ASSERT_DIE(e->type == da->type);
- /* The same special case for lc_set */
- if (da.type == EAF_TYPE_LC_SET) {
- RESULT_(T_LCLIST, ad, &null_adata);
- break;
+ switch (e->type) {
+ case T_IP:
+ RESULT_(T_IP, ip, *((const ip_addr *) e->u.ptr->data));
+ break;
+ default:
+ RESULT_VAL([[(struct f_val) {
+ .type = e->type,
+ .val.bval = e->u,
+ }]]);
}
-
- /* Undefined value */
- RESULT_VOID;
- break;
}
-
- switch (e->type & EAF_TYPE_MASK) {
- case EAF_TYPE_INT:
- RESULT_(da.f_type, i, e->u.data);
- break;
- case EAF_TYPE_ROUTER_ID:
- RESULT_(T_QUAD, i, e->u.data);
- break;
- case EAF_TYPE_OPAQUE:
- RESULT_(T_ENUM_EMPTY, i, 0);
- break;
- case EAF_TYPE_IP_ADDRESS:
- RESULT_(T_IP, ip, *((ip_addr *) e->u.ptr->data));
- break;
- case EAF_TYPE_AS_PATH:
- RESULT_(T_PATH, ad, e->u.ptr);
- break;
- case EAF_TYPE_BITFIELD:
- RESULT_(T_BOOL, i, !!(e->u.data & (1u << da.bit)));
- break;
- case EAF_TYPE_INT_SET:
- RESULT_(T_CLIST, ad, e->u.ptr);
- break;
- case EAF_TYPE_EC_SET:
- RESULT_(T_ECLIST, ad, e->u.ptr);
- break;
- case EAF_TYPE_LC_SET:
- RESULT_(T_LCLIST, ad, e->u.ptr);
- break;
- case EAF_TYPE_UNDEF:
+ else if (empty = f_get_empty(da->type))
+ RESULT_VAL(*empty);
+ else
RESULT_VOID;
- break;
- default:
- bug("Unknown dynamic attribute type");
- }
}
}
@@ -728,62 +890,32 @@
ACCESS_EATTRS;
ARG_ANY(1);
DYNAMIC_ATTR;
- ARG_TYPE(1, da.f_type);
+ ARG_TYPE(1, da->type);
{
- struct ea_list *l = lp_alloc(fs->pool, sizeof(struct ea_list) + sizeof(eattr));
-
- l->next = NULL;
- l->flags = EALF_SORTED;
- l->count = 1;
- l->attrs[0].id = da.ea_code;
- l->attrs[0].flags = 0;
- l->attrs[0].type = da.type | EAF_ORIGINATED | EAF_FRESH;
-
- switch (da.type) {
- case EAF_TYPE_INT:
- case EAF_TYPE_ROUTER_ID:
- l->attrs[0].u.data = v1.val.i;
- break;
+ struct eattr *a;
- case EAF_TYPE_OPAQUE:
- runtime( "Setting opaque attribute is not allowed" );
- break;
+ if (da->type >= EAF_TYPE__MAX)
+ bug("Unsupported attribute type");
- case EAF_TYPE_IP_ADDRESS:;
- int len = sizeof(ip_addr);
- struct adata *ad = lp_alloc(fs->pool, sizeof(struct adata) + len);
- ad->length = len;
- (* (ip_addr *) ad->data) = v1.val.ip;
- l->attrs[0].u.ptr = ad;
+ switch (da->type) {
+ case T_OPAQUE:
+ case T_IFACE:
+ runtime( "Setting opaque attribute is not allowed" );
break;
- case EAF_TYPE_AS_PATH:
- case EAF_TYPE_INT_SET:
- case EAF_TYPE_EC_SET:
- case EAF_TYPE_LC_SET:
- l->attrs[0].u.ptr = v1.val.ad;
- break;
-
- case EAF_TYPE_BITFIELD:
- {
- /* First, we have to find the old value */
- eattr *e = ea_find(*fs->eattrs, da.ea_code);
- u32 data = e ? e->u.data : 0;
-
- if (v1.val.i)
- l->attrs[0].u.data = data | (1u << da.bit);
- else
- l->attrs[0].u.data = data & ~(1u << da.bit);
- }
+ case T_IP:
+ a = ea_set_attr(fs->eattrs,
+ EA_LITERAL_STORE_ADATA(da, 0, &v1.val.ip, sizeof(ip_addr)));
break;
default:
- bug("Unknown dynamic attribute type");
+ a = ea_set_attr(fs->eattrs,
+ EA_LITERAL_GENERIC(da->id, da->type, 0, .u = v1.val.bval));
+ break;
}
- f_rta_cow(fs);
- l->next = *fs->eattrs;
- *fs->eattrs = l;
+ a->originated = 1;
+ a->fresh = 1;
}
}
@@ -792,21 +924,20 @@
ACCESS_RTE;
ACCESS_EATTRS;
- {
- struct ea_list *l = lp_alloc(fs->pool, sizeof(struct ea_list) + sizeof(eattr));
-
- l->next = NULL;
- l->flags = EALF_SORTED;
- l->count = 1;
- l->attrs[0].id = da.ea_code;
- l->attrs[0].flags = 0;
- l->attrs[0].type = EAF_TYPE_UNDEF | EAF_ORIGINATED | EAF_FRESH;
- l->attrs[0].u.data = 0;
-
- f_rta_cow(fs);
- l->next = *fs->eattrs;
- *fs->eattrs = l;
- }
+ ea_unset_attr(fs->eattrs, 1, da);
+ }
+
+ INST(FI_DEFAULT, 2, 1) {
+ ARG_ANY(1);
+ ARG_ANY(2);
+ RESULT_TYPE(f_type_element_type(v2.type));
+
+ if (v1.type == T_VOID)
+ RESULT_VAL(v2);
+ else
+ RESULT_VAL(v1);
}
INST(FI_LENGTH, 1, 1) { /* Get length of */
@@ -901,14 +1032,31 @@
((net_addr_roa6 *) v1.val.net)->max_pxlen);
}
- INST(FI_ROA_ASN, 1, 1) { /* Get ROA ASN */
- ARG(1, T_NET);
- if (!net_is_roa(v1.val.net))
- runtime( "ROA expected" );
+ INST(FI_ASN, 1, 1) { /* Get ROA ASN or community ASN part */
+ ARG_ANY(1);
+ RESULT_TYPE(T_INT);
+ switch(v1.type)
+ {
+ case T_NET:
+ if (!net_is_roa(v1.val.net))
+ runtime( "ROA expected" );
- RESULT(T_INT, i, (v1.val.net->type == NET_ROA4) ?
- ((net_addr_roa4 *) v1.val.net)->asn :
- ((net_addr_roa6 *) v1.val.net)->asn);
+ RESULT_(T_INT, i, (v1.val.net->type == NET_ROA4) ?
+ ((net_addr_roa4 *) v1.val.net)->asn :
+ ((net_addr_roa6 *) v1.val.net)->asn);
+ break;
+
+ case T_PAIR:
+ RESULT_(T_INT, i, v1.val.i >> 16);
+ break;
+
+ case T_LC:
+ RESULT_(T_INT, i, v1.val.lc.asn);
+ break;
+
+ default:
+ runtime( "Net, pair or lc expected" );
+ }
}
INST(FI_IP, 1, 1) { /* Convert prefix to ... */
@@ -942,7 +1090,90 @@
RESULT(T_INT, i, as_path_get_last_nonaggregated(v1.val.ad));
}
- INST(FI_RETURN, 1, 1) {
+ INST(FI_PAIR_DATA, 1, 1) { /* Get data part from the standard community */
+ ARG(1, T_PAIR);
+ RESULT(T_INT, i, v1.val.i & 0xFFFF);
+ }
+
+ INST(FI_LC_DATA1, 1, 1) { /* Get data1 part from the large community */
+ ARG(1, T_LC);
+ RESULT(T_INT, i, v1.val.lc.ldp1);
+ }
+
+ INST(FI_LC_DATA2, 1, 1) { /* Get data2 part from the large community */
+ ARG(1, T_LC);
+ RESULT(T_INT, i, v1.val.lc.ldp2);
+ }
+
+ INST(FI_MIN, 1, 1) { /* Get minimum element from list */
+ ARG_ANY(1);
+ RESULT_TYPE(f_type_element_type(v1.type));
+ switch(v1.type)
+ {
+ case T_CLIST:
+ {
+ u32 val = 0;
+ int_set_min(v1.val.ad, &val);
+ RESULT_(T_PAIR, i, val);
+ }
+ break;
+
+ case T_ECLIST:
+ {
+ u64 val = 0;
+ ec_set_min(v1.val.ad, &val);
+ RESULT_(T_EC, ec, val);
+ }
+ break;
+
+ case T_LCLIST:
+ {
+ lcomm val = { 0, 0, 0 };
+ lc_set_min(v1.val.ad, &val);
+ RESULT_(T_LC, lc, val);
+ }
+ break;
+
+ default:
+ runtime( "Clist or lclist expected" );
+ }
+ }
+
+ INST(FI_MAX, 1, 1) { /* Get maximum element from list */
+ ARG_ANY(1);
+ RESULT_TYPE(f_type_element_type(v1.type));
+ switch(v1.type)
+ {
+ case T_CLIST:
+ {
+ u32 val = 0;
+ int_set_max(v1.val.ad, &val);
+ RESULT_(T_PAIR, i, val);
+ }
+ break;
+
+ case T_ECLIST:
+ {
+ u64 val = 0;
+ ec_set_max(v1.val.ad, &val);
+ RESULT_(T_EC, ec, val);
+ }
+ break;
+
+ case T_LCLIST:
+ {
+ lcomm val = { 0, 0, 0 };
+ lc_set_max(v1.val.ad, &val);
+ RESULT_(T_LC, lc, val);
+ }
+ break;
+
+ default:
+ runtime( "Clist or lclist expected" );
+ }
+ }
+
+ INST(FI_RETURN, 1, 0) {
NEVER_CONSTANT;
/* Acquire the return value */
ARG_ANY(1);
@@ -970,28 +1201,59 @@
INST(FI_CALL, 0, 1) {
NEVER_CONSTANT;
+ VARARG;
SYMBOL;
+ /* Fake result type declaration */
+ RESULT_TYPE(T_VOID);
+
+ FID_NEW_BODY()
+ ASSERT(sym->class == SYM_FUNCTION);
+
+ if (whati->varcount != sym->function->args)
+ cf_error("Function '%s' expects %u arguments, got %u arguments",
+ sym->name, sym->function->args, whati->varcount);
+
+ /* Typecheck individual arguments */
+ struct f_inst *a = fvar;
+ struct f_arg *b = sym->function->arg_list;
+ for (uint i = 1; a && b; a = a->next, b = b->next, i++)
+ {
+ enum btype b_type = b->arg->class & 0xff;
+
+ if (a->type && (a->type != b_type) && !f_const_promotion(a, b_type))
+ cf_error("Argument %u of '%s' must be %s, got %s",
+ i, sym->name, f_type_name(b_type), f_type_name(a->type));
+ }
+ ASSERT(!a && !b);
+
+ /* Add implicit void slot for the return value */
+ struct f_inst *tmp = f_new_inst(FI_CONSTANT, (struct f_val) { .type = T_VOID });
+ tmp->next = whati->fvar;
+ whati->fvar = tmp;
+ what->size += tmp->size;
+
+ /* Mark recursive calls, they have dummy f_line */
+ if (!sym->function->len)
+ what->flags |= FIF_RECURSIVE;
+
FID_SAME_BODY()
- if (!(f1->sym->flags & SYM_FLAG_SAME))
- return 0;
+ if (!(f1->sym->flags & SYM_FLAG_SAME) && !(f1_->flags & FIF_RECURSIVE))
+ return 0;
FID_ITERATE_BODY()
+ if (!(what->flags & FIF_RECURSIVE))
BUFFER_PUSH(fit->lines) = whati->sym->function;
FID_INTERPRET_BODY()
/* Push the body on stack */
LINEX(sym->function);
+ curline.vbase = curline.ventry;
curline.emask |= FE_RETURN;
- /* Before this instruction was called, there was the T_VOID
- * automatic return value pushed on value stack and also
- * sym->function->args function arguments. Setting the
- * vbase to point to first argument. */
- ASSERT(curline.ventry >= sym->function->args);
- curline.ventry -= sym->function->args;
- curline.vbase = curline.ventry;
+ /* Arguments on stack */
+ fstk->vcnt += sym->function->args;
/* Storage for local variables */
f_vcnt_check_overflow(sym->function->vars);
@@ -1105,17 +1367,10 @@
if (v1.type == T_PATH)
{
- const struct f_tree *set = NULL;
- u32 key = 0;
-
- if (v2.type == T_INT)
- key = v2.val.i;
- else if ((v2.type == T_SET) && (v2.val.t->from.type == T_INT))
- set = v2.val.t;
+ if ((v2.type == T_SET) && path_set_type(v2.val.t) || (v2.type == T_INT))
+ RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, &v2, 0) ]]);
else
runtime("Can't delete non-integer (set)");
-
- RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, set, key, 0) ]]);
}
else if (v1.type == T_CLIST)
@@ -1167,10 +1422,8 @@
if (v1.type == T_PATH)
{
- u32 key = 0;
-
- if ((v2.type == T_SET) && (v2.val.t->from.type == T_INT))
- RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, v2.val.t, key, 1) ]]);
+ if ((v2.type == T_SET) && path_set_type(v2.val.t))
+ RESULT_(T_PATH, ad, [[ as_path_filter(fpool, v1.val.ad, &v2, 1) ]]);
else
runtime("Can't filter integer");
}
@@ -1208,37 +1461,7 @@
runtime("Can't filter non-[e|l]clist");
}
- INST(FI_ROA_CHECK_IMPLICIT, 0, 1) { /* ROA Check */
- NEVER_CONSTANT;
- RTC(1);
- struct rtable *table = rtc->table;
- ACCESS_RTE;
- ACCESS_EATTRS;
- const net_addr *net = fs->rte->net;
-
- /* We ignore temporary attributes, probably not a problem here */
- /* 0x02 is a value of BA_AS_PATH, we don't want to include BGP headers */
- eattr *e = ea_find(*fs->eattrs, EA_CODE(PROTOCOL_BGP, 0x02));
-
- if (!e || ((e->type & EAF_TYPE_MASK) != EAF_TYPE_AS_PATH))
- runtime("Missing AS_PATH attribute");
-
- u32 as = 0;
- as_path_get_last(e->u.ptr, &as);
-
- if (!table)
- runtime("Missing ROA table");
-
- if (table->addr_type != NET_ROA4 && table->addr_type != NET_ROA6)
- runtime("Table type must be either ROA4 or ROA6");
-
- if (table->addr_type != (net->type == NET_IP4 ? NET_ROA4 : NET_ROA6))
- RESULT(T_ENUM_ROA, i, ROA_UNKNOWN); /* Prefix and table type mismatch */
- else
- RESULT(T_ENUM_ROA, i, [[ net_roa_check(table, net, as) ]]);
- }
-
- INST(FI_ROA_CHECK_EXPLICIT, 2, 1) { /* ROA Check */
+ INST(FI_ROA_CHECK, 2, 1) { /* ROA Check */
NEVER_CONSTANT;
ARG(1, T_NET);
ARG(2, T_INT);
@@ -1260,7 +1483,7 @@
}
- INST(FI_FORMAT, 1, 0) { /* Format */
+ INST(FI_FORMAT, 1, 1) { /* Format */
ARG_ANY(1);
RESULT(T_STRING, s, val_format_str(fpool, &v1));
}
diff --git a/filter/f-inst.h b/filter/f-inst.h
index df45f88e..fbc59de7 100644
--- a/filter/f-inst.h
+++ b/filter/f-inst.h
@@ -22,7 +22,7 @@
/* Flags for instructions */
enum f_instruction_flags {
- FIF_PRINTED = 1, /* FI_PRINT_AND_DIE: message put in buffer */
+ FIF_RECURSIVE = 1, /* FI_CALL: function is directly recursive */
} PACKED;
/* Include generated filter instruction declarations */
@@ -35,19 +35,26 @@ const char *f_instruction_name_(enum f_instruction_code fi);
static inline const char *f_instruction_name(enum f_instruction_code fi)
{ return f_instruction_name_(fi) + 3; }
+struct f_arg {
+ struct symbol *arg;
+ struct f_arg *next;
+};
+
/* Filter structures for execution */
/* Line of instructions to be unconditionally executed one after another */
struct f_line {
uint len; /* Line length */
u8 args; /* Function: Args required */
u8 vars;
+ u8 results; /* Results left on stack: cmd -> 0, term -> 1 */
+ struct f_arg *arg_list;
struct f_line_item items[0]; /* The items themselves */
};
/* Convert the f_inst infix tree to the f_line structures */
-struct f_line *f_linearize_concat(const struct f_inst * const inst[], uint count);
-static inline struct f_line *f_linearize(const struct f_inst *root)
-{ return f_linearize_concat(&root, 1); }
+struct f_line *f_linearize_concat(const struct f_inst * const inst[], uint count, uint results);
+static inline struct f_line *f_linearize(const struct f_inst *root, uint results)
+{ return f_linearize_concat(&root, 1, results); }
void f_dump_line(const struct f_line *, uint indent);
@@ -87,15 +94,17 @@ void f_add_lines(const struct f_line_item *what, struct filter_iterator *fit);
struct filter *f_new_where(struct f_inst *);
-static inline struct f_dynamic_attr f_new_dynamic_attr(u8 type, enum f_type f_type, uint code) /* Type as core knows it, type as filters know it, and code of dynamic attribute */
-{ return (struct f_dynamic_attr) { .type = type, .f_type = f_type, .ea_code = code }; } /* f_type currently unused; will be handy for static type checking */
-static inline struct f_dynamic_attr f_new_dynamic_attr_bit(u8 bit, enum f_type f_type, uint code) /* Type as core knows it, type as filters know it, and code of dynamic attribute */
-{ return (struct f_dynamic_attr) { .type = EAF_TYPE_BITFIELD, .bit = bit, .f_type = f_type, .ea_code = code }; } /* f_type currently unused; will be handy for static type checking */
-static inline struct f_static_attr f_new_static_attr(int f_type, int code, int readonly)
-{ return (struct f_static_attr) { .f_type = f_type, .sa_code = code, .readonly = readonly }; }
-struct f_inst *f_generate_complex(enum f_instruction_code fi_code, struct f_dynamic_attr da, struct f_inst *argument);
+static inline struct f_static_attr f_new_static_attr(btype type, int code, int readonly)
+{ return (struct f_static_attr) { .type = type, .sa_code = code, .readonly = readonly }; }
struct f_inst *f_generate_roa_check(struct rtable_config *table, struct f_inst *prefix, struct f_inst *asn);
+struct f_attr_bit {
+ const struct ea_class *class;
+ uint bit;
+};
+
+#define f_new_dynamic_attr_bit(_bit, _name) ((struct f_attr_bit) { .bit = _bit, .class = ea_class_find(_name) })
+
/* Hook for call bt_assert() function in configuration */
extern void (*bt_assert_hook)(int result, const struct f_line_item *assert);
diff --git a/filter/f-util.c b/filter/f-util.c
index 410999a6..82a06bdd 100644
--- a/filter/f-util.c
+++ b/filter/f-util.c
@@ -2,7 +2,7 @@
* Filters: utility functions
*
* Copyright 1998 Pavel Machek <pavel@ucw.cz>
- * 2017 Jan Maria Matejka <mq@ucw.cz>
+ * 2017 Maria Matejka <mq@ucw.cz>
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@@ -13,7 +13,7 @@
#include "filter/f-inst.h"
#include "lib/idm.h"
#include "nest/protocol.h"
-#include "nest/route.h"
+#include "nest/rt.h"
#define P(a,b) ((a<<8) | b)
@@ -37,147 +37,6 @@ struct filter *f_new_where(struct f_inst *where)
f_new_inst(FI_DIE, F_REJECT));
struct filter *f = cfg_allocz(sizeof(struct filter));
- f->root = f_linearize(cond);
+ f->root = f_linearize(cond, 0);
return f;
}
-
-#define CA_KEY(n) n->name, n->fda.type
-#define CA_NEXT(n) n->next
-#define CA_EQ(na,ta,nb,tb) (!strcmp(na,nb) && (ta == tb))
-#define CA_FN(n,t) (mem_hash(n, strlen(n)) ^ (t*0xaae99453U))
-#define CA_ORDER 8 /* Fixed */
-
-struct ca_storage {
- struct ca_storage *next;
- struct f_dynamic_attr fda;
- u32 uc;
- char name[0];
-};
-
-HASH(struct ca_storage) ca_hash;
-
-static struct idm ca_idm;
-static struct ca_storage **ca_storage;
-static uint ca_storage_max;
-
-static void
-ca_free(resource *r)
-{
- struct custom_attribute *ca = (void *) r;
- struct ca_storage *cas = HASH_FIND(ca_hash, CA, ca->name, ca->fda->type);
- ASSERT(cas);
-
- ca->name = NULL;
- ca->fda = NULL;
- if (!--cas->uc) {
- uint id = EA_CUSTOM_ID(cas->fda.ea_code);
- idm_free(&ca_idm, id);
- HASH_REMOVE(ca_hash, CA, cas);
- ca_storage[id] = NULL;
- mb_free(cas);
- }
-}
-
-static void
-ca_dump(resource *r)
-{
- struct custom_attribute *ca = (void *) r;
- debug("name \"%s\" id 0x%04x ea_type 0x%02x f_type 0x%02x\n",
- ca->name, ca->fda->ea_code, ca->fda->type, ca->fda->f_type);
-}
-
-static struct resclass ca_class = {
- .name = "Custom attribute",
- .size = sizeof(struct custom_attribute),
- .free = ca_free,
- .dump = ca_dump,
- .lookup = NULL,
- .memsize = NULL,
-};
-
-struct custom_attribute *
-ca_lookup(pool *p, const char *name, int f_type)
-{
- int ea_type;
-
- switch (f_type) {
- case T_INT:
- ea_type = EAF_TYPE_INT;
- break;
- case T_IP:
- ea_type = EAF_TYPE_IP_ADDRESS;
- break;
- case T_QUAD:
- ea_type = EAF_TYPE_ROUTER_ID;
- break;
- case T_PATH:
- ea_type = EAF_TYPE_AS_PATH;
- break;
- case T_CLIST:
- ea_type = EAF_TYPE_INT_SET;
- break;
- case T_ECLIST:
- ea_type = EAF_TYPE_EC_SET;
- break;
- case T_LCLIST:
- ea_type = EAF_TYPE_LC_SET;
- break;
- default:
- cf_error("Custom route attribute of unsupported type");
- }
-
- static int inited = 0;
- if (!inited) {
- idm_init(&ca_idm, &root_pool, 8);
- HASH_INIT(ca_hash, &root_pool, CA_ORDER);
-
- ca_storage_max = 256;
- ca_storage = mb_allocz(&root_pool, sizeof(struct ca_storage *) * ca_storage_max);
-
- inited++;
- }
-
- struct ca_storage *cas = HASH_FIND(ca_hash, CA, name, ea_type);
- if (cas) {
- cas->uc++;
- } else {
-
- uint id = idm_alloc(&ca_idm);
-
- if (id >= EA_CUSTOM_BIT)
- cf_error("Too many custom attributes.");
-
- if (id >= ca_storage_max) {
- ca_storage_max *= 2;
- ca_storage = mb_realloc(ca_storage, sizeof(struct ca_storage *) * ca_storage_max * 2);
- }
-
- cas = mb_allocz(&root_pool, sizeof(struct ca_storage) + strlen(name) + 1);
- cas->fda = f_new_dynamic_attr(ea_type, f_type, EA_CUSTOM(id));
- cas->uc = 1;
-
- strcpy(cas->name, name);
- ca_storage[id] = cas;
-
- HASH_INSERT(ca_hash, CA, cas);
- }
-
- struct custom_attribute *ca = ralloc(p, &ca_class);
- ca->fda = &(cas->fda);
- ca->name = cas->name;
- return ca;
-}
-
-const char *
-ea_custom_name(uint ea)
-{
- uint id = EA_CUSTOM_ID(ea);
- if (id >= ca_storage_max)
- return NULL;
-
- if (!ca_storage[id])
- return NULL;
-
- return ca_storage[id]->name;
-}
-
diff --git a/filter/filter.c b/filter/filter.c
index 625b3ade..9a94545c 100644
--- a/filter/filter.c
+++ b/filter/filter.c
@@ -35,10 +35,10 @@
#include "lib/ip.h"
#include "lib/net.h"
#include "lib/flowspec.h"
-#include "nest/route.h"
+#include "nest/rt.h"
#include "nest/protocol.h"
#include "nest/iface.h"
-#include "nest/attrs.h"
+#include "lib/attrs.h"
#include "conf/conf.h"
#include "filter/filter.h"
#include "filter/f-inst.h"
@@ -79,9 +79,6 @@ struct filter_state {
/* Cached pointer to ea_list */
struct ea_list **eattrs;
- /* Linpool for adata allocation */
- struct linpool *pool;
-
/* Buffer for log output */
struct buffer buf;
@@ -99,28 +96,7 @@ void (*bt_assert_hook)(int result, const struct f_line_item *assert);
static inline void f_cache_eattrs(struct filter_state *fs)
{
- fs->eattrs = &(fs->rte->attrs->eattrs);
-}
-
-/*
- * rta_cow - prepare rta for modification by filter
- */
-static void
-f_rta_cow(struct filter_state *fs)
-{
- if (!rta_is_cached(fs->rte->attrs))
- return;
-
- /*
- * Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
- * with fs->old_rta (they will be copied when the cached rta will be obtained
- * at the end of f_run()), also the lock of hostentry is inherited (we
- * suppose hostentry is not changed by filters).
- */
- fs->rte->attrs = rta_do_cow(fs->rte->attrs, fs->pool);
-
- /* Re-cache the ea_list */
- f_cache_eattrs(fs);
+ fs->eattrs = &(fs->rte->attrs);
}
static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;
@@ -185,8 +161,8 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
return F_ERROR; \
} while(0)
-#define falloc(size) lp_alloc(fs->pool, size)
-#define fpool fs->pool
+#define falloc(size) tmp_alloc(size)
+#define fpool tmp_linpool
#define ACCESS_EATTRS do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)
@@ -203,8 +179,7 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
}
/* End of current line. Drop local variables before exiting. */
- fstk->vcnt -= curline.line->vars;
- fstk->vcnt -= curline.line->args;
+ fstk->vcnt = curline.ventry + curline.line->results;
fstk->ecnt--;
}
@@ -237,7 +212,7 @@ interpret(struct filter_state *fs, const struct f_line *line, struct f_val *val)
* tmp_pool, otherwise the filters may modify it.
*/
enum filter_return
-f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags)
+f_run(const struct filter *filter, struct rte *rte, int flags)
{
if (filter == FILTER_ACCEPT)
return F_ACCEPT;
@@ -250,7 +225,6 @@ f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, in
/* Initialize the filter state */
filter_state = (struct filter_state) {
.rte = rte,
- .pool = tmp_pool,
.flags = flags,
};
@@ -285,11 +259,10 @@ f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, in
*/
enum filter_return
-f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool)
+f_eval_rte(const struct f_line *expr, struct rte *rte)
{
filter_state = (struct filter_state) {
.rte = rte,
- .pool = tmp_pool,
};
f_stack_init(filter_state);
@@ -308,11 +281,9 @@ f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool)
* @pres: here the output will be stored
*/
enum filter_return
-f_eval(const struct f_line *expr, struct linpool *tmp_pool, struct f_val *pres)
+f_eval(const struct f_line *expr, struct f_val *pres)
{
- filter_state = (struct filter_state) {
- .pool = tmp_pool,
- };
+ filter_state = (struct filter_state) {};
f_stack_init(filter_state);
@@ -331,9 +302,7 @@ uint
f_eval_int(const struct f_line *expr)
{
/* Called independently in parse-time to eval expressions */
- filter_state = (struct filter_state) {
- .pool = cfg_mem,
- };
+ filter_state = (struct filter_state) {};
f_stack_init(filter_state);
@@ -354,10 +323,10 @@ f_eval_int(const struct f_line *expr)
* f_eval_buf - get a value of a term and print it to the supplied buffer
*/
enum filter_return
-f_eval_buf(const struct f_line *expr, struct linpool *tmp_pool, buffer *buf)
+f_eval_buf(const struct f_line *expr, buffer *buf)
{
struct f_val val;
- enum filter_return fret = f_eval(expr, tmp_pool, &val);
+ enum filter_return fret = f_eval(expr, &val);
if (fret <= F_RETURN)
val_format(&val, buf);
return fret;
diff --git a/filter/filter.h b/filter/filter.h
index 9964831c..3f2e62eb 100644
--- a/filter/filter.h
+++ b/filter/filter.h
@@ -13,8 +13,8 @@
#include "lib/resource.h"
#include "lib/ip.h"
#include "lib/macro.h"
-#include "nest/route.h"
-#include "nest/attrs.h"
+#include "nest/rt.h"
+#include "lib/attrs.h"
/* Possible return values of filter execution */
enum filter_return {
@@ -51,10 +51,10 @@ struct filter {
struct rte;
-enum filter_return f_run(const struct filter *filter, struct rte *rte, struct linpool *tmp_pool, int flags);
-enum filter_return f_eval_rte(const struct f_line *expr, struct rte *rte, struct linpool *tmp_pool);
+enum filter_return f_run(const struct filter *filter, struct rte *rte, int flags);
+enum filter_return f_eval_rte(const struct f_line *expr, struct rte *rte);
uint f_eval_int(const struct f_line *expr);
-enum filter_return f_eval_buf(const struct f_line *expr, struct linpool *tmp_pool, buffer *buf);
+enum filter_return f_eval_buf(const struct f_line *expr, buffer *buf);
const char *filter_name(const struct filter *filter);
int filter_same(const struct filter *new, const struct filter *old);
@@ -70,13 +70,4 @@ void filters_dump_all(void);
#define FF_SILENT 2 /* Silent filter execution */
-/* Custom route attributes */
-struct custom_attribute {
- resource r;
- struct f_dynamic_attr *fda;
- const char *name;
-};
-
-struct custom_attribute *ca_lookup(pool *p, const char *name, int ea_type);
-
#endif
diff --git a/filter/filter_test.c b/filter/filter_test.c
index 2a0b5431..5b24a765 100644
--- a/filter/filter_test.c
+++ b/filter/filter_test.c
@@ -46,9 +46,7 @@ run_function(const void *arg)
if (t->cmp)
return t->result == f_same(t->fn, t->cmp);
- linpool *tmp = lp_new_default(&root_pool);
- enum filter_return fret = f_eval(t->fn, tmp, NULL);
- rfree(tmp);
+ enum filter_return fret = f_eval(t->fn, NULL);
return (fret < F_REJECT);
}
diff --git a/filter/test.conf b/filter/test.conf
index 93e7a770..22f5dea3 100644
--- a/filter/test.conf
+++ b/filter/test.conf
@@ -9,7 +9,109 @@ router id 62.168.0.1;
/* We have to setup any protocol */
protocol device { }
-
+/* Setting some custom attributes, enough to force BIRD to reallocate the attribute idmap */
+attribute int test_ca_int1;
+attribute int test_ca_int2;
+attribute int test_ca_int3;
+attribute int test_ca_int4;
+attribute int test_ca_int5;
+attribute int test_ca_int6;
+attribute int test_ca_int7;
+attribute int test_ca_int8;
+attribute int test_ca_int9;
+attribute int test_ca_int10;
+
+attribute ip test_ca_ip1;
+attribute ip test_ca_ip2;
+attribute ip test_ca_ip3;
+attribute ip test_ca_ip4;
+attribute ip test_ca_ip5;
+attribute ip test_ca_ip6;
+attribute ip test_ca_ip7;
+attribute ip test_ca_ip8;
+attribute ip test_ca_ip9;
+attribute ip test_ca_ip10;
+
+attribute quad test_ca_quad1;
+attribute quad test_ca_quad2;
+attribute quad test_ca_quad3;
+attribute quad test_ca_quad4;
+attribute quad test_ca_quad5;
+attribute quad test_ca_quad6;
+attribute quad test_ca_quad7;
+attribute quad test_ca_quad8;
+attribute quad test_ca_quad9;
+attribute quad test_ca_quad10;
+
+attribute bgppath test_ca_bgppath1;
+attribute bgppath test_ca_bgppath2;
+attribute bgppath test_ca_bgppath3;
+attribute bgppath test_ca_bgppath4;
+attribute bgppath test_ca_bgppath5;
+attribute bgppath test_ca_bgppath6;
+attribute bgppath test_ca_bgppath7;
+attribute bgppath test_ca_bgppath8;
+attribute bgppath test_ca_bgppath9;
+attribute bgppath test_ca_bgppath10;
+
+attribute clist test_ca_clist1;
+attribute clist test_ca_clist2;
+attribute clist test_ca_clist3;
+attribute clist test_ca_clist4;
+attribute clist test_ca_clist5;
+attribute clist test_ca_clist6;
+attribute clist test_ca_clist7;
+attribute clist test_ca_clist8;
+attribute clist test_ca_clist9;
+attribute clist test_ca_clist10;
+
+attribute eclist test_ca_eclist1;
+attribute eclist test_ca_eclist2;
+attribute eclist test_ca_eclist3;
+attribute eclist test_ca_eclist4;
+attribute eclist test_ca_eclist5;
+attribute eclist test_ca_eclist6;
+attribute eclist test_ca_eclist7;
+attribute eclist test_ca_eclist8;
+attribute eclist test_ca_eclist9;
+attribute eclist test_ca_eclist10;
+
+attribute lclist test_ca_lclist1;
+attribute lclist test_ca_lclist2;
+attribute lclist test_ca_lclist3;
+attribute lclist test_ca_lclist4;
+attribute lclist test_ca_lclist5;
+attribute lclist test_ca_lclist6;
+attribute lclist test_ca_lclist7;
+attribute lclist test_ca_lclist8;
+attribute lclist test_ca_lclist9;
+attribute lclist test_ca_lclist10;
+
+attribute lclist test_ca_lclist_max1;
+attribute lclist test_ca_lclist_max2;
+attribute lclist test_ca_lclist_max3;
+attribute lclist test_ca_lclist_max4;
+attribute lclist test_ca_lclist_max5;
+attribute lclist test_ca_lclist_max6;
+attribute lclist test_ca_lclist_max7;
+attribute lclist test_ca_lclist_max8;
+attribute lclist test_ca_lclist_max9;
+attribute lclist test_ca_lclist_max10;
+attribute lclist test_ca_lclist_max11;
+attribute lclist test_ca_lclist_max12;
+attribute lclist test_ca_lclist_max13;
+attribute lclist test_ca_lclist_max14;
+attribute lclist test_ca_lclist_max15;
+attribute lclist test_ca_lclist_max16;
+attribute lclist test_ca_lclist_max17;
+attribute lclist test_ca_lclist_max18;
+attribute lclist test_ca_lclist_max19;
+attribute lclist test_ca_lclist_max20;
+attribute lclist test_ca_lclist_max21;
+
+
+/* Uncomment this to get an error */
+#attribute int bgp_path;
/*
* Common definitions and functions
@@ -44,9 +146,8 @@ bt_test_same(onef, twof, 0);
*/
function t_bool()
-bool b;
{
- b = true;
+ bool b = true;
bt_assert(b);
bt_assert(!!b);
@@ -82,12 +183,11 @@ define xyzzy = (120+10);
define '1a-a1' = (xyzzy-100);
function t_int()
-int i;
{
bt_assert(xyzzy = 130);
bt_assert('1a-a1' = 30);
- i = four;
+ int i = four;
i = 12*100 + 60/2 + i;
i = (i + 0);
bt_assert(i = 1234);
@@ -111,6 +211,14 @@ int i;
bt_assert(!(i = 4));
bt_assert(1 <= 1);
bt_assert(!(1234 < 1234));
+
+ bt_assert(10 - 5 = 5);
+ bt_assert(4294967295 + 1 = 0);
+ bt_assert(6*9=54);
+ bt_assert(984/41 = 24);
+ bt_assert(123/45 = 2);
+ bt_assert(0xfee1a | 0xbeef = 0xffeff);
+ bt_assert(0xfee1a & 0xbeef = 0xae0a);
}
bt_test_suite(t_int, "Testing integers");
@@ -128,14 +236,19 @@ define is2 = [(17+2), 17, 15, 11, 8, 5, 3, 2];
define is3 = [5, 17, 2, 11, 8, 15, 3, 19];
function t_int_set()
-int set is;
{
+ int set is = [];
+ bt_assert(is = []);
+ bt_assert(0 !~ is);
+
bt_assert(1 ~ [1,2,3]);
bt_assert(5 ~ [1..20]);
bt_assert(2 ~ [ 1, 2, 3 ]);
bt_assert(5 ~ [ 4 .. 7 ]);
bt_assert(1 !~ [ 2, 3, 4 ]);
bt_assert(999 !~ [ 666, 333 ]);
+ bt_assert(1 !~ []);
+ bt_assert(1 !~ is);
is = [ 2, 3, 4, 7..11 ];
bt_assert(10 ~ is);
@@ -170,6 +283,7 @@ int set is;
bt_assert([1,4..10,20] = [1,4..10,20]);
bt_assert(format([ 1, 2, 1, 1, 1, 3, 4, 1, 1, 1, 5 ]) = "[1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5]");
+ bt_assert(format([]) = "[]");
}
bt_test_suite(t_int_set, "Testing sets of integers");
@@ -183,9 +297,8 @@ bt_test_suite(t_int_set, "Testing sets of integers");
*/
function t_string()
-string st;
{
- st = "Hello";
+ string st = "Hello";
bt_assert(format(st) = "Hello");
bt_assert(st ~ "Hell*");
bt_assert(st ~ "?ello");
@@ -210,9 +323,8 @@ function 'mkpair-a'(int a)
}
function t_pair()
-pair pp;
{
- pp = (1, 2);
+ pair pp = (1, 2);
bt_assert(format(pp) = "(1,2)");
bt_assert((1,2) = pp);
bt_assert((1,1+1) = pp);
@@ -233,10 +345,11 @@ bt_test_suite(t_pair, "Testing pairs");
*/
function t_pair_set()
-pair pp;
-pair set ps;
{
- pp = (1, 2);
+ pair pp = (1, 2);
+ pair set ps = [];
+ bt_assert(pp !~ ps);
+
ps = [(1,(one+one)), (3,4)..(4,8), (5,*), (6,3..6)];
bt_assert(format(ps) = "[(1,2), (3,4)..(4,8), (5,0)..(5,65535), (6,3)..(6,6)]");
bt_assert(pp ~ ps);
@@ -253,6 +366,7 @@ pair set ps;
bt_assert((6,6+one) !~ ps);
bt_assert(((one+6),2) !~ ps);
bt_assert((1,1) !~ ps);
+ bt_assert(pp !~ []);
ps = [(20..150, 200..300), (50100..50200, 1000..50000), (*, 5+5)];
bt_assert((100,200) ~ ps);
@@ -304,6 +418,7 @@ quad qq;
qq = 1.2.3.4;
bt_assert(qq ~ [1.2.3.4, 5.6.7.8]);
bt_assert(qq !~ [1.2.1.1, 1.2.3.5]);
+ bt_assert(qq !~ []);
}
bt_test_suite(t_quad_set, "Testing sets of quads");
@@ -335,6 +450,26 @@ ip p;
p = 1234:5678::;
bt_assert(!p.is_v4);
bt_assert(p.mask(24) = 1234:5600::);
+
+ p = 1:2:3:4:5:6:7:8;
+ bt_assert(!p.is_v4);
+ bt_assert(format(p) = "1:2:3:4:5:6:7:8");
+ bt_assert(p.mask(64) = 1:2:3:4::);
+
+ p = 10:20:30:40:50:60:70:80;
+ bt_assert(!p.is_v4);
+ bt_assert(format(p) = "10:20:30:40:50:60:70:80");
+ bt_assert(p.mask(64) = 10:20:30:40::);
+
+ p = 1090:20a0:30b0:40c0:50d0:60e0:70f0:8000;
+ bt_assert(!p.is_v4);
+ bt_assert(format(p) = "1090:20a0:30b0:40c0:50d0:60e0:70f0:8000");
+ bt_assert(p.mask(64) = 1090:20a0:30b0:40c0::);
+
+ p = ::fffe:6:c0c:936d:88c7:35d3;
+ bt_assert(!p.is_v4);
+ bt_assert(format(p) = "::fffe:6:c0c:936d:88c7:35d3");
+ bt_assert(p.mask(64) = 0:0:fffe:6::);
}
bt_test_suite(t_ip, "Testing ip address");
@@ -364,6 +499,7 @@ ip set ips;
bt_assert(1.2.3.4 !~ [ 1.2.3.3, 1.2.3.5 ]);
bt_assert(1.2.3.4 ~ [ 1.2.3.3..1.2.3.5 ]);
+ bt_assert(1.2.3.4 !~ []);
}
bt_test_suite(t_ip_set, "Testing sets of ip address");
@@ -378,9 +514,9 @@ bt_test_suite(t_ip_set, "Testing sets of ip address");
function t_enum()
{
- bt_assert(format(RTS_STATIC) = "(enum 30)1");
- bt_assert(format(NET_IP4) = "(enum 36)1");
- bt_assert(format(NET_VPN6) = "(enum 36)4");
+ bt_assert(format(RTS_STATIC) = "(enum 31)1");
+ bt_assert(format(NET_IP4) = "(enum 3b)1");
+ bt_assert(format(NET_VPN6) = "(enum 3b)4");
bt_assert(RTS_STATIC ~ [RTS_STATIC, RTS_DEVICE]);
bt_assert(RTS_BGP !~ [RTS_STATIC, RTS_DEVICE]);
@@ -452,13 +588,34 @@ function test_pxset(prefix set pxs)
bt_assert(1.0.0.0/8 ~ [ 1.0.0.0/8+ ]);
bt_assert(1.0.0.0/9 !~ [ 1.0.0.0/8- ]);
bt_assert(1.2.0.0/17 !~ [ 1.0.0.0/8{ 15 , 16 } ]);
+ bt_assert(net10 !~ []);
bt_assert([ 10.0.0.0/8{ 15 , 17 } ] = [ 10.0.0.0/8{ 15 , 17 } ]);
}
+function test_empty_pxset(prefix set pxs)
+int set s0;
+prefix set s1;
+{
+ s0 = [];
+ s1 = [];
+ bt_assert(pxs != s0);
+ bt_assert(pxs = s1);
+ bt_assert(pxs = []);
+}
+
function t_prefix_set()
prefix set pxs;
{
+ pxs = [];
+ bt_assert(format(pxs) = "[]");
+ bt_assert(pxs = []);
+ bt_assert(1.2.0.0/16 !~ []);
+ bt_assert(1.2.0.0/16 !~ pxs);
+
+ test_empty_pxset([]);
+ test_empty_pxset(pxs);
+
pxs = [ 1.2.0.0/16, 1.4.0.0/16+, 44.66.88.64/30{24,28}, 12.34.56.0/24{8,16} ];
bt_assert(format(pxs) = "[1.2.0.0/16{0.1.0.0}, 1.4.0.0/16{0.1.255.255}, 12.34.0.0/16{1.255.0.0}, 44.66.88.64/28{0.0.1.240}]");
@@ -478,6 +635,33 @@ prefix set pxs;
bt_assert(1.2.0.0/16 ~ [ 1.0.0.0/8{ 15 , 17 } ]);
bt_assert([ 10.0.0.0/8{ 15 , 17 } ] != [ 11.0.0.0/8{ 15 , 17 } ]);
+
+ /* Formatting of prefix sets, some cases are a bit strange */
+ bt_assert(format([ 0.0.0.0/0 ]) = "[0.0.0.0/0]");
+ bt_assert(format([ 10.10.0.0/32 ]) = "[10.10.0.0/32{0.0.0.1}]");
+ bt_assert(format([ 10.10.0.0/17 ]) = "[10.10.0.0/17{0.0.128.0}]");
+ bt_assert(format([ 10.10.0.0/17{17,19} ]) = "[10.10.0.0/17{0.0.224.0}]"); # 224 = 128+64+32
+ bt_assert(format([ 10.10.128.0/17{18,19} ]) = "[10.10.128.0/18{0.0.96.0}, 10.10.192.0/18{0.0.96.0}]"); # 96 = 64+32
+ bt_assert(format([ 10.10.64.0/18- ]) = "[0.0.0.0/0, 0.0.0.0/1{128.0.0.0}, 0.0.0.0/2{64.0.0.0}, 0.0.0.0/3{32.0.0.0}, 10.10.0.0/16{255.255.0.0}, 10.10.0.0/17{0.0.128.0}, 10.10.64.0/18{0.0.64.0}]");
+ bt_assert(format([ 10.10.64.0/18+ ]) = "[10.10.64.0/18{0.0.96.0}, 10.10.64.0/20{0.0.31.255}, 10.10.80.0/20{0.0.31.255}, 10.10.96.0/20{0.0.31.255}, 10.10.112.0/20{0.0.31.255}]");
+
+ bt_assert(format([ 10.10.160.0/19 ]) = "[10.10.160.0/19{0.0.32.0}]");
+ bt_assert(format([ 10.10.160.0/19{19,22} ]) = "[10.10.160.0/19{0.0.32.0}, 10.10.160.0/20{0.0.28.0}, 10.10.176.0/20{0.0.28.0}]"); # 28 = 16+8+4
+ bt_assert(format([ 10.10.160.0/19+ ]) = "[10.10.160.0/19{0.0.32.0}, 10.10.160.0/20{0.0.31.255}, 10.10.176.0/20{0.0.31.255}]");
+
+ bt_assert(format([ ::/0 ]) = "[::/0]");
+ bt_assert(format([ 11:22:33:44:55:66:77:88/128 ]) = "[11:22:33:44:55:66:77:88/128{::1}]");
+ bt_assert(format([ 11:22:33:44::/64 ]) = "[11:22:33:44::/64{0:0:0:1::}]");
+ bt_assert(format([ 11:22:33:44::/64+ ]) = "[11:22:33:44::/64{::1:ffff:ffff:ffff:ffff}]");
+
+ bt_assert(format([ 11:22:33:44::/65 ]) = "[11:22:33:44::/65{::8000:0:0:0}]");
+ bt_assert(format([ 11:22:33:44::/65{65,67} ]) = "[11:22:33:44::/65{::e000:0:0:0}]"); # e = 8+4+2
+ bt_assert(format([ 11:22:33:44:8000::/65{66,67} ]) = "[11:22:33:44:8000::/66{::6000:0:0:0}, 11:22:33:44:c000::/66{::6000:0:0:0}]"); # 6 = 4+2
+ bt_assert(format([ 11:22:33:44:4000::/66- ]) = "[::/0, ::/1{8000::}, ::/2{4000::}, ::/3{2000::}, 11:22:33:44::/64{ffff:ffff:ffff:ffff::}, 11:22:33:44::/65{::8000:0:0:0}, 11:22:33:44:4000::/66{::4000:0:0:0}]");
+ bt_assert(format([ 11:22:33:44:4000::/66+ ]) = "[11:22:33:44:4000::/66{::6000:0:0:0}, 11:22:33:44:4000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:5000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:6000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:7000::/68{::1fff:ffff:ffff:ffff}]");
+ bt_assert(format([ 11:22:33:44:c000::/67 ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}]");
+ bt_assert(format([ 11:22:33:44:c000::/67{67,71} ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}, 11:22:33:44:c000::/68{::1e00:0:0:0}, 11:22:33:44:d000::/68{::1e00:0:0:0}]");
+ bt_assert(format([ 11:22:33:44:c000::/67+ ]) = "[11:22:33:44:c000::/67{::2000:0:0:0}, 11:22:33:44:c000::/68{::1fff:ffff:ffff:ffff}, 11:22:33:44:d000::/68{::1fff:ffff:ffff:ffff}]");
}
bt_test_suite(t_prefix_set, "Testing prefix sets");
@@ -516,6 +700,12 @@ bt_test_suite(t_prefix6, "Testing prefix IPv6");
function t_prefix6_set()
prefix set pxs;
{
+ pxs = [];
+ bt_assert(format(pxs) = "[]");
+ bt_assert(pxs = []);
+ bt_assert(12::34/128 !~ []);
+ bt_assert(12::34/128 !~ pxs);
+
bt_assert(1180::/16 ~ [ 1100::/8{15, 17} ]);
bt_assert(12::34 = 12::34);
bt_assert(12::34 ~ [ 12::33..12::35 ]);
@@ -557,6 +747,12 @@ prefix set pxs;
bt_assert(2000::/29 !~ pxs);
bt_assert(1100::/10 !~ pxs);
bt_assert(2010::/26 !~ pxs);
+
+ pxs = [ 52E0::/13{13,128} ];
+ bt_assert(52E7:BE81:379B:E6FD:541F:B0D0::/93 ~ pxs);
+
+ pxs = [ 41D8:8718::/30{0,30}, 413A:99A8:6C00::/38{38,128} ];
+ bt_assert(4180::/9 ~ pxs);
}
bt_test_suite(t_prefix6_set, "Testing prefix IPv6 sets");
@@ -627,6 +823,7 @@ int set set12;
bt_assert(3 ~ p2);
bt_assert(p2 ~ [2, 10..20]);
bt_assert(p2 ~ [4, 10..20]);
+ bt_assert(p2 !~ []);
p2 = prepend(p2, 5);
bt_assert(p2 !~ pm1);
@@ -637,6 +834,8 @@ int set set12;
bt_assert(p2 ~ [= 5 [2, 4, 6] 3 [1..2] 1 =]);
bt_assert(p2 ~ [= 5 set35 3 set12 set12 =]);
bt_assert(p2 ~ mkpath(5, 4));
+ bt_assert(p2 ~ [= * [3] * =]);
+ bt_assert(p2 !~ [= * [] * =]);
bt_assert(p2.len = 5);
bt_assert(p2.first = 5);
@@ -645,6 +844,10 @@ int set set12;
bt_assert(p2.len = 5);
bt_assert(delete(p2, 3) = prepend(prepend(prepend(prepend(+empty+, 1), 2), 4), 5));
bt_assert(filter(p2, [1..3]) = prepend(prepend(prepend(+empty+, 1), 2), 3));
+ bt_assert(delete(p2, []) = p2);
+ bt_assert(filter(p2, []) = +empty+);
+ bt_assert(delete(prepend(prepend(+empty+, 0), 1), []) = prepend(prepend(+empty+, 0), 1));
+ bt_assert(filter(prepend(prepend(+empty+, 0), 1), []) = +empty+);
p2 = prepend( + empty +, 5 );
p2 = prepend( p2, 4 );
@@ -664,6 +867,15 @@ int set set12;
bt_assert(delete(p2, [4..5]) = prepend(prepend(prepend(prepend(+empty+, 3), 3), 2), 1));
bt_assert(format([= 1 2+ 3 =]) = "[= 1 2 + 3 =]");
+
+ # iteration over path
+ int x = 0;
+ int y = 0;
+ for int i in p2 do {
+ x = x + i;
+ y = y + x;
+ }
+ bt_assert(x = 18 && y = 50);
}
bt_test_suite(t_path, "Testing paths");
@@ -683,6 +895,11 @@ clist l;
clist l2;
clist r;
{
+ bt_assert((10, 20).asn = 10);
+ bt_assert((10, 20).data = 20);
+ bt_assert(p23.asn = 2);
+ bt_assert(p23.data = 3);
+
l = - empty -;
bt_assert(l !~ [(*,*)]);
bt_assert((l ~ [(*,*)]) != (l !~ [(*,*)]));
@@ -700,6 +917,7 @@ clist r;
bt_assert(l ~ [(2,2..3)]);
bt_assert(l ~ [(1,1..2)]);
bt_assert(l ~ [(1,1)..(1,2)]);
+ bt_assert(l !~ []);
l = add(l, (2,5));
l = add(l, (5,one));
@@ -737,6 +955,9 @@ clist r;
bt_assert(l !~ [(*,(one+6))]);
bt_assert(l !~ [(*, (one+one+one))]);
+ bt_assert(delete(l, []) = l);
+ bt_assert(filter(l, []) = -empty-);
+
l = delete(l, [(*,(one+onef(3)))]);
l = delete(l, [(*,(4+one))]);
bt_assert(l = add(-empty-, (3,1)));
@@ -775,6 +996,18 @@ clist r;
r = filter(l, [(3,1), (*,2)]);
bt_assert(r = add(add(-empty-, (3,1)), (3,2)));
bt_assert(format(r) = "(clist (3,1) (3,2))");
+
+  # minimum & maximum element
+ r = add(add(add(add(add(-empty-, (2,1)), (1,3)), (2,2)), (3,1)), (2,3));
+ bt_assert(format(r) = "(clist (2,1) (1,3) (2,2) (3,1) (2,3))");
+ bt_assert(r.min = (1,3));
+ bt_assert(r.max = (3,1));
+
+ # iteration over clist
+ int x = 0;
+ for pair c in r do
+ x = x + c.asn * c.asn * c.data;
+ bt_assert(x = 36);
}
bt_test_suite(t_clist, "Testing lists of communities");
@@ -848,11 +1081,15 @@ eclist r;
bt_assert((ro, 10.20.30.40, 100) !~ el);
bt_assert(el !~ [(rt, 10, 35..40)]);
bt_assert(el !~ [(ro, 10, *)]);
+ bt_assert(el !~ []);
el = add(el, (rt, 10, 40));
el2 = filter(el, [(rt, 10, 20..40)] );
el2 = add(el2, (rt, 10, 50));
+ bt_assert(delete(el, []) = el);
+ bt_assert(filter(el, []) = --empty--);
+
# eclist A (1,30,40)
bt_assert(el = add(add(add(--empty--, (rt, 10, 1)), (rt, 10, 30)), (rt, 10, 40)));
bt_assert(format(el) = "(eclist (rt, 10, 1) (rt, 10, 30) (rt, 10, 40))");
@@ -880,6 +1117,19 @@ eclist r;
r = filter(el, [(rt, 10, 1), (rt, 10, 25..30), (ro, 10, 40)]);
bt_assert(r = add(add(--empty--, (rt, 10, 1)), (rt, 10, 30)));
bt_assert(format(r) = "(eclist (rt, 10, 1) (rt, 10, 30))");
+
+  # minimum & maximum element
+ r = add(add(add(add(add(--empty--, (rt, 2, 1)), (rt, 1, 3)), (rt, 2, 2)), (rt, 3, 1)), (rt, 2, 3));
+ bt_assert(format(r) = "(eclist (rt, 2, 1) (rt, 1, 3) (rt, 2, 2) (rt, 3, 1) (rt, 2, 3))");
+ bt_assert(r.min = (rt, 1, 3));
+ bt_assert(r.max = (rt, 3, 1));
+
+ # iteration over eclist
+ int x = 0;
+ for ec c in r do
+ if c > (rt, 2, 0) && c < (rt, 3, 0) then
+ x = x + 1;
+ bt_assert(x = 3);
}
bt_test_suite(t_eclist, "Testing lists of extended communities");
@@ -939,6 +1189,10 @@ lclist r;
bt_assert(---empty--- = ---empty---);
bt_assert((10, 20, 30) !~ ---empty---);
+ bt_assert((10, 20, 30).asn = 10);
+ bt_assert((10, 20, 30).data1 = 20);
+ bt_assert((10, 20, 30).data2 = 30);
+
ll = --- empty ---;
ll = add(ll, (ten, 20, 30));
ll = add(ll, (1000, 2000, 3000));
@@ -960,6 +1214,9 @@ lclist r;
ll2 = add(ll2, (30, 30, 30));
ll2 = add(ll2, (40, 40, 40));
+ bt_assert(delete(ll, []) = ll);
+ bt_assert(filter(ll, []) = ---empty---);
+
# lclist A (10, 20, 30)
bt_assert(format(ll) = "(lclist (10, 10, 10) (20, 20, 20) (30, 30, 30))");
@@ -985,6 +1242,25 @@ lclist r;
r = filter(ll, [(5..15, *, *), (20, 15..25, *)]);
bt_assert(r = add(add(---empty---, (10, 10, 10)), (20, 20, 20)));
bt_assert(format(r) = "(lclist (10, 10, 10) (20, 20, 20))");
+
+  # minimum & maximum element
+ r = add(add(add(add(add(---empty---, (2, 3, 3)), (1, 2, 3)), (2, 3, 1)), (3, 1, 2)), (2, 1, 3));
+ bt_assert(format(r) = "(lclist (2, 3, 3) (1, 2, 3) (2, 3, 1) (3, 1, 2) (2, 1, 3))");
+ bt_assert(r.min = (1, 2, 3));
+ bt_assert(r.max = (3, 1, 2));
+
+ # iteration over lclist
+ int x = 0;
+ int y = 0;
+ lc mx = (0, 0, 0);
+ for lc c in r do {
+ int asn2 = c.asn * c.asn;
+ x = x + asn2 * c.data1;
+ y = y + asn2 * c.data2;
+ if c > mx then mx = c;
+ }
+ bt_assert(x = 39 && y = 49);
+ bt_assert(mx = r.max);
}
bt_test_suite(t_lclist, "Testing lists of large communities");
@@ -1013,6 +1289,7 @@ lc set lls;
bt_assert(ll !~ [(5,10,15), (10,21,30)]);
bt_assert(ll !~ [(10,21..25,*)]);
bt_assert(ll !~ [(11, *, *)]);
+ bt_assert(ll !~ []);
lls = [(10, 10, 10), (20, 20, 15..25), (30, 30, *), (40, 35..45, *), (50, *, *), (55..65, *, *)];
bt_assert(format(lls) = "[(10, 10, 10), (20, 20, 15)..(20, 20, 25), (30, 30, 0)..(30, 30, 4294967295), (40, 35, 0)..(40, 45, 4294967295), (50, 0, 0)..(50, 4294967295, 4294967295), (55, 0, 0)..(65, 4294967295, 4294967295)]");
@@ -1069,6 +1346,10 @@ bt_test_suite(t_rd, "Testing route distinguishers");
function t_rd_set()
rd set rds;
{
+ rds = [];
+ bt_assert(rds = []);
+ bt_assert(10:20 !~ rds);
+
rds = [10:20, 100000:100..100000:200];
bt_assert(format(rds) = "[10:20, 100000:100..100000:200]");
@@ -1079,6 +1360,7 @@ rd set rds;
bt_assert(100000:128 ~ rds);
bt_assert(100000:200 ~ rds);
bt_assert(100010:150 !~ rds);
+ bt_assert(100010:150 !~ []);
}
bt_test_suite(t_rd_set, "Testing sets of route distinguishers");
@@ -1145,7 +1427,85 @@ function fifteen()
return 15;
}
+function local_vars(int j)
+{
+ int k = 10;
+ bt_assert(j = 5 && k = 10);
+ {
+ int j = 15;
+ k = 20;
+ bt_assert(j = 15 && k = 20);
+ }
+ bt_assert(j = 5 && k = 20);
+
+ if j < 10 then
+ {
+ int j = 25;
+ string k = "hello";
+ bt_assert(j = 25 && k = "hello");
+ }
+ bt_assert(j = 5 && k = 20);
+
+ int m = 100;
+ {
+ j = 35;
+ int k = 40;
+ bt_assert(j = 35 && k = 40 && m = 100);
+ }
+ bt_assert(j = 35 && k = 20 && m = 100);
+}
+
+function factorial(int x)
+{
+ if x = 0 then return 0;
+ if x = 1 then return 1;
+ else return x * factorial(x - 1);
+}
+
+function fibonacci(int x)
+{
+ if x = 0 then return 0;
+ if x = 1 then return 1;
+ else return fibonacci(x - 1) + fibonacci(x - 2);
+}
+
+function hanoi_init(int a; int b)
+{
+ if b = 0
+ then return +empty+;
+ else return prepend(hanoi_init(a + 1, b - 1), a);
+}
+
+function hanoi_solve(int n; bgppath h_src; bgppath h_dst; bgppath h_aux; bool x; bool y)
+{
+ # x -> return src or dst
+ # y -> print state
+
+ if n = 0 then { if x then return h_src; else return h_dst; }
+
+ bgppath tmp1 = hanoi_solve(n - 1, h_src, h_aux, h_dst, true, y);
+ bgppath tmp2 = hanoi_solve(n - 1, h_src, h_aux, h_dst, false, false);
+ h_src = tmp1;
+ h_aux = tmp2;
+
+ int v = h_src.first;
+ # bt_assert(h_dst = +empty+ || v < h_dst.first);
+ h_src = delete(h_src, v);
+ h_dst = prepend(h_dst, v);
+
+ if y then
+ print "move: ", v, " src: ", h_src, " dst:", h_dst, " aux:", h_aux;
+
+ tmp1 = hanoi_solve(n - 1, h_aux, h_dst, h_src, true, y);
+ tmp2 = hanoi_solve(n - 1, h_aux, h_dst, h_src, false, false);
+ h_aux = tmp1;
+ h_dst = tmp2;
+
+ if x then return h_src; else return h_dst;
+}
+
function t_call_function()
+bgppath h_src;
{
bt_assert(fifteen() = 15);
@@ -1157,6 +1517,17 @@ function t_call_function()
bt_assert(callme(4, 4) = 16);
bt_assert(callme(7, 2) = 14);
bt_assert(callmeagain(1, 2, 3) = 6);
+ local_vars(5);
+
+ bt_assert(factorial(5) = 120);
+ bt_assert(factorial(10) = 3628800);
+
+ bt_assert(fibonacci(10) = 55);
+ bt_assert(fibonacci(20) = 6765);
+
+ h_src = hanoi_init(1, 6);
+ bt_assert(format(h_src) = "(path 1 2 3 4 5 6)");
+ bt_assert(hanoi_solve(6, h_src, +empty+, +empty+, false, false) = h_src);
}
bt_test_suite(t_call_function, "Testing calling functions");
@@ -1243,6 +1614,7 @@ function __test2()
filter testf
int j;
+bool t;
{
print "Heya, filtering route to ", net.ip, " prefixlen ", net.len, " source ", source;
print "This route was from ", from;
@@ -1254,6 +1626,54 @@ int j;
rip_metric = 14;
unset(rip_metric);
+ preference = 1234;
+
+ test_ca_int1 = 42;
+ test_ca_ip2 = 1.3.5.7;
+ test_ca_quad3 = 2.4.6.8;
+ test_ca_bgppath4 = +empty+;
+ test_ca_clist5 = -empty-;
+ test_ca_eclist6 = --empty--;
+ test_ca_lclist7 = ---empty---;
+
+ igp_metric = 53;
+ babel_metric = 64;
+ t = defined(babel_router_id);
+ j = babel_seqno;
+
+ bgp_origin = ORIGIN_IGP;
+ bgp_path = +empty+;
+ bgp_next_hop = 3456:789a:bcde:f012::3456:789a;
+ bgp_med = 71;
+ bgp_local_pref = 942;
+ t = defined(bgp_atomic_aggr);
+ t = defined(bgp_aggregator);
+ bgp_community = -empty-;
+ bgp_originator_id = 9.7.5.3;
+ bgp_cluster_list = -empty-;
+ bgp_ext_community = --empty--;
+ t = defined(bgp_aigp);
+ bgp_large_community = ---empty---;
+ t = defined(bgp_mpls_label_stack);
+
+ ospf_metric1 = 64;
+ ospf_metric2 = 111;
+ ospf_tag = 654432;
+
+ radv_preference = RA_PREF_LOW;
+ radv_lifetime = 28;
+
+ rip_metric = 2;
+ rip_tag = 4;
+ t = defined(rip_from);
+
+ krt_source = 17;
+ krt_metric = 19;
+
+# krt_lock_mtu = false;
+# krt_lock_window = true;
+# krt_lock_rtt = krt_lock_rttvar && krt_lock_sstresh || krt_lock_cwnd;
+
accept "ok I take that";
}
@@ -1353,13 +1773,16 @@ filter vpn_filter
bt_assert(net.type != NET_IP6);
bt_assert(net.rd = 0:1:2);
+ bool b = false;
case (net.type) {
NET_IP4: print "IPV4";
NET_IP6: print "IPV6";
+ else: b = true;
}
+ bt_assert(b);
bt_check_assign(from, 10.20.30.40);
- bt_check_assign(gw, 55.55.55.44);
+ # bt_check_assign(gw, 55.55.55.44);
bgp_community.add((3,5));
bgp_ext_community.add((ro, 135, 999));
diff --git a/filter/test.conf2 b/filter/test.conf2
index e95f9563..9fc8330f 100644
--- a/filter/test.conf2
+++ b/filter/test.conf2
@@ -38,12 +38,6 @@ protocol static {
print from;
from = 1.2.3.4;
print from;
- print scope;
- scope = SCOPE_HOST;
- print scope;
- if !(scope ~ [ SCOPE_HOST, SCOPE_SITE ]) then {
- print "Failed in test";
- }
preference = 15;
print preference;
diff --git a/filter/tree_test.c b/filter/tree_test.c
index 6472d17e..05702f81 100644
--- a/filter/tree_test.c
+++ b/filter/tree_test.c
@@ -19,10 +19,7 @@ static void
start_conf_env(void)
{
bt_bird_init();
-
- pool *p = rp_new(&root_pool, "helper_pool");
- linpool *l = lp_new_default(p);
- cfg_mem = l;
+ cfg_mem = tmp_linpool;
}
static struct f_tree *
diff --git a/filter/trie.c b/filter/trie.c
index 1a4e1ac3..12ba0b82 100644
--- a/filter/trie.c
+++ b/filter/trie.c
@@ -1,7 +1,8 @@
/*
* Filters: Trie for prefix sets
*
- * Copyright 2009 Ondrej Zajicek <santiago@crfreenet.org>
+ * (c) 2009--2021 Ondrej Zajicek <santiago@crfreenet.org>
+ * (c) 2009--2021 CZ.NIC z.s.p.o.
*
* Can be freely distributed and used under the terms of the GNU GPL.
*/
@@ -9,53 +10,68 @@
/**
* DOC: Trie for prefix sets
*
- * We use a (compressed) trie to represent prefix sets. Every node
- * in the trie represents one prefix (&addr/&plen) and &plen also
- * indicates the index of the bit in the address that is used to
- * branch at the node. If we need to represent just a set of
- * prefixes, it would be simple, but we have to represent a
- * set of prefix patterns. Each prefix pattern consists of
- * &ppaddr/&pplen and two integers: &low and &high, and a prefix
- * &paddr/&plen matches that pattern if the first MIN(&plen, &pplen)
- * bits of &paddr and &ppaddr are the same and &low <= &plen <= &high.
- *
- * We use a bitmask (&accept) to represent accepted prefix lengths
- * at a node. As there are 33 prefix lengths (0..32 for IPv4), but
- * there is just one prefix of zero length in the whole trie so we
- * have &zero flag in &f_trie (indicating whether the trie accepts
- * prefix 0.0.0.0/0) as a special case, and &accept bitmask
+ * We use a (compressed) trie to represent prefix sets. Every node in the trie
+ * represents one prefix (&addr/&plen) and &plen also indicates the index of
+ * bits in the address that are used to branch at the node. Note that such
+ * a prefix is not necessarily a member of the prefix set; it is just a canonical
+ * prefix associated with a node. Prefix lengths of nodes are aligned to
+ * multiples of &TRIE_STEP (4) and there is 16-way branching in each
+ * node. Therefore, we say that a node is associated with a range of prefix
+ * lengths (&plen .. &plen + TRIE_STEP - 1).
+ *
+ * The prefix set is not just a set of prefixes, it is defined by a set of
+ * prefix patterns. Each prefix pattern consists of &ppaddr/&pplen and two
+ * integers: &low and &high. The tested prefix &paddr/&plen matches that pattern
+ * if the first MIN(&plen, &pplen) bits of &paddr and &ppaddr are the same and
+ * &low <= &plen <= &high.
+ *
+ * There are two ways to represent accepted prefixes for a node. First, there is
+ * a bitmask &local, which represents independently all 15 prefixes that extend
+ * the canonical prefix of the node and are within a range of prefix lengths
+ * associated with the node. E.g., for node 10.0.0.0/8 they are 10.0.0.0/8,
+ * 10.0.0.0/9, 10.128.0.0/9, .. 10.224.0.0/11. This order (first by length, then
+ * lexicographically) is used for indexing the bitmask &local, starting at
+ * position 1. I.e., index is 2^(plen - base) + offset within the same length,
+ * see function trie_local_mask6() for details.
+ *
+ * Second, we use a bitmask &accept to represent accepted prefix lengths at a
+ * node. A set bit means that all prefixes of the given length that are either
+ * subprefixes or superprefixes of the canonical prefix are accepted. There
+ * are 33 prefix lengths (0..32 for IPv4) but just one prefix of zero length
+ * in the whole trie, so we have the &zero flag in &f_trie (indicating whether
+ * the trie accepts prefix 0.0.0.0/0) as a special case, and the &accept bitmask
* represents accepted prefix lengths from 1 to 32.
*
- * There are two cases in prefix matching - a match when the length
- * of the prefix is smaller that the length of the prefix pattern,
- * (&plen < &pplen) and otherwise. The second case is simple - we
- * just walk through the trie and look at every visited node
- * whether that prefix accepts our prefix length (&plen). The
- * first case is tricky - we don't want to examine every descendant
- * of a final node, so (when we create the trie) we have to propagate
- * that information from nodes to their ascendants.
- *
- * Suppose that we have two masks (M1 and M2) for a node. Mask M1
- * represents accepted prefix lengths by just the node and mask M2
- * represents accepted prefix lengths by the node or any of its
- * descendants. Therefore M2 is a bitwise or of M1 and children's
- * M2 and this is a maintained invariant during trie building.
- * Basically, when we want to match a prefix, we walk through the trie,
- * check mask M1 for our prefix length and when we came to
- * final node, we check mask M2.
- *
- * There are two differences in the real implementation. First,
- * we use a compressed trie so there is a case that we skip our
- * final node (if it is not in the trie) and we came to node that
- * is either extension of our prefix, or completely out of path
- * In the first case, we also have to check M2.
- *
- * Second, we really need not to maintain two separate bitmasks.
- * Checks for mask M1 are always larger than &applen and we need
- * just the first &pplen bits of mask M2 (if trie compression
- * hadn't been used it would suffice to know just $applen-th bit),
- * so we have to store them together in &accept mask - the first
- * &pplen bits of mask M2 and then mask M1.
+ * One complication is handling of prefix patterns with unaligned prefix length.
+ * When such a pattern is added, we add a primary node above (with rounded
+ * down prefix length &nlen) and a set of secondary nodes below (with rounded up
+ * prefix lengths &slen). Accepted prefix lengths of the original prefix pattern
+ * are then represented in different places based on their lengths. For prefixes
+ * shorter than &nlen, it is the &accept bitmask of the primary node; for
+ * prefixes between &nlen and &slen - 1, it is the &local bitmask of the primary
+ * node; and for prefixes of length &slen or longer, it is the &accept bitmasks
+ * of the secondary nodes.
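+ *
+ * A worked example (illustrative): adding the pattern 10.128.0.0/10 with
+ * low = 0 and high = 24 gives &nlen = 8 and &slen = 12. Accepted lengths
+ * 1..7 then land in the &accept bitmask of the primary node 10.0.0.0/8
+ * (length 0 being covered by the &zero flag), lengths 8..11 in its &local
+ * bitmask, and lengths 12..24 in the &accept bitmasks of the secondary /12
+ * nodes below 10.128.0.0/10.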
+ *
+ * There are two cases in prefix matching - a match when the length of the
+ * prefix is smaller than the length of the prefix pattern (&plen < &pplen) and
+ * otherwise. The second case is simple - we just walk through the trie and look
+ * at every visited node whether that prefix accepts our prefix length (&plen).
+ * The first case is tricky - we do not want to examine every descendant of a
+ * final node, so (when we create the trie) we have to propagate that
+ * information from nodes to their ascendants.
+ *
+ * There are two kinds of propagations - propagation from child's &accept
+ * bitmask to parent's &accept bitmask, and propagation from child's &accept
+ * bitmask to parent's &local bitmask. The first kind is simple - as all
+ * superprefixes of a parent are also superprefixes (of appropriate length) of
+ * a child, we can just add (by bitwise or) a child &accept mask masked by the
+ * parent prefix length mask to the parent &accept mask. This handles prefixes
+ * shorter than node &plen.
+ *
+ * The second kind of propagation is necessary to handle superprefixes of a
+ * child that are represented by the parent &local mask, i.e. those in the range of
+ * prefix lengths associated with the parent. For each accepted (by child
+ * &accept mask) prefix length from that range, we need to set appropriate bit
+ * in &local mask. See function trie_amask_to_local() for details.
*
* There are four cases when we walk through a trie:
*
@@ -65,8 +81,32 @@
* - we are beyond the end of path (node length > &plen)
* - we are still on path and keep walking (node length < &plen)
*
- * The walking code in trie_match_prefix() is structured according to
- * these cases.
+ * The walking code in trie_match_net() is structured according to these cases.
+ *
+ * Iteration over prefixes in a trie can be done using TRIE_WALK() macro, or
+ * directly using trie_walk_init() and trie_walk_next() functions. The second
+ * approach allows suspending the iteration and continuing in it later.
+ * Prefixes are enumerated in the usual lexicographic order and may be
+ * restricted to a subset of the trie (all subnets of a specified prefix).
+ *
+ * Note that the trie walk does not reliably enumerate `implicit' prefixes
+ * defined by &low and &high fields in prefix patterns; it is supposed to be
+ * used on tries constructed from `explicit' prefixes (&low == &plen == &high
+ * in call to trie_add_prefix()).
+ *
+ * The trie walk has three basic state variables stored in the struct
+ * &f_trie_walk_state -- the current node in &stack[stack_pos], &accept_length
+ * for iteration over inter-node prefixes (non-branching prefixes on compressed
+ * path between the current node and its parent node, stored in the bitmap
+ * &accept of the current node) and &local_pos for iteration over intra-node
+ * prefixes (stored in the bitmap &local).
+ *
+ * The trie also supports longest-prefix-match query by trie_match_longest_ip4()
+ * and it can be extended to iteration over all covering prefixes for a given
+ * prefix (from longest to shortest) using TRIE_WALK_TO_ROOT_IP4() macro. There
+ * are also IPv6 versions (for practical reasons, these functions and macros are
+ * separate for IPv4 and IPv6). There is the same limitation to enumeration of
+ * `implicit' prefixes like with the previous TRIE_WALK() macro.
*/
#include "nest/bird.h"
@@ -86,7 +126,10 @@
#define ipa_mkmask(x) ip6_mkmask(x)
#define ipa_masklen(x) ip6_masklen(&x)
#define ipa_pxlen(x,y) ip6_pxlen(x,y)
-#define ipa_getbit(x,n) ip6_getbit(x,n)
+#define ipa_getbit(a,p) ip6_getbit(a,p)
+#define ipa_getbits(a,p,n) ip6_getbits(a,p,n)
+#define ipa_setbits(a,p,n) ip6_setbits(a,p,n)
+#define trie_local_mask(a,b,c) trie_local_mask6(a,b,c)
#define ipt_from_ip4(x) _MI6(_I(x), 0, 0, 0)
#define ipt_to_ip4(x) _MI4(_I0(x))
@@ -109,10 +152,11 @@ f_new_trie(linpool *lp, uint data_size)
}
static inline struct f_trie_node4 *
-new_node4(struct f_trie *t, int plen, ip4_addr paddr, ip4_addr pmask, ip4_addr amask)
+new_node4(struct f_trie *t, uint plen, uint local, ip4_addr paddr, ip4_addr pmask, ip4_addr amask)
{
struct f_trie_node4 *n = lp_allocz(t->lp, sizeof(struct f_trie_node4) + t->data_size);
n->plen = plen;
+ n->local = local;
n->addr = paddr;
n->mask = pmask;
n->accept = amask;
@@ -120,10 +164,11 @@ new_node4(struct f_trie *t, int plen, ip4_addr paddr, ip4_addr pmask, ip4_addr a
}
static inline struct f_trie_node6 *
-new_node6(struct f_trie *t, int plen, ip6_addr paddr, ip6_addr pmask, ip6_addr amask)
+new_node6(struct f_trie *t, uint plen, uint local, ip6_addr paddr, ip6_addr pmask, ip6_addr amask)
{
struct f_trie_node6 *n = lp_allocz(t->lp, sizeof(struct f_trie_node6) + t->data_size);
n->plen = plen;
+ n->local = local;
n->addr = paddr;
n->mask = pmask;
n->accept = amask;
@@ -131,24 +176,24 @@ new_node6(struct f_trie *t, int plen, ip6_addr paddr, ip6_addr pmask, ip6_addr a
}
static inline struct f_trie_node *
-new_node(struct f_trie *t, int plen, ip_addr paddr, ip_addr pmask, ip_addr amask)
+new_node(struct f_trie *t, uint plen, uint local, ip_addr paddr, ip_addr pmask, ip_addr amask)
{
if (t->ipv4)
- return (struct f_trie_node *) new_node4(t, plen, ipt_to_ip4(paddr), ipt_to_ip4(pmask), ipt_to_ip4(amask));
+ return (struct f_trie_node *) new_node4(t, plen, local, ipt_to_ip4(paddr), ipt_to_ip4(pmask), ipt_to_ip4(amask));
else
- return (struct f_trie_node *) new_node6(t, plen, ipa_to_ip6(paddr), ipa_to_ip6(pmask), ipa_to_ip6(amask));
+ return (struct f_trie_node *) new_node6(t, plen, local, ipa_to_ip6(paddr), ipa_to_ip6(pmask), ipa_to_ip6(amask));
}
static inline void
attach_node4(struct f_trie_node4 *parent, struct f_trie_node4 *child)
{
- parent->c[ip4_getbit(child->addr, parent->plen) ? 1 : 0] = child;
+ parent->c[ip4_getbits(child->addr, parent->plen, TRIE_STEP)] = child;
}
static inline void
attach_node6(struct f_trie_node6 *parent, struct f_trie_node6 *child)
{
- parent->c[ip6_getbit(child->addr, parent->plen) ? 1 : 0] = child;
+ parent->c[ip6_getbits(child->addr, parent->plen, TRIE_STEP)] = child;
}
static inline void
@@ -160,63 +205,96 @@ attach_node(struct f_trie_node *parent, struct f_trie_node *child, int v4)
attach_node6(&parent->v6, &child->v6);
}
-#define GET_ADDR(N,F,X) ((X) ? ipt_from_ip4((N)->v4.F) : ipa_from_ip6((N)->v6.F))
-#define SET_ADDR(N,F,X,V) ({ if (X) (N)->v4.F =ipt_to_ip4(V); else (N)->v6.F =ipa_to_ip6(V); })
-#define GET_CHILD(N,F,X,I) ((X) ? (struct f_trie_node *) (N)->v4.c[I] : (struct f_trie_node *) (N)->v6.c[I])
-/**
- * trie_add_prefix
- * @t: trie to add to
- * @net: IP network prefix
- * @l: prefix lower bound
- * @h: prefix upper bound
+/*
+ * Internal prefixes of a node are represented by the local bitmask, each bit for
+ * one prefix. Bit 0 is unused, bit 1 is for the main prefix of the node,
+ * remaining bits correspond to subprefixes by this pattern:
*
- * Adds prefix (prefix pattern) @n to trie @t. @l and @h are lower
- * and upper bounds on accepted prefix lengths, both inclusive.
- * 0 <= l, h <= 32 (128 for IPv6).
+ * 1
+ * 2 3
+ * 4 5 6 7
+ * 8 9 A B C D E F
*
- * Returns a pointer to the allocated node. The function can return a pointer to
- * an existing node if @px and @plen are the same. If px/plen == 0/0 (or ::/0),
- * a pointer to the root node is returned. Returns NULL when called with
- * mismatched IPv4/IPv6 net type.
+ * E.g., for a 10.0.0.0/8 node, the subprefix 10.64.0.0/10 would be at position 5.
*/
-void *
-trie_add_prefix(struct f_trie *t, const net_addr *net, uint l, uint h)
+/*
+ * Compute the appropriate mask representing prefix px/plen in the local bitmask
+ * of a node with prefix length nlen, assuming that nlen <= plen < (nlen + TRIE_STEP).
+ */
+static inline uint
+trie_local_mask4(ip4_addr px, uint plen, uint nlen)
{
- uint plen = net_pxlen(net);
- ip_addr px;
- int v4;
+ uint step = plen - nlen;
+ uint pos = (1u << step) + ip4_getbits(px, nlen, step);
+ return 1u << pos;
+}
- switch (net->type)
- {
- case NET_IP4: px = ipt_from_ip4(net4_prefix(net)); v4 = 1; break;
- case NET_IP6: px = ipa_from_ip6(net6_prefix(net)); v4 = 0; break;
- default: bug("invalid type");
- }
+static inline uint
+trie_local_mask6(ip6_addr px, uint plen, uint nlen)
+{
+ uint step = plen - nlen;
+ uint pos = (1u << step) + ip6_getbits(px, nlen, step);
+ return 1u << pos;
+}
- if (t->ipv4 != v4)
- {
- if (t->ipv4 < 0)
- t->ipv4 = v4;
- else
- return NULL;
- }
+/*
+ * Compute an appropriate local mask (for a node with prefix length nlen)
+ * representing prefixes of px that are accepted by amask and fall within the
+ * range associated with that node. Used for propagation of child accept mask
+ * to parent local mask.
+ */
+static inline uint
+trie_amask_to_local(ip_addr px, ip_addr amask, uint nlen)
+{
+ uint local = 0;
- if (l == 0)
- t->zero = 1;
- else
- l--;
+ for (uint plen = MAX(nlen, 1); plen < (nlen + TRIE_STEP); plen++)
+ if (ipa_getbit(amask, plen - 1))
+ local |= trie_local_mask(px, plen, nlen);
- if (h < plen)
- plen = h;
+ return local;
+}
+
+/*
+ * Compute a bitmask representing a level of subprefixes (of the same length),
+ * using the specified position as a root. E.g., level 2 from root position 3 would
+ * be bit positions C-F, returned as bitmask 0xf000.
+ */
+static inline uint
+trie_level_mask(uint pos, uint level)
+{
+ return ((1u << (1u << level)) - 1) << (pos << level);
+}
- ip_addr amask = ipa_xor(ipa_mkmask(l), ipa_mkmask(h));
+
+#define GET_ADDR(N,F,X) ((X) ? ipt_from_ip4((N)->v4.F) : ipa_from_ip6((N)->v6.F))
+#define SET_ADDR(N,F,X,V) ({ if (X) (N)->v4.F =ipt_to_ip4(V); else (N)->v6.F =ipa_to_ip6(V); })
+
+#define GET_LOCAL(N,X) ((X) ? (N)->v4.local : (N)->v6.local)
+#define ADD_LOCAL(N,X,V) ({ uint v_ = (V); if (X) (N)->v4.local |= v_; else (N)->v6.local |= v_; })
+
+#define GET_CHILD(N,X,I) ((X) ? (struct f_trie_node *) (N)->v4.c[I] : (struct f_trie_node *) (N)->v6.c[I])
+
+
+static void *
+trie_add_node(struct f_trie *t, uint plen, ip_addr px, uint local, uint l, uint h)
+{
+ uint l_ = l ? (l - 1) : 0;
+ ip_addr amask = (l_ < h) ? ipa_xor(ipa_mkmask(l_), ipa_mkmask(h)) : IPA_NONE;
ip_addr pmask = ipa_mkmask(plen);
ip_addr paddr = ipa_and(px, pmask);
struct f_trie_node *o = NULL;
struct f_trie_node *n = &t->root;
+ int v4 = t->ipv4;
+ /* Add all bits for each active level (0x0002 0x000c 0x00f0 0xff00) */
+ for (uint i = 0; i < TRIE_STEP; i++)
+ if ((l <= (plen + i)) && ((plen + i) <= h))
+ local |= trie_level_mask(1, i);
+
+ DBG("Insert node %I/%u (%I %x)\n", paddr, plen, amask, local);
while (n)
{
ip_addr naddr = GET_ADDR(n, addr, v4);
@@ -225,23 +303,31 @@ trie_add_prefix(struct f_trie *t, const net_addr *net, uint l, uint h)
ip_addr cmask = ipa_and(nmask, pmask);
uint nlen = v4 ? n->v4.plen : n->v6.plen;
+ DBG("Found node %I/%u (%I %x)\n",
+ naddr, nlen, accept, v4 ? n->v4.local : n->v6.local);
+
if (ipa_compare(ipa_and(paddr, cmask), ipa_and(naddr, cmask)))
{
/* We are out of path - we have to add branching node 'b'
between node 'o' and node 'n', and attach new node 'a'
as the other child of 'b'. */
- int blen = ipa_pxlen(paddr, naddr);
+ int blen = ROUND_DOWN_POW2(ipa_pxlen(paddr, naddr), TRIE_STEP);
ip_addr bmask = ipa_mkmask(blen);
ip_addr baddr = ipa_and(px, bmask);
/* Merge accept masks from children to get accept mask for node 'b' */
ip_addr baccm = ipa_and(ipa_or(amask, accept), bmask);
+ uint bloc = trie_amask_to_local(naddr, accept, blen) |
+ trie_amask_to_local(paddr, amask, blen);
- struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
- struct f_trie_node *b = new_node(t, blen, baddr, bmask, baccm);
+ struct f_trie_node *a = new_node(t, plen, local, paddr, pmask, amask);
+ struct f_trie_node *b = new_node(t, blen, bloc, baddr, bmask, baccm);
attach_node(o, b, v4);
attach_node(b, n, v4);
attach_node(b, a, v4);
+ t->prefix_count++;
+
+ DBG("Case 1\n");
return a;
}
@@ -249,66 +335,195 @@ trie_add_prefix(struct f_trie *t, const net_addr *net, uint l, uint h)
{
/* We add new node 'a' between node 'o' and node 'n' */
amask = ipa_or(amask, ipa_and(accept, pmask));
- struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
+ local |= trie_amask_to_local(naddr, accept, plen);
+ struct f_trie_node *a = new_node(t, plen, local, paddr, pmask, amask);
attach_node(o, a, v4);
attach_node(a, n, v4);
+ t->prefix_count++;
+
+ DBG("Case 2\n");
return a;
}
if (plen == nlen)
{
- /* We already found added node in trie. Just update accept mask */
+ /* We already found added node in trie. Just update accept and local mask */
accept = ipa_or(accept, amask);
SET_ADDR(n, accept, v4, accept);
+
+ if ((GET_LOCAL(n, v4) & local) != local)
+ t->prefix_count++;
+
+ ADD_LOCAL(n, v4, local);
+
+ DBG("Case 3\n");
return n;
}
/* Update accept mask part M2 and go deeper */
accept = ipa_or(accept, ipa_and(amask, nmask));
SET_ADDR(n, accept, v4, accept);
+ ADD_LOCAL(n, v4, trie_amask_to_local(paddr, amask, nlen));
+
+ DBG("Step %u\n", ipa_getbits(paddr, nlen));
/* n->plen < plen and plen <= 32 (128) */
o = n;
- n = GET_CHILD(n, c, v4, ipa_getbit(paddr, nlen) ? 1 : 0);
+ n = GET_CHILD(n, v4, ipa_getbits(paddr, nlen, TRIE_STEP));
}
/* We add new tail node 'a' after node 'o' */
- struct f_trie_node *a = new_node(t, plen, paddr, pmask, amask);
+ struct f_trie_node *a = new_node(t, plen, local, paddr, pmask, amask);
attach_node(o, a, v4);
+ t->prefix_count++;
+ DBG("Case 4\n");
return a;
}
+/**
+ * trie_add_prefix
+ * @t: trie to add to
+ * @net: IP network prefix
+ * @l: prefix lower bound
+ * @h: prefix upper bound
+ *
+ * Adds prefix (prefix pattern) @net to trie @t. @l and @h are lower
+ * and upper bounds on accepted prefix lengths, both inclusive.
+ * 0 <= l, h <= 32 (128 for IPv6).
+ *
+ * Returns a pointer to the allocated node. The function can return a pointer
+ * to an existing node if the same prefix is already present. If the prefix is
+ * 0.0.0.0/0 (or ::/0), a pointer to the root node is returned. Returns NULL
+ * when called with a mismatched IPv4/IPv6 net type.
+ */
+void *
+trie_add_prefix(struct f_trie *t, const net_addr *net, uint l, uint h)
+{
+ uint plen = net_pxlen(net);
+ ip_addr px;
+ int v4;
+
+ switch (net->type)
+ {
+ case NET_IP4:
+ case NET_VPN4:
+ case NET_ROA4:
+ px = ipt_from_ip4(net4_prefix(net));
+ v4 = 1;
+ break;
+
+ case NET_IP6:
+ case NET_VPN6:
+ case NET_ROA6:
+ case NET_IP6_SADR:
+ px = ipa_from_ip6(net6_prefix(net));
+ v4 = 0;
+ break;
+
+ default:
+ bug("invalid type");
+ }
+
+ if (t->ipv4 != v4)
+ {
+ if (t->ipv4 < 0)
+ t->ipv4 = v4;
+ else
+ return NULL;
+ }
+
+ DBG("\nInsert net %N (%u-%u)\n", net, l, h);
+
+ if (l == 0)
+ t->zero = 1;
+
+ if (h < plen)
+ plen = h;
+
+ /* Primary node length, plen rounded down */
+ uint nlen = ROUND_DOWN_POW2(plen, TRIE_STEP);
+
+ if (plen == nlen)
+ return trie_add_node(t, nlen, px, 0, l, h);
+
+  /* Secondary node length, plen rounded up */
+ uint slen = nlen + TRIE_STEP;
+ void *node = NULL;
+
+ /*
+ * For unaligned prefix lengths it is more complicated. We need to encode
+ * matching prefixes of lengths from l to h. There are three cases of lengths:
+ *
+ * 1) 0..nlen are encoded by the accept mask of the primary node
+ * 2) nlen..(slen-1) are encoded by the local mask of the primary node
+ * 3) slen..max are encoded in secondary nodes
+ */
+
+ if (l < slen)
+ {
+ uint local = 0;
+
+ /* Compute local bits for accepted nlen..(slen-1) prefixes */
+ for (uint i = 0; i < TRIE_STEP; i++)
+ if ((l <= (nlen + i)) && ((nlen + i) <= h))
+ {
+ uint pos = (1u << i) + ipa_getbits(px, nlen, i);
+ uint len = ((nlen + i) <= plen) ? 1 : (1u << (nlen + i - plen));
+
+ /* We need to fill 'len' bits starting at 'pos' position */
+ local |= ((1u << len) - 1) << pos;
+ }
+
+ /* Add the primary node */
+ node = trie_add_node(t, nlen, px, local, l, nlen);
+ }
+
+ if (slen <= h)
+ {
+ uint l2 = MAX(l, slen);
+ uint max = (1u << (slen - plen));
+
+ /* Add secondary nodes */
+ for (uint i = 0; i < max; i++)
+ node = trie_add_node(t, slen, ipa_setbits(px, slen - 1, i), 0, l2, h);
+ }
+
+ return node;
+}
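
A minimal sketch of building and querying a trie with trie_add_prefix() and trie_match_net(); the linpool *lp, the net_addr *n being tested, and the concrete prefix are assumptions for illustration:

    struct f_trie *t = f_new_trie(lp, 0);

    /* Accept subnets of 192.168.0.0/16 with prefix lengths 24 to 32 */
    net_addr_ip4 px = NET_ADDR_IP4(ip4_build(192, 168, 0, 0), 16);
    trie_add_prefix(t, (net_addr *) &px, 24, 32);

    /* Later, test some net_addr *n against the trie */
    int matched = trie_match_net(t, n);
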
+
+
static int
trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen)
{
- ip4_addr pmask = ip4_mkmask(plen);
- ip4_addr paddr = ip4_and(px, pmask);
-
if (plen == 0)
return t->zero;
int plentest = plen - 1;
+ uint nlen = ROUND_DOWN_POW2(plen, TRIE_STEP);
+ uint local = trie_local_mask4(px, plen, nlen);
const struct f_trie_node4 *n = &t->root.v4;
while (n)
{
- ip4_addr cmask = ip4_and(n->mask, pmask);
-
/* We are out of path */
- if (ip4_compare(ip4_and(paddr, cmask), ip4_and(n->addr, cmask)))
+ if (!ip4_prefix_equal(px, n->addr, MIN(plen, n->plen)))
return 0;
+ /* Check local mask */
+ if ((n->plen == nlen) && (n->local & local))
+ return 1;
+
/* Check accept mask */
if (ip4_getbit(n->accept, plentest))
return 1;
/* We finished trie walk and still no match */
- if (plen <= n->plen)
+ if (nlen <= n->plen)
return 0;
/* Choose children */
- n = n->c[(ip4_getbit(paddr, n->plen)) ? 1 : 0];
+ n = n->c[ip4_getbits(px, n->plen, TRIE_STEP)];
}
return 0;
@@ -317,33 +532,34 @@ trie_match_net4(const struct f_trie *t, ip4_addr px, uint plen)
static int
trie_match_net6(const struct f_trie *t, ip6_addr px, uint plen)
{
- ip6_addr pmask = ip6_mkmask(plen);
- ip6_addr paddr = ip6_and(px, pmask);
-
if (plen == 0)
return t->zero;
int plentest = plen - 1;
+ uint nlen = ROUND_DOWN_POW2(plen, TRIE_STEP);
+ uint local = trie_local_mask6(px, plen, nlen);
const struct f_trie_node6 *n = &t->root.v6;
while (n)
{
- ip6_addr cmask = ip6_and(n->mask, pmask);
-
/* We are out of path */
- if (ip6_compare(ip6_and(paddr, cmask), ip6_and(n->addr, cmask)))
+ if (!ip6_prefix_equal(px, n->addr, MIN(plen, n->plen)))
return 0;
+ /* Check local mask */
+ if ((n->plen == nlen) && (n->local & local))
+ return 1;
+
/* Check accept mask */
if (ip6_getbit(n->accept, plentest))
return 1;
/* We finished trie walk and still no match */
- if (plen <= n->plen)
+ if (nlen <= n->plen)
return 0;
/* Choose children */
- n = n->c[(ip6_getbit(paddr, n->plen)) ? 1 : 0];
+ n = n->c[ip6_getbits(px, n->plen, TRIE_STEP)];
}
return 0;
@@ -378,6 +594,412 @@ trie_match_net(const struct f_trie *t, const net_addr *n)
}
}
+
+/**
+ * trie_match_longest_ip4
+ * @t: trie
+ * @net: net address
+ * @dst: return value
+ * @found0: optional returned bitmask of found nodes
+ *
+ * Perform longest prefix match for the address @net and return the resulting
+ * prefix in the buffer @dst. The bitmask @found0 is used to report lengths of
+ * prefixes on the path from the root to the resulting prefix. E.g., if there is
+ * also a shorter matching /20 prefix, then the 20th bit is set in @found0. This
+ * can be used to enumerate all matching prefixes for the network @net using
+ * function trie_match_next_longest_ip4() or macro TRIE_WALK_TO_ROOT_IP4().
+ *
+ * This function assumes an IPv4 trie; there is also an IPv6 variant. The @net
+ * argument is typed as net_addr_ip4, but any IPv4-based net_addr would be
+ * accepted (as with net4_prefix()). The returned @dst is always net_addr_ip4.
+ *
+ * Result: 1 if a matching prefix was found, 0 if not.
+ */
+int
+trie_match_longest_ip4(const struct f_trie *t, const net_addr_ip4 *net, net_addr_ip4 *dst, ip4_addr *found0)
+{
+ ASSERT(t->ipv4);
+
+ const ip4_addr prefix = net->prefix;
+ const int pxlen = net->pxlen;
+
+ const struct f_trie_node4 *n = &t->root.v4;
+ int len = 0;
+
+ ip4_addr found = IP4_NONE;
+ int last = -1;
+
+ while (n)
+ {
+ /* We are out of path */
+ if (!ip4_prefix_equal(prefix, n->addr, MIN(pxlen, n->plen)))
+ goto done;
+
+ /* Check accept mask */
+ for (; len < n->plen; len++)
+ {
+ if (len > pxlen)
+ goto done;
+
+ if (ip4_getbit(n->accept, len - 1))
+ {
+ /* len is always < 32 due to len < n->plen */
+ ip4_setbit(&found, len);
+ last = len;
+ }
+ }
+
+ /* Special case for max length, there is only one valid local position */
+ if (len == IP4_MAX_PREFIX_LENGTH)
+ {
+ if (n->local & (1u << 1))
+ last = len;
+
+ goto done;
+ }
+
+ /* Check local mask */
+ for (int pos = 1; pos < (1 << TRIE_STEP); pos = 2 * pos + ip4_getbit(prefix, len), len++)
+ {
+ if (len > pxlen)
+ goto done;
+
+ if (n->local & (1u << pos))
+ {
+ /* len is always < 32 due to special case above */
+ ip4_setbit(&found, len);
+ last = len;
+ }
+ }
+
+ /* Choose child */
+ n = n->c[ip4_getbits(prefix, n->plen, TRIE_STEP)];
+ }
+
+done:
+ if (last < 0)
+ return 0;
+
+ *dst = NET_ADDR_IP4(ip4_and(prefix, ip4_mkmask(last)), last);
+
+ if (found0)
+ *found0 = found;
+
+ return 1;
+}
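
A hedged sketch of how the longest-prefix-match query and the covering-prefix walk fit together; 'trie' and the queried net_addr_ip4 'net' are assumptions, and the pattern mirrors t_trie_walk_to_root() in trie_test.c below:

    net_addr_ip4 best;
    ip4_addr found;

    if (trie_match_longest_ip4(trie, &net, &best, &found))
    {
      /* 'best' is the longest matching prefix, bits in 'found' mark lengths of all matches */
    }

    /* Enumerate all covering prefixes, from longest to shortest */
    TRIE_WALK_TO_ROOT_IP4(trie, &net, sub)
    {
      /* 'sub' holds the current covering prefix */
    }
    TRIE_WALK_TO_ROOT_END;
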
+
+
+/**
+ * trie_match_longest_ip6
+ * @t: trie
+ * @net: net address
+ * @dst: return value
+ * @found0: optional returned bitmask of found nodes
+ *
+ * Perform longest prefix match for the address @net and return the resulting
+ * prefix in the buffer @dst. The bitmask @found0 is used to report lengths of
+ * prefixes on the path from the root to the resulting prefix. E.g., if there is
+ * also a shorter matching /20 prefix, then the 20th bit is set in @found0. This
+ * can be used to enumerate all matching prefixes for the network @net using
+ * function trie_match_next_longest_ip6() or macro TRIE_WALK_TO_ROOT_IP6().
+ *
+ * This function assumes an IPv6 trie; there is also an IPv4 variant. The @net
+ * argument is typed as net_addr_ip6, but any IPv6-based net_addr would be
+ * accepted (as with net6_prefix()). The returned @dst is always net_addr_ip6.
+ *
+ * Result: 1 if a matching prefix was found, 0 if not.
+ */
+int
+trie_match_longest_ip6(const struct f_trie *t, const net_addr_ip6 *net, net_addr_ip6 *dst, ip6_addr *found0)
+{
+ ASSERT(!t->ipv4);
+
+ const ip6_addr prefix = net->prefix;
+ const int pxlen = net->pxlen;
+
+ const struct f_trie_node6 *n = &t->root.v6;
+ int len = 0;
+
+ ip6_addr found = IP6_NONE;
+ int last = -1;
+
+ while (n)
+ {
+ /* We are out of path */
+ if (!ip6_prefix_equal(prefix, n->addr, MIN(pxlen, n->plen)))
+ goto done;
+
+ /* Check accept mask */
+ for (; len < n->plen; len++)
+ {
+ if (len > pxlen)
+ goto done;
+
+ if (ip6_getbit(n->accept, len - 1))
+ {
+ /* len is always < 128 due to len < n->plen */
+ ip6_setbit(&found, len);
+ last = len;
+ }
+ }
+
+ /* Special case for max length, there is only one valid local position */
+ if (len == IP6_MAX_PREFIX_LENGTH)
+ {
+ if (n->local & (1u << 1))
+ last = len;
+
+ goto done;
+ }
+
+ /* Check local mask */
+ for (int pos = 1; pos < (1 << TRIE_STEP); pos = 2 * pos + ip6_getbit(prefix, len), len++)
+ {
+ if (len > pxlen)
+ goto done;
+
+ if (n->local & (1u << pos))
+ {
+ /* len is always < 128 due to special case above */
+ ip6_setbit(&found, len);
+ last = len;
+ }
+ }
+
+ /* Choose child */
+ n = n->c[ip6_getbits(prefix, n->plen, TRIE_STEP)];
+ }
+
+done:
+ if (last < 0)
+ return 0;
+
+ *dst = NET_ADDR_IP6(ip6_and(prefix, ip6_mkmask(last)), last);
+
+ if (found0)
+ *found0 = found;
+
+ return 1;
+}
+
+#define SAME_PREFIX(A,B,X,L) ((X) ? ip4_prefix_equal((A)->v4.addr, net4_prefix(B), (L)) : ip6_prefix_equal((A)->v6.addr, net6_prefix(B), (L)))
+#define GET_NET_BITS(N,X,A,B) ((X) ? ip4_getbits(net4_prefix(N), (A), (B)) : ip6_getbits(net6_prefix(N), (A), (B)))
+
+/**
+ * trie_walk_init
+ * @s: walk state
+ * @t: trie
+ * @net: optional subnet for walk
+ *
+ * Initialize walk state for a subsequent walk through nodes of the trie @t by
+ * trie_walk_next(). The argument @net allows restricting the walk to a given
+ * subnet; otherwise, a full walk over all nodes is used. This is done by
+ * finding the node at or below @net and the starting position in it.
+ */
+void
+trie_walk_init(struct f_trie_walk_state *s, const struct f_trie *t, const net_addr *net)
+{
+ *s = (struct f_trie_walk_state) {
+ .ipv4 = t->ipv4,
+ .accept_length = 0,
+ .start_pos = 1,
+ .local_pos = 1,
+ .stack_pos = 0,
+ .stack[0] = &t->root
+ };
+
+ if (!net)
+ return;
+
+ /* We want to find node of level at least plen */
+ int plen = ROUND_DOWN_POW2(net->pxlen, TRIE_STEP);
+ const struct f_trie_node *n = &t->root;
+ const int v4 = t->ipv4;
+
+ while (n)
+ {
+ int nlen = v4 ? n->v4.plen : n->v6.plen;
+
+ /* We are out of path */
+ if (!SAME_PREFIX(n, net, v4, MIN(net->pxlen, nlen)))
+ break;
+
+ /* We found final node */
+ if (nlen >= plen)
+ {
+ if (nlen == plen)
+ {
+ /* Find proper local_pos, while accept_length is not used */
+ int step = net->pxlen - plen;
+ s->start_pos = s->local_pos = (1u << step) + GET_NET_BITS(net, v4, plen, step);
+ s->accept_length = plen;
+ }
+ else
+ {
+ /* Start from pos 1 in local node, but first try accept mask */
+ s->accept_length = net->pxlen;
+ }
+
+ s->stack[0] = n;
+ return;
+ }
+
+ /* Choose child */
+ n = GET_CHILD(n, v4, GET_NET_BITS(net, v4, nlen, TRIE_STEP));
+ }
+
+ s->stack[0] = NULL;
+ return;
+}
+
+#define GET_ACCEPT_BIT(N,X,B) ((X) ? ip4_getbit((N)->v4.accept, (B)) : ip6_getbit((N)->v6.accept, (B)))
+#define GET_LOCAL_BIT(N,X,B) (((X) ? (N)->v4.local : (N)->v6.local) & (1u << (B)))
+
+/**
+ * trie_walk_next
+ * @s: walk state
+ * @net: return value
+ *
+ * Find the next prefix in the trie walk and return it in the buffer @net.
+ * Prefixes are walked in the usual lexicographic order and may be restricted
+ * to a subset of the trie during walk setup by trie_walk_init(). Note that the
+ * trie walk does not iterate reliably over 'implicit' prefixes defined by &low
+ * and &high fields in prefix patterns; it is intended to be used on tries
+ * constructed from 'explicit' prefixes (&low == &plen == &high in the call to
+ * trie_add_prefix()).
+ *
+ * Result: 1 if the next prefix was found, 0 for the end of walk.
+ */
+int
+trie_walk_next(struct f_trie_walk_state *s, net_addr *net)
+{
+ const struct f_trie_node *n = s->stack[s->stack_pos];
+ int len = s->accept_length;
+ int pos = s->local_pos;
+ int v4 = s->ipv4;
+
+ /*
+ * The walk has three basic state variables -- n, len and pos. In each node n,
+ * we first walk superprefixes (by len in &accept bitmask), and then we walk
+ * internal positions (by pos in &local bitmask). These positions are:
+ *
+ * 1
+ * 2 3
+ * 4 5 6 7
+ * 8 9 A B C D E F
+ *
+ * We walk them depth-first, including virtual positions 10-1F that are
+ * equivalent of position 1 in child nodes 0-F.
+ */
+
+ if (!n)
+ {
+ memset(net, 0, v4 ? sizeof(net_addr_ip4) : sizeof(net_addr_ip6));
+ return 0;
+ }
+
+next_node:;
+ /* Current node prefix length */
+ int nlen = v4 ? n->v4.plen : n->v6.plen;
+
+ /* First, check for accept prefix */
+ for (; len < nlen; len++)
+ if (GET_ACCEPT_BIT(n, v4, len - 1))
+ {
+ if (v4)
+ net_fill_ip4(net, ip4_and(n->v4.addr, ip4_mkmask(len)), len);
+ else
+ net_fill_ip6(net, ip6_and(n->v6.addr, ip6_mkmask(len)), len);
+
+ s->local_pos = pos;
+ s->accept_length = len + 1;
+ return 1;
+ }
+
+next_pos:
+ /* Bottom of this node */
+ if (pos >= (1 << TRIE_STEP))
+ {
+ const struct f_trie_node *child = GET_CHILD(n, v4, pos - (1 << TRIE_STEP));
+ int dir = 0;
+
+ /* No child node */
+ if (!child)
+ {
+ /* Step up until return from left child (pos is even) */
+ do
+ {
+ /* Step up from start node */
+ if ((s->stack_pos == 0) && (pos == s->start_pos))
+ {
+ s->stack[0] = NULL;
+ memset(net, 0, v4 ? sizeof(net_addr_ip4) : sizeof(net_addr_ip6));
+ return 0;
+ }
+
+ /* Top of this node */
+ if (pos == 1)
+ {
+ ASSERT(s->stack_pos);
+ const struct f_trie_node *old = n;
+
+ /* Move to parent node */
+ s->stack_pos--;
+ n = s->stack[s->stack_pos];
+ nlen = v4 ? n->v4.plen : n->v6.plen;
+
+ pos = v4 ?
+ ip4_getbits(old->v4.addr, nlen, TRIE_STEP) :
+ ip6_getbits(old->v6.addr, nlen, TRIE_STEP);
+ pos += (1 << TRIE_STEP);
+ len = nlen;
+
+ ASSERT(GET_CHILD(n, v4, pos - (1 << TRIE_STEP)) == old);
+ }
+
+ /* Step up */
+ dir = pos % 2;
+ pos = pos / 2;
+ }
+ while (dir);
+
+ /* Continue with step down to the right child */
+ pos = 2 * pos + 1;
+ goto next_pos;
+ }
+
+ /* Move to child node */
+ pos = 1;
+ len = nlen + TRIE_STEP;
+
+ s->stack_pos++;
+ n = s->stack[s->stack_pos] = child;
+ goto next_node;
+ }
+
+ /* Check for local prefix */
+ if (GET_LOCAL_BIT(n, v4, pos))
+ {
+ /* Convert pos to address of local network */
+ int x = (pos >= 2) + (pos >= 4) + (pos >= 8);
+ int y = pos & ((1u << x) - 1);
+
+ if (v4)
+ net_fill_ip4(net, !x ? n->v4.addr : ip4_setbits(n->v4.addr, nlen + x - 1, y), nlen + x);
+ else
+ net_fill_ip6(net, !x ? n->v6.addr : ip6_setbits(n->v6.addr, nlen + x - 1, y), nlen + x);
+
+ s->local_pos = 2 * pos;
+ s->accept_length = len;
+ return 1;
+ }
+
+ /* Step down */
+ pos = 2 * pos;
+ goto next_pos;
+}
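
A minimal sketch of the direct walk interface, the suspendable alternative to TRIE_WALK(); the 'trie' variable and the suspension point are assumptions:

    struct f_trie_walk_state ws;
    net_addr net;

    trie_walk_init(&ws, trie, NULL);   /* or pass a subnet to restrict the walk */
    while (trie_walk_next(&ws, &net))
    {
      /* process 'net'; the loop may be left here and the walk resumed later by
         calling trie_walk_next() again, as long as 'ws' and the trie persist */
    }
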
+
+
static int
trie_node_same4(const struct f_trie_node4 *t1, const struct f_trie_node4 *t2)
{
@@ -392,7 +1014,11 @@ trie_node_same4(const struct f_trie_node4 *t1, const struct f_trie_node4 *t2)
(! ip4_equal(t1->accept, t2->accept)))
return 0;
- return trie_node_same4(t1->c[0], t2->c[0]) && trie_node_same4(t1->c[1], t2->c[1]);
+ for (uint i = 0; i < (1 << TRIE_STEP); i++)
+ if (! trie_node_same4(t1->c[i], t2->c[i]))
+ return 0;
+
+ return 1;
}
static int
@@ -409,7 +1035,11 @@ trie_node_same6(const struct f_trie_node6 *t1, const struct f_trie_node6 *t2)
(! ip6_equal(t1->accept, t2->accept)))
return 0;
- return trie_node_same6(t1->c[0], t2->c[0]) && trie_node_same6(t1->c[1], t2->c[1]);
+ for (uint i = 0; i < (1 << TRIE_STEP); i++)
+ if (! trie_node_same6(t1->c[i], t2->c[i]))
+ return 0;
+
+ return 1;
}
/**
@@ -431,30 +1061,70 @@ trie_same(const struct f_trie *t1, const struct f_trie *t2)
return trie_node_same6(&t1->root.v6, &t2->root.v6);
}
+
+static const u8 log2[16] = {0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3};
+
static void
-trie_node_format4(const struct f_trie_node4 *t, buffer *buf)
+trie_node_format(const struct f_trie_node *n, buffer *buf, int v4)
{
- if (t == NULL)
+ if (n == NULL)
return;
- if (ip4_nonzero(t->accept))
- buffer_print(buf, "%I4/%d{%I4}, ", t->addr, t->plen, t->accept);
+ if (v4)
+ {
+ if (ip4_nonzero(n->v4.accept))
+ buffer_print(buf, "%I4/%d{%I4}, ", n->v4.addr, n->v4.plen, n->v4.accept);
+ }
+ else
+ {
+ if (ip6_nonzero(n->v6.accept))
+ buffer_print(buf, "%I6/%d{%I6}, ", n->v6.addr, n->v6.plen, n->v6.accept);
+ }
- trie_node_format4(t->c[0], buf);
- trie_node_format4(t->c[1], buf);
-}
+ int nlen = v4 ? n->v4.plen : n->v6.plen;
+ uint local = v4 ? n->v4.local : n->v6.local;
-static void
-trie_node_format6(const struct f_trie_node6 *t, buffer *buf)
-{
- if (t == NULL)
- return;
+ for (int i = (nlen ? 0 : 1); i < TRIE_STEP; i++)
+ if (GET_ACCEPT_BIT(n, v4, nlen + i - 1))
+ local &= ~trie_level_mask(1, i);
- if (ip6_nonzero(t->accept))
- buffer_print(buf, "%I6/%d{%I6}, ", t->addr, t->plen, t->accept);
+ for (int pos = 2; local && (pos < (1 << TRIE_STEP)); pos++)
+ if (local & (1u << pos))
+ {
+ int lvl = log2[pos];
+ int plen = nlen + lvl;
+
+ int i;
+ for (i = 0; lvl + i < TRIE_STEP; i++)
+ {
+ uint lmask = trie_level_mask(pos, i);
+
+ if ((local & lmask) != lmask)
+ break;
+
+ local &= ~lmask;
+ }
+
+ uint addr_bits = pos & ((1u << lvl) - 1);
+ uint accept_bits = (1u << i) - 1;
+ int h = plen + i - 1;
+
+ if (v4)
+ {
+ ip4_addr addr = ip4_setbits(n->v4.addr, plen - 1, addr_bits);
+ ip4_addr mask = ip4_setbits(IP4_NONE, h - 1, accept_bits);
+ buffer_print(buf, "%I4/%d{%I4}, ", addr, plen, mask);
+ }
+ else
+ {
+ ip6_addr addr = ip6_setbits(n->v6.addr, plen - 1, addr_bits);
+ ip6_addr mask = ip6_setbits(IP6_NONE, h - 1, accept_bits);
+ buffer_print(buf, "%I6/%d{%I6}, ", addr, plen, mask);
+ }
+ }
- trie_node_format6(t->c[0], buf);
- trie_node_format6(t->c[1], buf);
+ for (int i = 0; i < (1 << TRIE_STEP); i++)
+ trie_node_format(GET_CHILD(n, v4, i), buf, v4);
}
/**
@@ -472,10 +1142,7 @@ trie_format(const struct f_trie *t, buffer *buf)
if (t->zero)
buffer_print(buf, "%I/%d, ", t->ipv4 ? IPA_NONE4 : IPA_NONE6, 0);
- if (t->ipv4)
- trie_node_format4(&t->root.v4, buf);
- else
- trie_node_format6(&t->root.v6, buf);
+ trie_node_format(&t->root, buf, t->ipv4);
if (buf->pos == buf->end)
return;
diff --git a/filter/trie_test.c b/filter/trie_test.c
index b2b36716..dc791280 100644
--- a/filter/trie_test.c
+++ b/filter/trie_test.c
@@ -14,9 +14,12 @@
#include "conf/conf.h"
#define TESTS_NUM 10
-#define PREFIXES_NUM 10
+#define PREFIXES_NUM 32
#define PREFIX_TESTS_NUM 10000
+#define PREFIX_BENCH_NUM 100000000
+#define TRIE_BUFFER_SIZE 1024
+#define TEST_BUFFER_SIZE (1024*1024)
#define BIG_BUFFER_SIZE 10000
/* Wrapping structure for storing f_prefixes structures in list */
@@ -31,146 +34,849 @@ xrandom(u32 max)
return (bt_random() % max);
}
+static inline uint
+get_exp_random(void)
+{
+ uint r, n = 0;
+
+ for (r = bt_random(); r & 1; r = r >> 1)
+ n++;
+
+ return n;
+}
+
static int
-is_prefix_included(list *prefixes, struct f_prefix *needle)
+compare_prefixes(const void *a, const void *b)
{
- struct f_prefix_node *n;
- WALK_LIST(n, *prefixes)
- {
- ip6_addr cmask = ip6_mkmask(MIN(n->prefix.net.pxlen, needle->net.pxlen));
+ return net_compare(&((const struct f_prefix *) a)->net,
+ &((const struct f_prefix *) b)->net);
+}
+
+static inline int
+matching_ip4_nets(const net_addr_ip4 *a, const net_addr_ip4 *b)
+{
+ ip4_addr cmask = ip4_mkmask(MIN(a->pxlen, b->pxlen));
+ return ip4_compare(ip4_and(a->prefix, cmask), ip4_and(b->prefix, cmask)) == 0;
+}
+
+static inline int
+matching_ip6_nets(const net_addr_ip6 *a, const net_addr_ip6 *b)
+{
+ ip6_addr cmask = ip6_mkmask(MIN(a->pxlen, b->pxlen));
+ return ip6_compare(ip6_and(a->prefix, cmask), ip6_and(b->prefix, cmask)) == 0;
+}
- ip6_addr ip = net6_prefix(&n->prefix.net);
- ip6_addr needle_ip = net6_prefix(&needle->net);
+static inline int
+matching_nets(const net_addr *a, const net_addr *b)
+{
+ if (a->type != b->type)
+ return 0;
+
+ return (a->type == NET_IP4) ?
+ matching_ip4_nets((const net_addr_ip4 *) a, (const net_addr_ip4 *) b) :
+ matching_ip6_nets((const net_addr_ip6 *) a, (const net_addr_ip6 *) b);
+}
- if ((ipa_compare(ipa_and(ip, cmask), ipa_and(needle_ip, cmask)) == 0) &&
- (n->prefix.lo <= needle->net.pxlen) && (needle->net.pxlen <= n->prefix.hi))
+static int
+is_prefix_included(list *prefixes, const net_addr *needle)
+{
+ struct f_prefix_node *n;
+ WALK_LIST(n, *prefixes)
+ if (matching_nets(&n->prefix.net, needle) &&
+ (n->prefix.lo <= needle->pxlen) && (needle->pxlen <= n->prefix.hi))
{
- bt_debug("FOUND\t" PRIip6 "/%d %d-%d\n", ARGip6(net6_prefix(&n->prefix.net)), n->prefix.net.pxlen, n->prefix.lo, n->prefix.hi);
+ char buf[64];
+ bt_format_net(buf, 64, &n->prefix.net);
+ bt_debug("FOUND %s %d-%d\n", buf, n->prefix.lo, n->prefix.hi);
+
return 1; /* OK */
}
- }
+
return 0; /* FAIL */
}
-static struct f_prefix
-get_random_ip6_prefix(void)
+static void
+get_random_net(net_addr *net, int v6)
{
- struct f_prefix p;
- u8 pxlen = xrandom(120)+8;
- ip6_addr ip6 = ip6_build(bt_random(),bt_random(),bt_random(),bt_random());
- net_addr_ip6 net6 = NET_ADDR_IP6(ip6, pxlen);
+ if (!v6)
+ {
+ uint pxlen = xrandom(24)+8;
+ ip4_addr ip4 = ip4_from_u32((u32) bt_random());
+ net_fill_ip4(net, ip4_and(ip4, ip4_mkmask(pxlen)), pxlen);
+ }
+ else
+ {
+ uint pxlen = xrandom(120)+8;
+ ip6_addr ip6 = ip6_build(bt_random(), bt_random(), bt_random(), bt_random());
+ net_fill_ip6(net, ip6_and(ip6, ip6_mkmask(pxlen)), pxlen);
+ }
+}
- p.net = *((net_addr*) &net6);
+static void
+get_random_prefix(struct f_prefix *px, int v6, int tight)
+{
+ get_random_net(&px->net, v6);
+
+ if (tight)
+ {
+ px->lo = px->hi = px->net.pxlen;
+ }
+ else if (bt_random() % 2)
+ {
+ px->lo = 0;
+ px->hi = px->net.pxlen;
+ }
+ else
+ {
+ px->lo = px->net.pxlen;
+ px->hi = net_max_prefix_length[px->net.type];
+ }
+}
+
+static void
+get_random_ip4_subnet(net_addr_ip4 *net, const net_addr_ip4 *src, int pxlen)
+{
+ *net = NET_ADDR_IP4(ip4_and(src->prefix, ip4_mkmask(pxlen)), pxlen);
+
+ if (pxlen > src->pxlen)
+ {
+ ip4_addr rnd = ip4_from_u32((u32) bt_random());
+ ip4_addr mask = ip4_xor(ip4_mkmask(src->pxlen), ip4_mkmask(pxlen));
+ net->prefix = ip4_or(net->prefix, ip4_and(rnd, mask));
+ }
+}
+
+static void
+get_random_ip6_subnet(net_addr_ip6 *net, const net_addr_ip6 *src, int pxlen)
+{
+ *net = NET_ADDR_IP6(ip6_and(src->prefix, ip6_mkmask(pxlen)), pxlen);
+
+ if (pxlen > src->pxlen)
+ {
+ ip6_addr rnd = ip6_build(bt_random(), bt_random(), bt_random(), bt_random());
+ ip6_addr mask = ip6_xor(ip6_mkmask(src->pxlen), ip6_mkmask(pxlen));
+ net->prefix = ip6_or(net->prefix, ip6_and(rnd, mask));
+ }
+}
+
+static void
+get_random_subnet(net_addr *net, const net_addr *src, int pxlen)
+{
+ if (src->type == NET_IP4)
+ get_random_ip4_subnet((net_addr_ip4 *) net, (const net_addr_ip4 *) src, pxlen);
+ else
+ get_random_ip6_subnet((net_addr_ip6 *) net, (const net_addr_ip6 *) src, pxlen);
+}
+
+static void
+get_inner_net(net_addr *net, const struct f_prefix *src)
+{
+ int pxlen, step;
if (bt_random() % 2)
{
- p.lo = 0;
- p.hi = p.net.pxlen;
+ step = get_exp_random();
+ step = MIN(step, src->hi - src->lo);
+ pxlen = (bt_random() % 2) ? (src->lo + step) : (src->hi - step);
}
else
+ pxlen = src->lo + bt_random() % (src->hi - src->lo + 1);
+
+ get_random_subnet(net, &src->net, pxlen);
+}
+
+static void
+swap_random_bits_ip4(net_addr_ip4 *net, int num)
+{
+ for (int i = 0; i < num; i++)
{
- p.lo = p.net.pxlen;
- p.hi = net_max_prefix_length[p.net.type];
+ ip4_addr swap = IP4_NONE;
+ ip4_setbit(&swap, bt_random() % net->pxlen);
+ net->prefix = ip4_xor(net->prefix, swap);
}
+}
- return p;
+static void
+swap_random_bits_ip6(net_addr_ip6 *net, int num)
+{
+ for (int i = 0; i < num; i++)
+ {
+ ip6_addr swap = IP6_NONE;
+ ip6_setbit(&swap, bt_random() % net->pxlen);
+ net->prefix = ip6_xor(net->prefix, swap);
+ }
}
static void
-generate_random_ipv6_prefixes(list *prefixes)
+swap_random_bits(net_addr *net, int num)
{
- int i;
- for (i = 0; i < PREFIXES_NUM; i++)
+ if (net->type == NET_IP4)
+ swap_random_bits_ip4((net_addr_ip4 *) net, num);
+ else
+ swap_random_bits_ip6((net_addr_ip6 *) net, num);
+}
+
+static void
+get_outer_net(net_addr *net, const struct f_prefix *src)
+{
+ int pxlen, step;
+ int inside = 0;
+ int max = net_max_prefix_length[src->net.type];
+
+ if ((src->lo > 0) && (bt_random() % 3))
+ {
+ step = 1 + get_exp_random();
+ step = MIN(step, src->lo);
+ pxlen = src->lo - step;
+ }
+ else if ((src->hi < max) && (bt_random() % 2))
{
- struct f_prefix f = get_random_ip6_prefix();
+ step = 1 + get_exp_random();
+ step = MIN(step, max - src->hi);
+ pxlen = src->hi + step;
+ }
+ else
+ {
+ pxlen = src->lo + bt_random() % (src->hi - src->lo + 1);
+ inside = 1;
+ }
- struct f_prefix_node *px = calloc(1, sizeof(struct f_prefix_node));
- px->prefix = f;
+ get_random_subnet(net, &src->net, pxlen);
- bt_debug("ADD\t" PRIip6 "/%d %d-%d\n", ARGip6(net6_prefix(&px->prefix.net)), px->prefix.net.pxlen, px->prefix.lo, px->prefix.hi);
+ /* Perhaps swap some bits in prefix */
+ if ((net->pxlen > 0) && (inside || (bt_random() % 4)))
+ swap_random_bits(net, 1 + get_exp_random());
+}
+
+static list *
+make_random_prefix_list(int num, int v6, int tight)
+{
+ list *prefixes = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
+ init_list(prefixes);
+
+ for (int i = 0; i < num; i++)
+ {
+ struct f_prefix_node *px = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
+ get_random_prefix(&px->prefix, v6, tight);
add_tail(prefixes, &px->n);
+
+ char buf[64];
+ bt_format_net(buf, 64, &px->prefix.net);
+ bt_debug("ADD %s{%d,%d}\n", buf, px->prefix.lo, px->prefix.hi);
+ }
+
+ return prefixes;
+}
+
+static struct f_trie *
+make_trie_from_prefix_list(list *prefixes)
+{
+ struct f_trie *trie = f_new_trie(tmp_linpool, 0);
+
+ struct f_prefix_node *n;
+ WALK_LIST(n, *prefixes)
+ trie_add_prefix(trie, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+
+ return trie;
+}
+
+/*
+ * Read a sequence of prefixes from a file handle and return a prefix list.
+ * Each prefix is on its own line; the sequence is terminated by an empty line
+ * or EOF. Arg @plus means each prefix should also cover all longer prefixes
+ * (its subnets).
+ */
+static list *
+read_prefix_list(FILE *f, int v6, int plus)
+{
+ ASSERT(!v6);
+
+ uint a0, a1, a2, a3, pl;
+ char s[32];
+ int n;
+
+ list *pxlist = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
+ init_list(pxlist);
+
+ errno = 0;
+ while (fgets(s, 32, f))
+ {
+ if (s[0] == '\n')
+ return pxlist;
+
+ n = sscanf(s, "%u.%u.%u.%u/%u", &a0, &a1, &a2, &a3, &pl);
+
+ if (n != 5)
+ bt_abort_msg("Invalid content of trie_data");
+
+ struct f_prefix_node *px = lp_allocz(tmp_linpool, sizeof(struct f_prefix_node));
+ net_fill_ip4(&px->prefix.net, ip4_build(a0, a1, a2, a3), pl);
+ px->prefix.lo = pl;
+ px->prefix.hi = plus ? IP4_MAX_PREFIX_LENGTH : pl;
+ add_tail(pxlist, &px->n);
+
+ char buf[64];
+ bt_format_net(buf, 64, &px->prefix.net);
+ bt_debug("ADD %s{%d,%d}\n", buf, px->prefix.lo, px->prefix.hi);
}
+
+ bt_syscall(errno, "fgets()");
+ return EMPTY_LIST(*pxlist) ? NULL : pxlist;
}
+/*
+ * Open a file and read multiple sequences of prefixes from it. Fill @data with
+ * prefix lists and @trie with generated tries, and return the number of
+ * sequences / tries. Both the prefix lists and the tries are allocated from
+ * tmp_linpool. Arg @plus means each prefix should also cover all longer
+ * prefixes (its subnets).
+ */
static int
-t_match_net(void)
+read_prefix_file(const char *filename, int plus,
+ list *data[], struct f_trie *trie[])
+{
+ FILE *f = fopen(filename, "r");
+ bt_syscall(!f, "fopen(%s)", filename);
+
+ int n = 0;
+ list *pxlist;
+ while (pxlist = read_prefix_list(f, 0, plus))
+ {
+ data[n] = pxlist;
+ trie[n] = make_trie_from_prefix_list(pxlist);
+ bt_debug("NEXT\n");
+ n++;
+ }
+
+ fclose(f);
+ bt_debug("DONE reading %d tries\n", n);
+
+ return n;
+}
+
+/*
+ * Select a random subset of @dn prefixes from the @sn prefix lists in @src,
+ * and store them in the buffer @dst (of size @dn). Prefixes may be chosen
+ * multiple times. The order of prefixes in the @dst buffer is randomized.
+ */
+static void
+select_random_prefix_subset(list *src[], net_addr dst[], int sn, int dn)
+{
+ int pn = 0;
+
+ if (!dn)
+ return;
+
+ /* Compute total prefix number */
+ for (int i = 0; i < sn; i++)
+ pn += list_length(src[i]);
+
+  /* Chance of selecting a prefix */
+ int rnd = (pn / dn) + 10;
+ int n = 0;
+
+ /* Iterate indefinitely over src array */
+ for (int i = 0; 1; i++, i = (i < sn) ? i : 0)
+ {
+ struct f_prefix_node *px;
+ WALK_LIST(px, *src[i])
+ {
+ if (xrandom(rnd) != 0)
+ continue;
+
+ net_copy(&dst[n], &px->prefix.net);
+ n++;
+
+ /* We have enough */
+ if (n == dn)
+ goto done;
+ }
+ }
+
+done:
+ /* Shuffle networks */
+ for (int i = 0; i < dn; i++)
+ {
+ int j = xrandom(dn);
+
+ if (i == j)
+ continue;
+
+ net_addr tmp;
+ net_copy(&tmp, &dst[i]);
+ net_copy(&dst[i], &dst[j]);
+ net_copy(&dst[j], &tmp);
+ }
+}
+
+/* Fill @dst buffer with @dn randomly generated /32 prefixes */
+static void
+make_random_addresses(net_addr dst[], int dn)
+{
+ for (int i = 0; i < dn; i++)
+ net_fill_ip4(&dst[i], ip4_from_u32((u32) bt_random()), IP4_MAX_PREFIX_LENGTH);
+}
+
+static void
+test_match_net(list *prefixes, struct f_trie *trie, const net_addr *net)
+{
+ char buf[64];
+ bt_format_net(buf, 64, net);
+ bt_debug("TEST %s\n", buf);
+
+ int should_be = is_prefix_included(prefixes, net);
+ int is_there = trie_match_net(trie, net);
+
+ bt_assert_msg(should_be == is_there, "Prefix %s %s match", buf,
+ (should_be ? "should" : "should not"));
+}
+
+static int
+t_match_random_net(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
- uint round;
- for (round = 0; round < TESTS_NUM; round++)
+ int v6 = 0;
+ for (int round = 0; round < TESTS_NUM; round++)
{
- list prefixes; /* of structs f_extended_prefix */
- init_list(&prefixes);
- struct f_trie *trie = f_new_trie(config->mem, 0);
+ list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
+ struct f_trie *trie = make_trie_from_prefix_list(prefixes);
- generate_random_ipv6_prefixes(&prefixes);
- struct f_prefix_node *n;
- WALK_LIST(n, prefixes)
+ for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
- trie_add_prefix(trie, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+ net_addr net;
+ get_random_net(&net, v6);
+ test_match_net(prefixes, trie, &net);
}
- int i;
- for (i = 0; i < PREFIX_TESTS_NUM; i++)
+ v6 = !v6;
+ tmp_flush();
+ }
+
+ bt_bird_cleanup();
+ return 1;
+}
+
+static int
+t_match_inner_net(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ int v6 = 0;
+ for (int round = 0; round < TESTS_NUM; round++)
+ {
+ list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
+ struct f_trie *trie = make_trie_from_prefix_list(prefixes);
+
+ struct f_prefix_node *n = HEAD(*prefixes);
+ for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
- struct f_prefix f = get_random_ip6_prefix();
- bt_debug("TEST\t" PRIip6 "/%d\n", ARGip6(net6_prefix(&f.net)), f.net.pxlen);
+ net_addr net;
+ get_inner_net(&net, &n->prefix);
+ test_match_net(prefixes, trie, &net);
- int should_be = is_prefix_included(&prefixes, &f);
- int is_there = trie_match_net(trie, &f.net);
- bt_assert_msg(should_be == is_there, "Prefix " PRIip6 "/%d %s", ARGip6(net6_prefix(&f.net)), f.net.pxlen, (should_be ? "should be found in trie" : "should not be found in trie"));
+ n = NODE_VALID(NODE_NEXT(n)) ? NODE_NEXT(n) : HEAD(*prefixes);
}
- struct f_prefix_node *nxt;
- WALK_LIST_DELSAFE(n, nxt, prefixes)
+ v6 = !v6;
+ tmp_flush();
+ }
+
+ bt_bird_cleanup();
+ return 1;
+}
+
+static int
+t_match_outer_net(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ int v6 = 0;
+ for (int round = 0; round < TESTS_NUM; round++)
+ {
+ list *prefixes = make_random_prefix_list(PREFIXES_NUM, v6, 0);
+ struct f_trie *trie = make_trie_from_prefix_list(prefixes);
+
+ struct f_prefix_node *n = HEAD(*prefixes);
+ for (int i = 0; i < PREFIX_TESTS_NUM; i++)
{
- free(n);
+ net_addr net;
+ get_outer_net(&net, &n->prefix);
+ test_match_net(prefixes, trie, &net);
+
+ n = NODE_VALID(NODE_NEXT(n)) ? NODE_NEXT(n) : HEAD(*prefixes);
}
+
+ v6 = !v6;
+ tmp_flush();
}
+ v6 = !v6;
+ bt_bird_cleanup();
+ return 1;
+}
+
+/*
+ * Read prefixes from @filename, build set of tries, prepare test data and do
+ * PREFIX_BENCH_NUM trie lookups. With @plus = 0, use a random subset of known
+ * prefixes as test data; with @plus = 1, use randomly generated /32 prefixes
+ * as test data.
+ */
+static int
+benchmark_trie_dataset(const char *filename, int plus)
+{
+ int n = 0;
+ list *data[TRIE_BUFFER_SIZE];
+ struct f_trie *trie[TRIE_BUFFER_SIZE];
+ net_addr *nets;
+
+ bt_reset_suite_case_timer();
+ bt_log_suite_case_result(1, "Reading %s", filename, n);
+ n = read_prefix_file(filename, plus, data, trie);
+ bt_log_suite_case_result(1, "Read prefix data, %d lists, ", n);
+
+ size_t trie_size = rmemsize(tmp_linpool).effective * 1000 / (1024*1024);
+ bt_log_suite_case_result(1, "Trie size %u.%03u MB",
+ (uint) (trie_size / 1000), (uint) (trie_size % 1000));
+
+ int t = PREFIX_BENCH_NUM / n;
+ int tb = MIN(t, TEST_BUFFER_SIZE);
+ nets = tmp_alloc(tb * sizeof(net_addr));
+
+ if (!plus)
+ select_random_prefix_subset(data, nets, n, tb);
+ else
+ make_random_addresses(nets, tb);
+
+ bt_log_suite_case_result(1, "Make test data, %d (%d) tests", t, tb);
+ bt_reset_suite_case_timer();
+
+ /*
+ int match = 0;
+ for (int i = 0; i < t; i++)
+ for (int j = 0; j < n; j++)
+ test_match_net(data[j], trie[j], &nets[i]);
+ */
+
+ int match = 0;
+ for (int i = 0; i < t; i++)
+ for (int j = 0; j < n; j++)
+ if (trie_match_net(trie[j], &nets[i % TEST_BUFFER_SIZE]))
+ match++;
+
+ bt_log_suite_case_result(1, "Matching done, %d / %d matches", match, t * n);
+
+ tmp_flush();
+ return 1;
+}
+
+static int UNUSED
+t_bench_trie_datasets_subset(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ /* Specific datasets, not included */
+ benchmark_trie_dataset("trie-data-bgp-1", 0);
+ benchmark_trie_dataset("trie-data-bgp-10", 0);
+ benchmark_trie_dataset("trie-data-bgp-100", 0);
+ benchmark_trie_dataset("trie-data-bgp-1000", 0);
+
bt_bird_cleanup();
+
return 1;
}
+static int UNUSED
+t_bench_trie_datasets_random(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ /* Specific datasets, not included */
+ benchmark_trie_dataset("trie-data-bgp-1", 1);
+ benchmark_trie_dataset("trie-data-bgp-10", 1);
+ benchmark_trie_dataset("trie-data-bgp-100", 1);
+ benchmark_trie_dataset("trie-data-bgp-1000", 1);
+
+ bt_bird_cleanup();
+
+ return 1;
+}
+
+
static int
t_trie_same(void)
{
bt_bird_init();
bt_config_parse(BT_CONFIG_SIMPLE);
- int round;
- for (round = 0; round < TESTS_NUM*4; round++)
+ int v6 = 0;
+ for (int round = 0; round < TESTS_NUM*4; round++)
{
- struct f_trie * trie1 = f_new_trie(config->mem, 0);
- struct f_trie * trie2 = f_new_trie(config->mem, 0);
+ list *prefixes = make_random_prefix_list(100 * PREFIXES_NUM, v6, 0);
+ struct f_trie *trie1 = f_new_trie(tmp_linpool, 0);
+ struct f_trie *trie2 = f_new_trie(tmp_linpool, 0);
- list prefixes; /* a list of f_extended_prefix structures */
- init_list(&prefixes);
- int i;
- for (i = 0; i < 100; i++)
- generate_random_ipv6_prefixes(&prefixes);
+ struct f_prefix_node *n;
+ WALK_LIST(n, *prefixes)
+ trie_add_prefix(trie1, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+
+ WALK_LIST_BACKWARDS(n, *prefixes)
+ trie_add_prefix(trie2, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+
+ bt_assert(trie_same(trie1, trie2));
+
+ v6 = !v6;
+ tmp_flush();
+ }
+
+ bt_bird_cleanup();
+ return 1;
+}
+
+static inline void
+log_networks(const net_addr *a, const net_addr *b)
+{
+ if (bt_verbose >= BT_VERBOSE_ABSOLUTELY_ALL)
+ {
+ char buf0[64];
+ char buf1[64];
+ bt_format_net(buf0, 64, a);
+ bt_format_net(buf1, 64, b);
+ bt_debug("Found %s expected %s\n", buf0, buf1);
+ }
+}
+
+static int
+t_trie_walk(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ for (int round = 0; round < TESTS_NUM*8; round++)
+ {
+ int level = round / TESTS_NUM;
+ int v6 = level % 2;
+ int num = PREFIXES_NUM * (int[]){1, 10, 100, 1000}[level / 2];
+ int pos = 0, end = 0;
+ list *prefixes = make_random_prefix_list(num, v6, 1);
+ struct f_trie *trie = make_trie_from_prefix_list(prefixes);
+ struct f_prefix *pxset = malloc((num + 1) * sizeof(struct f_prefix));
struct f_prefix_node *n;
- WALK_LIST(n, prefixes)
+ WALK_LIST(n, *prefixes)
+ pxset[pos++] = n->prefix;
+ memset(&pxset[pos], 0, sizeof (struct f_prefix));
+
+ qsort(pxset, num, sizeof(struct f_prefix), compare_prefixes);
+
+
+ /* Full walk */
+ bt_debug("Full walk (round %d, %d nets)\n", round, num);
+
+ pos = 0;
+ uint pxc = 0;
+ TRIE_WALK(trie, net, NULL)
{
- trie_add_prefix(trie1, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+ log_networks(&net, &pxset[pos].net);
+ bt_assert(net_equal(&net, &pxset[pos].net));
+
+ /* Skip possible duplicates */
+ while (net_equal(&pxset[pos].net, &pxset[pos + 1].net))
+ pos++;
+
+ pos++;
+ pxc++;
}
- WALK_LIST_BACKWARDS(n, prefixes)
+ TRIE_WALK_END;
+
+ bt_assert(pos == num);
+ bt_assert(pxc == trie->prefix_count);
+ bt_debug("Full walk done\n");
+
+
+ /* Prepare net for subnet walk - start with random prefix */
+ pos = bt_random() % num;
+ end = pos + (int[]){2, 2, 3, 4}[level / 2];
+ end = MIN(end, num);
+
+ struct f_prefix from = pxset[pos];
+
+ /* Find a common superprefix to several subsequent prefixes */
+ for (; pos < end; pos++)
{
- trie_add_prefix(trie2, &n->prefix.net, n->prefix.lo, n->prefix.hi);
+ if (net_equal(&from.net, &pxset[pos].net))
+ continue;
+
+ int common = !v6 ?
+ ip4_pxlen(net4_prefix(&from.net), net4_prefix(&pxset[pos].net)) :
+ ip6_pxlen(net6_prefix(&from.net), net6_prefix(&pxset[pos].net));
+ from.net.pxlen = MIN(from.net.pxlen, common);
+
+ if (!v6)
+ ((net_addr_ip4 *) &from.net)->prefix =
+ ip4_and(net4_prefix(&from.net), net4_prefix(&pxset[pos].net));
+ else
+ ((net_addr_ip6 *) &from.net)->prefix =
+ ip6_and(net6_prefix(&from.net), net6_prefix(&pxset[pos].net));
}
- bt_assert(trie_same(trie1, trie2));
+ /* Fix irrelevant bits */
+ if (!v6)
+ ((net_addr_ip4 *) &from.net)->prefix =
+ ip4_and(net4_prefix(&from.net), ip4_mkmask(net4_pxlen(&from.net)));
+ else
+ ((net_addr_ip6 *) &from.net)->prefix =
+ ip6_and(net6_prefix(&from.net), ip6_mkmask(net6_pxlen(&from.net)));
+
+
+ /* Find initial position for final prefix */
+ for (pos = 0; pos < num; pos++)
+ if (compare_prefixes(&pxset[pos], &from) >= 0)
+ break;
+
+ int p0 = pos;
+ char buf0[64];
+ bt_format_net(buf0, 64, &from.net);
+ bt_debug("Subnet walk for %s (round %d, %d nets)\n", buf0, round, num);
+
+ /* Subnet walk */
+ TRIE_WALK(trie, net, &from.net)
+ {
+ log_networks(&net, &pxset[pos].net);
+ bt_assert(net_equal(&net, &pxset[pos].net));
+ bt_assert(net_in_netX(&net, &from.net));
+
+ /* Skip possible duplicates */
+ while (net_equal(&pxset[pos].net, &pxset[pos + 1].net))
+ pos++;
- struct f_prefix_node *nxt;
- WALK_LIST_DELSAFE(n, nxt, prefixes)
+ pos++;
+ }
+ TRIE_WALK_END;
+
+ bt_assert((pos == num) || !net_in_netX(&pxset[pos].net, &from.net));
+ bt_debug("Subnet walk done for %s (found %d nets)\n", buf0, pos - p0);
+
+ tmp_flush();
+ }
+
+ bt_bird_cleanup();
+ return 1;
+}
+
+static int
+find_covering_nets(struct f_prefix *prefixes, int num, const net_addr *net, net_addr *found)
+{
+ struct f_prefix key;
+ net_addr *n = &key.net;
+ int found_num = 0;
+
+ net_copy(n, net);
+
+ while (1)
+ {
+ struct f_prefix *px =
+ bsearch(&key, prefixes, num, sizeof(struct f_prefix), compare_prefixes);
+
+ if (px)
+ {
+ net_copy(&found[found_num], n);
+ found_num++;
+ }
+
+ if (n->pxlen == 0)
+ return found_num;
+
+ n->pxlen--;
+
+ if (n->type == NET_IP4)
+ ip4_clrbit(&((net_addr_ip4 *) n)->prefix, n->pxlen);
+ else
+ ip6_clrbit(&((net_addr_ip6 *) n)->prefix, n->pxlen);
+ }
+}
+
+static int
+t_trie_walk_to_root(void)
+{
+ bt_bird_init();
+ bt_config_parse(BT_CONFIG_SIMPLE);
+
+ for (int round = 0; round < TESTS_NUM * 4; round++)
+ {
+ int level = round / TESTS_NUM;
+ int v6 = level % 2;
+ int num = PREFIXES_NUM * (int[]){32, 512}[level / 2];
+ int pos = 0;
+ int st = 0, sn = 0, sm = 0;
+
+ list *prefixes = make_random_prefix_list(num, v6, 1);
+ struct f_trie *trie = make_trie_from_prefix_list(prefixes);
+ struct f_prefix *pxset = malloc((num + 1) * sizeof(struct f_prefix));
+
+ struct f_prefix_node *pxn;
+ WALK_LIST(pxn, *prefixes)
+ pxset[pos++] = pxn->prefix;
+ memset(&pxset[pos], 0, sizeof (struct f_prefix));
+
+ qsort(pxset, num, sizeof(struct f_prefix), compare_prefixes);
+
+ int i;
+ for (i = 0; i < (PREFIX_TESTS_NUM / 10); i++)
{
- free(n);
+ net_addr from;
+ get_random_net(&from, v6);
+
+ net_addr found[129];
+ int found_num = find_covering_nets(pxset, num, &from, found);
+ int n = 0;
+
+ if (bt_verbose >= BT_VERBOSE_ABSOLUTELY_ALL)
+ {
+ char buf[64];
+ bt_format_net(buf, 64, &from);
+ bt_debug("Lookup for %s (expect %d)\n", buf, found_num);
+ }
+
+ /* Walk to root, separate for IPv4 and IPv6 */
+ if (!v6)
+ {
+ TRIE_WALK_TO_ROOT_IP4(trie, (net_addr_ip4 *) &from, net)
+ {
+ log_networks((net_addr *) &net, &found[n]);
+ bt_assert((n < found_num) && net_equal((net_addr *) &net, &found[n]));
+ n++;
+ }
+ TRIE_WALK_TO_ROOT_END;
+ }
+ else
+ {
+ TRIE_WALK_TO_ROOT_IP6(trie, (net_addr_ip6 *) &from, net)
+ {
+ log_networks((net_addr *) &net, &found[n]);
+ bt_assert((n < found_num) && net_equal((net_addr *) &net, &found[n]));
+ n++;
+ }
+ TRIE_WALK_TO_ROOT_END;
+ }
+
+ bt_assert(n == found_num);
+
+ /* Stats */
+ st += n;
+ sn += !!n;
+ sm = MAX(sm, n);
}
+
+ bt_debug("Success in %d / %d, sum %d, max %d\n", sn, i, st, sm);
+
+ tmp_flush();
}
+ bt_bird_cleanup();
return 1;
}
@@ -179,8 +885,15 @@ main(int argc, char *argv[])
{
bt_init(argc, argv);
- bt_test_suite(t_match_net, "Testing random prefix matching");
+ bt_test_suite(t_match_random_net, "Testing random prefix matching");
+ bt_test_suite(t_match_inner_net, "Testing random inner prefix matching");
+ bt_test_suite(t_match_outer_net, "Testing random outer prefix matching");
bt_test_suite(t_trie_same, "A trie filled forward should be same with a trie filled backward.");
+ bt_test_suite(t_trie_walk, "Testing TRIE_WALK() on random tries");
+ bt_test_suite(t_trie_walk_to_root, "Testing TRIE_WALK_TO_ROOT() on random tries");
+
+ // bt_test_suite(t_bench_trie_datasets_subset, "Benchmark tries from datasets by random subset of nets");
+ // bt_test_suite(t_bench_trie_datasets_random, "Benchmark tries from datasets by generated addresses");
return bt_exit_value();
}