-rw-r--r--   lib/mempool.c          | 66
-rw-r--r--   lib/resource.h         |  8
-rw-r--r--   nest/rt-table.c        |  2
-rw-r--r--   proto/bgp/attrs.c      |  7
-rw-r--r--   proto/bgp/packets.c    | 14
-rw-r--r--   proto/mrt/mrt.c        |  4
-rw-r--r--   proto/ospf/ospf.c      |  2
-rw-r--r--   proto/static/static.c  |  2

8 files changed, 45 insertions(+), 60 deletions(-)
diff --git a/lib/mempool.c b/lib/mempool.c
index c75f1f5b..325b1ecf 100644
--- a/lib/mempool.c
+++ b/lib/mempool.c
@@ -27,19 +27,18 @@
 struct lp_chunk {
   struct lp_chunk *next;
-  uint size;
   uintptr_t data_align[0];
   byte data[0];
 };
 
-const int lp_chunk_size = sizeof(struct lp_chunk);
+#define LP_DATA_SIZE (page_size - OFFSETOF(struct lp_chunk, data))
 
 struct linpool {
   resource r;
   byte *ptr, *end;
   struct lp_chunk *first, *current;     /* Normal (reusable) chunks */
   struct lp_chunk *first_large;         /* Large chunks */
-  uint chunk_size, threshold, total:31, use_pages:1, total_large;
+  uint total, total_large;
 };
 
 _Thread_local linpool *tmp_linpool;
@@ -61,25 +60,14 @@ static struct resclass lp_class = {
 /**
  * lp_new - create a new linear memory pool
  * @p: pool
- * @blk: block size
  *
  * lp_new() creates a new linear memory pool resource inside the pool @p.
- * The linear pool consists of a list of memory chunks of size at least
- * @blk.
+ * The linear pool consists of a list of memory chunks of page size.
  */
 linpool
-*lp_new(pool *p, uint blk)
+*lp_new(pool *p)
 {
-  linpool *m = ralloc(p, &lp_class);
-
-  if (!blk)
-  {
-    m->use_pages = 1;
-    blk = page_size - lp_chunk_size;
-  }
-
-  m->chunk_size = blk;
-  m->threshold = 3*blk/4;
-  return m;
+  return ralloc(p, &lp_class);
 }
 
 /**
@@ -110,14 +98,13 @@ lp_alloc(linpool *m, uint size)
   else
     {
       struct lp_chunk *c;
-      if (size >= m->threshold)
+      if (size > LP_DATA_SIZE)
         {
           /* Too large => allocate large chunk */
           c = xmalloc(sizeof(struct lp_chunk) + size);
           m->total_large += size;
           c->next = m->first_large;
           m->first_large = c;
-          c->size = size;
         }
       else
         {
@@ -129,14 +116,10 @@ lp_alloc(linpool *m, uint size)
           else
             {
               /* Need to allocate a new chunk */
-              if (m->use_pages)
-                c = alloc_page();
-              else
-                c = xmalloc(sizeof(struct lp_chunk) + m->chunk_size);
+              c = alloc_page();
 
-              m->total += m->chunk_size;
+              m->total += LP_DATA_SIZE;
               c->next = NULL;
-              c->size = m->chunk_size;
 
               if (m->current)
                 m->current->next = c;
@@ -145,7 +128,7 @@ lp_alloc(linpool *m, uint size)
         }
       m->current = c;
       m->ptr = c->data + size;
-      m->end = c->data + m->chunk_size;
+      m->end = c->data + LP_DATA_SIZE;
     }
   return c->data;
 }
@@ -207,7 +190,7 @@ lp_flush(linpool *m)
   /* Move ptr to the first chunk and free all large chunks */
   m->current = c = m->first;
   m->ptr = c ? c->data : NULL;
-  m->end = c ? c->data + m->chunk_size : NULL;
+  m->end = c ? c->data + LP_DATA_SIZE : NULL;
 
   while (c = m->first_large)
     {
@@ -230,6 +213,7 @@ lp_save(linpool *m, lp_state *p)
 {
   p->current = m->current;
   p->large = m->first_large;
+  p->total_large = m->total_large;
   p->ptr = m->ptr;
 }
 
@@ -251,12 +235,12 @@ lp_restore(linpool *m, lp_state *p)
 {
   /* Move ptr to the saved pos and free all newer large chunks */
   m->current = c = p->current;
   m->ptr = p->ptr;
-  m->end = c ? c->data + m->chunk_size : NULL;
+  m->end = c ? c->data + LP_DATA_SIZE : NULL;
+  m->total_large = p->total_large;
 
   while ((c = m->first_large) && (c != p->large))
     {
       m->first_large = c->next;
-      m->total_large -= c->size;
       xfree(c);
     }
 }
@@ -270,10 +254,7 @@ lp_free(resource *r)
   for(d=m->first; d; d = c)
     {
       c = d->next;
-      if (m->use_pages)
-        free_page(d);
-      else
-        xfree(d);
+      free_page(d);
     }
   for(d=m->first_large; d; d = c)
     {
@@ -293,9 +274,7 @@ lp_dump(resource *r)
     ;
   for(cntl=0, c=m->first_large; c; c=c->next, cntl++)
     ;
-  debug("(chunk=%d threshold=%d count=%d+%d total=%d+%d)\n",
-        m->chunk_size,
-        m->threshold,
+  debug("(count=%d+%d total=%d+%d)\n",
         cnt,
         cntl,
         m->total,
@@ -308,20 +287,18 @@ lp_memsize(resource *r)
   linpool *m = (linpool *) r;
   struct resmem sz = {
     .overhead = sizeof(struct linpool) + ALLOC_OVERHEAD,
+    .effective = m->total_large,
   };
 
   for (struct lp_chunk *c = m->first_large; c; c = c->next)
-  {
-    sz.effective += c->size;
-    sz.overhead += lp_chunk_size + ALLOC_OVERHEAD;
-  }
+    sz.overhead += sizeof(struct lp_chunk) + ALLOC_OVERHEAD;
 
   uint regular = 0;
   for (struct lp_chunk *c = m->first; c; c = c->next)
     regular++;
 
-  sz.effective += m->chunk_size * regular;
-  sz.overhead += (lp_chunk_size + ALLOC_OVERHEAD) * regular;
+  sz.effective += LP_DATA_SIZE * regular;
+  sz.overhead += (sizeof(struct lp_chunk) + ALLOC_OVERHEAD) * regular;
 
   return sz;
 }
@@ -334,10 +311,7 @@ lp_lookup(resource *r, unsigned long a)
   struct lp_chunk *c;
 
   for(c=m->first; c; c=c->next)
-    if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
-      return r;
-  for(c=m->first_large; c; c=c->next)
-    if ((unsigned long) c->data <= a && (unsigned long) c->data + c->size > a)
+    if ((unsigned long) c->data <= a && (unsigned long) c->data + LP_DATA_SIZE > a)
       return r;
   return NULL;
 }
diff --git a/lib/resource.h b/lib/resource.h
index 8b180603..a4e110a5 100644
--- a/lib/resource.h
+++ b/lib/resource.h
@@ -69,9 +69,10 @@ typedef struct linpool linpool;
 typedef struct lp_state {
   void *current, *large;
   byte *ptr;
+  uint total_large;
 } lp_state;
 
-linpool *lp_new(pool *, unsigned blk);
+linpool *lp_new(pool *);
 void *lp_alloc(linpool *, unsigned size);       /* Aligned */
 void *lp_allocu(linpool *, unsigned size);      /* Unaligned */
 void *lp_allocz(linpool *, unsigned size);      /* With clear */
@@ -88,10 +89,7 @@ extern _Thread_local linpool *tmp_linpool;     /* Temporary linpool autoflushed regularly */
 
 #define tmp_init(p) tmp_linpool = lp_new_default(p)
 #define tmp_flush() lp_flush(tmp_linpool)
 
-extern const int lp_chunk_size;
-#define LP_GAS 1024
-#define LP_GOOD_SIZE(x) (((x + LP_GAS - 1) & (~(LP_GAS - 1))) - lp_chunk_size)
-#define lp_new_default(p) lp_new(p, 0)
+#define lp_new_default lp_new
 
 /* Slabs */
diff --git a/nest/rt-table.c b/nest/rt-table.c
index b1ea1d98..30208f3f 100644
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@ -3488,7 +3488,7 @@ rt_init_hostcache(rtable *tab)
   hc_alloc_table(hc, tab->rp, HC_DEF_ORDER);
   hc->slab = sl_new(tab->rp, sizeof(struct hostentry));
 
-  hc->lp = lp_new(tab->rp, LP_GOOD_SIZE(1024));
+  hc->lp = lp_new(tab->rp);
   hc->trie = f_new_trie(hc->lp, 0);
 
   tab->hostcache = hc;
diff --git a/proto/bgp/attrs.c b/proto/bgp/attrs.c
index f4f7d15a..9f31c28b 100644
--- a/proto/bgp/attrs.c
+++ b/proto/bgp/attrs.c
@@ -2074,14 +2074,17 @@ bgp_rte_mergable(rte *pri, rte *sec)
   u32 p, s;
 
   /* Skip suppressed routes (see bgp_rte_recalculate()) */
-  /* LLGR draft - depreference stale routes */
-  if (pri->pflags != sec->pflags)
+  if ((pri->pflags ^ sec->pflags) & BGP_REF_SUPPRESSED)
     return 0;
 
   /* RFC 4271 9.1.2.1. Route resolvability test */
   if (rta_resolvable(pri->attrs) != rta_resolvable(sec->attrs))
     return 0;
 
+  /* LLGR draft - depreference stale routes */
+  if (rte_stale(pri) != rte_stale(sec))
+    return 0;
+
   /* Start with local preferences */
   x = ea_find(pri->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_LOCAL_PREF));
   y = ea_find(sec->attrs->eattrs, EA_CODE(PROTOCOL_BGP, BA_LOCAL_PREF));
diff --git a/proto/bgp/packets.c b/proto/bgp/packets.c
index c88165bc..2ebe2581 100644
--- a/proto/bgp/packets.c
+++ b/proto/bgp/packets.c
@@ -2324,6 +2324,9 @@ bgp_create_update(struct bgp_channel *c, byte *buf)
 
 again: ;
 
+  struct lp_state tmpp;
+  lp_save(tmp_linpool, &tmpp);
+
   /* Initialize write state */
   struct bgp_write_state s = {
     .proto = p,
@@ -2354,6 +2357,7 @@ again: ;
     if (EMPTY_LIST(buck->prefixes))
     {
       bgp_free_bucket(c, buck);
+      lp_restore(tmp_linpool, &tmpp);
       goto again;
     }
 
@@ -2367,7 +2371,10 @@ again: ;
       bgp_defer_bucket(c, buck);
 
       if (!res)
+      {
+        lp_restore(tmp_linpool, &tmpp);
         goto again;
+      }
 
       goto done;
     }
@@ -2378,7 +2385,7 @@ again: ;
 done:
   BGP_TRACE_RL(&rl_snd_update, D_PACKETS, "Sending UPDATE");
   p->stats.tx_updates++;
-  lp_flush(s.pool);
+  lp_restore(tmp_linpool, &tmpp);
 
   return res;
 }
@@ -2507,6 +2514,9 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
 
   bgp_start_timer(conn->hold_timer, conn->hold_time);
 
+  struct lp_state tmpp;
+  lp_save(tmp_linpool, &tmpp);
+
   /* Initialize parse state */
   struct bgp_parse_state s = {
     .proto = p,
@@ -2588,7 +2598,7 @@ bgp_rx_update(struct bgp_conn *conn, byte *pkt, uint len)
 
 done:
   rta_free(s.cached_rta);
-  lp_flush(s.pool);
+  lp_restore(tmp_linpool, &tmpp);
   return;
 }
diff --git a/proto/mrt/mrt.c b/proto/mrt/mrt.c
index 589e43fb..58b8b671 100644
--- a/proto/mrt/mrt.c
+++ b/proto/mrt/mrt.c
@@ -557,8 +557,8 @@ mrt_table_dump_init(pool *pp)
   struct mrt_table_dump_state *s = mb_allocz(pool, sizeof(struct mrt_table_dump_state));
 
   s->pool = pool;
-  s->linpool = lp_new(pool, 4080);
-  s->peer_lp = lp_new(pool, 4080);
+  s->linpool = lp_new(pool);
+  s->peer_lp = lp_new(pool);
   mrt_buffer_init(&s->buf, pool, 2 * MRT_ATTR_BUFFER_SIZE);
 
   /* We lock the current config as we may reference it indirectly by filter */
diff --git a/proto/ospf/ospf.c b/proto/ospf/ospf.c
index d651d48d..ab77de02 100644
--- a/proto/ospf/ospf.c
+++ b/proto/ospf/ospf.c
@@ -299,7 +299,7 @@ ospf_start(struct proto *P)
   p->lsab_size = 256;
   p->lsab_used = 0;
   p->lsab = mb_alloc(P->pool, p->lsab_size);
-  p->nhpool = lp_new(P->pool, 12*sizeof(struct nexthop));
+  p->nhpool = lp_new(P->pool);
   init_list(&(p->iface_list));
   init_list(&(p->area_list));
   fib_init(&p->rtf, P->pool, ospf_get_af(p), sizeof(ort), OFFSETOF(ort, fn), 0, NULL);
diff --git a/proto/static/static.c b/proto/static/static.c
index 8e0a3489..d027a8e6 100644
--- a/proto/static/static.c
+++ b/proto/static/static.c
@@ -475,7 +475,7 @@ static_start(struct proto *P)
   struct static_route *r;
 
   if (!static_lp)
-    static_lp = lp_new(&root_pool, LP_GOOD_SIZE(1024));
+    static_lp = lp_new(&root_pool);
 
   if (p->igp_table_ip4)
     rt_lock_table(p->igp_table_ip4);
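
Note on the new LP_DATA_SIZE macro: a regular chunk now occupies exactly one page, and its usable payload is the page minus the chunk header, measured up to the data[] member. A minimal standalone illustration of that arithmetic follows; it is not BIRD code, and the 4096-byte page size is an assumption (BIRD uses the runtime page_size):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Mirror of the patched struct lp_chunk, using the same GNU C
     * zero-length-array layout; the removed `size` field means every
     * page-sized chunk now carries the same fixed payload. */
    struct chunk {
      struct chunk *next;
      uintptr_t data_align[0];      /* forces pointer alignment of data[] */
      unsigned char data[0];
    };

    int main(void)
    {
      size_t page_size = 4096;      /* assumed; BIRD queries the real value */
      size_t data_size = page_size - offsetof(struct chunk, data);

      printf("header: %zu bytes, payload per page-sized chunk: %zu bytes\n",
             offsetof(struct chunk, data), data_size);
      return 0;
    }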
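For callers, the visible change is the lp_new() signature: the block-size hint and the LP_GOOD_SIZE()/LP_GAS helpers are gone, as the rt-table, MRT, OSPF and static hunks show. A sketch of the new call style, assuming an in-tree caller with an existing pool (the helper name, the pool argument and the 64 KiB request are illustrative, not part of the commit):

    #include "nest/bird.h"
    #include "lib/resource.h"

    /* Hypothetical helper, for illustration only. */
    static linpool *
    make_scratch_pool(pool *parent)
    {
      /* Previously: lp_new(parent, LP_GOOD_SIZE(1024)) or lp_new(parent, 0);
       * now every regular chunk is one page, so the hint is dropped.
       * lp_new_default() stays available as a plain alias for lp_new(). */
      linpool *lp = lp_new(parent);

      /* Requests larger than LP_DATA_SIZE are still served from separate
       * xmalloc'd "large" chunks inside lp_alloc(). */
      void *big = lp_alloc(lp, 65536);
      (void) big;

      return lp;
    }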
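The BGP hunks replace whole-pool lp_flush() calls on the shared tmp_linpool with an lp_save()/lp_restore() pair; because large chunks no longer record their own size, lp_state now also carries total_large so lp_restore() can put the counter back instead of subtracting per-chunk sizes. A minimal sketch of the pattern, assuming it runs inside BIRD where tmp_linpool has been set up by tmp_init(); the scratch allocation is just an example:

    #include <string.h>
    #include "nest/bird.h"
    #include "lib/resource.h"

    /* Hypothetical function, for illustration only. */
    static void
    with_temporary_data(void)
    {
      struct lp_state tmpp;
      lp_save(tmp_linpool, &tmpp);      /* remember ptr, current chunk,
                                           large-chunk list head and
                                           total_large */

      byte *scratch = lp_alloc(tmp_linpool, 128);
      memset(scratch, 0, 128);          /* ... build something temporary ... */

      lp_restore(tmp_linpool, &tmpp);   /* drop everything allocated since
                                           lp_save(): newer large chunks are
                                           freed and total_large is restored */
    }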