path: root/src/hashtables.c
author    Sultan Alsawaf <sultan@kerneltoast.com>    2019-02-02 14:13:03 -0800
committer Jason A. Donenfeld <Jason@zx2c4.com>       2019-02-03 18:27:33 +0100
commit    75f476905a1bcd013c8bee07282fd7fb1dca738e (patch)
tree      987fcd2a85f2afe63f0da292826646192bfb8eec /src/hashtables.c
parent    d063dfb80911854ed273f6374b02f339a86462ba (diff)
hashtables: decouple hashtable allocations from the main device allocation
The hashtable allocations are quite large, and cause the device allocation in the net framework to stall sometimes while it tries to find a contiguous region that can fit the device struct:

[<0000000000000000>] __switch_to+0x94/0xb8
[<0000000000000000>] __alloc_pages_nodemask+0x764/0x7e8
[<0000000000000000>] kmalloc_order+0x20/0x40
[<0000000000000000>] __kmalloc+0x144/0x1a0
[<0000000000000000>] alloc_netdev_mqs+0x5c/0x368
[<0000000000000000>] rtnl_create_link+0x48/0x180
[<0000000000000000>] rtnl_newlink+0x410/0x708
[<0000000000000000>] rtnetlink_rcv_msg+0x190/0x1f8
[<0000000000000000>] netlink_rcv_skb+0x4c/0xf8
[<0000000000000000>] rtnetlink_rcv+0x30/0x40
[<0000000000000000>] netlink_unicast+0x18c/0x208
[<0000000000000000>] netlink_sendmsg+0x19c/0x348
[<0000000000000000>] sock_sendmsg+0x3c/0x58
[<0000000000000000>] ___sys_sendmsg+0x290/0x2b0
[<0000000000000000>] __sys_sendmsg+0x58/0xa0
[<0000000000000000>] SyS_sendmsg+0x10/0x20
[<0000000000000000>] el0_svc_naked+0x34/0x38
[<0000000000000000>] 0xffffffffffffffff

To fix the allocation stalls, decouple the hashtable allocations from the device allocation and allocate the hashtables with kvmalloc's implicit __GFP_NORETRY so that the allocations fall back to vmalloc with little resistance.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
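For context, a minimal sketch of how the device setup path might consume the new allocators. The caller changes live outside this file and are not part of this diff; the function signature, the wg_device field names, and the error labels below are illustrative assumptions, not taken from this patch:

static int wg_newlink(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);	/* illustrative private data */
	int ret = -ENOMEM;

	/* Each table is now a separate kvmalloc'd object rather than part of
	 * the netdev private area, so the device allocation itself no longer
	 * has to find one large contiguous region for everything. */
	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	/* ... remaining device initialization ... */
	return 0;

err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}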
Diffstat (limited to 'src/hashtables.c')
-rw-r--r--  src/hashtables.c  16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/src/hashtables.c b/src/hashtables.c
index 18cac91..8aedc17 100644
--- a/src/hashtables.c
+++ b/src/hashtables.c
@@ -19,11 +19,17 @@ static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
-void wg_pubkey_hashtable_init(struct pubkey_hashtable *table)
+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
+ return table;
}
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
@@ -74,10 +80,16 @@ static struct hlist_head *index_bucket(struct index_hashtable *table,
(HASH_SIZE(table->hashtable) - 1)];
}
-void wg_index_hashtable_init(struct index_hashtable *table)
+struct index_hashtable *wg_index_hashtable_alloc(void)
{
+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
+
+ if (!table)
+ return NULL;
+
hash_init(table->hashtable);
spin_lock_init(&table->lock);
+ return table;
}
/* At the moment, we limit ourselves to 2^20 total peers, which generally might
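
Note on teardown (not shown in this diff): memory obtained with kvmalloc() may have come from either kmalloc() or vmalloc(), so the tables must be released with kvfree() rather than kfree(). A hedged sketch of what the corresponding destruction path might look like, with names assumed for illustration only:

static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);	/* illustrative */

	/* ... tear down peers, queues, etc. ... */

	/* kvfree() handles both the kmalloc and vmalloc cases. */
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
}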