diff options
author | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-07-04 05:52:55 +0200 |
---|---|---|
committer | Jason A. Donenfeld <Jason@zx2c4.com> | 2017-07-04 16:48:22 +0200 |
commit | aeabe1f276cba70a2ba305eee7866b959ea5c662 (patch) | |
tree | 1ba9dcc3cc5d6c0d93e54816927e03724b204367 /src | |
parent | 0d154c26135b65b81dc33f714e0bac05d4924fb6 (diff) |
ratelimiter: use kvzalloc for hash table allocation
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Diffstat (limited to 'src')
-rw-r--r-- | src/compat/compat.h | 35 |
-rw-r--r-- | src/ratelimiter.c | 15 |
2 files changed, 41 insertions, 9 deletions
diff --git a/src/compat/compat.h b/src/compat/compat.h index 1da51f1..38803b2 100644 --- a/src/compat/compat.h +++ b/src/compat/compat.h @@ -300,6 +300,41 @@ static inline u64 ktime_get_ns(void) #define inet_confirm_addr(a,b,c,d,e) inet_confirm_addr(b,c,d,e) #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0) +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/slab.h> +static inline void *kvmalloc(size_t size, gfp_t flags) +{ + gfp_t kmalloc_flags = flags; + void *ret; + if (size > PAGE_SIZE) { + kmalloc_flags |= __GFP_NOWARN; + if (!(kmalloc_flags & __GFP_REPEAT) || (size <= PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + kmalloc_flags |= __GFP_NORETRY; + } + ret = kmalloc(size, kmalloc_flags); + if (ret || size <= PAGE_SIZE) + return ret; + return __vmalloc(size, flags, PAGE_KERNEL); +} +static inline void *kvzalloc(size_t size, gfp_t flags) +{ + return kvmalloc(size, flags | __GFP_ZERO); +} +#endif + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 41)) && !defined(ISUBUNTU1404) +#include <linux/vmalloc.h> +static inline void kvfree(const void *addr) +{ + if (is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} +#endif + /* https://lkml.org/lkml/2017/6/23/790 */ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/ip.h> diff --git a/src/ratelimiter.c b/src/ratelimiter.c index b3fdd4c..ebad1f4 100644 --- a/src/ratelimiter.c +++ b/src/ratelimiter.c @@ -2,9 +2,8 @@ #include "ratelimiter.h" #include <linux/siphash.h> -#include <linux/vmalloc.h> +#include <linux/mm.h> #include <linux/slab.h> -#include <linux/hashtable.h> #include <net/ip.h> static struct kmem_cache *entry_cache; @@ -154,18 +153,16 @@ int ratelimiter_init(void) table_size = (totalram_pages > (1 << 30) / PAGE_SIZE) ? 
8192 : max_t(unsigned long, 16, roundup_pow_of_two((totalram_pages << PAGE_SHIFT) / (1 << 14) / sizeof(struct hlist_head))); max_entries = table_size * 8; - table_v4 = vmalloc(table_size * sizeof(struct hlist_head)); + table_v4 = kvzalloc(table_size * sizeof(struct hlist_head), GFP_KERNEL); if (!table_v4) goto err_kmemcache; - __hash_init(table_v4, table_size); #if IS_ENABLED(CONFIG_IPV6) - table_v6 = vmalloc(table_size * sizeof(struct hlist_head)); + table_v6 = kvzalloc(table_size * sizeof(struct hlist_head), GFP_KERNEL); if (!table_v6) { - vfree(table_v4); + kvfree(table_v4); goto err_kmemcache; } - __hash_init(table_v6, table_size); #endif queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); @@ -187,9 +184,9 @@ void ratelimiter_uninit(void) cancel_delayed_work_sync(&gc_work); gc_entries(NULL); synchronize_rcu(); - vfree(table_v4); + kvfree(table_v4); #if IS_ENABLED(CONFIG_IPV6) - vfree(table_v6); + kvfree(table_v6); #endif kmem_cache_destroy(entry_cache); } |