Commit bdfedb7

rientjes authored and torvalds committed
mm, mempool: poison elements backed by slab allocator
Mempools keep elements in a reserved pool for contexts in which allocation may not be possible. When an element is allocated from the reserved pool, its memory contents are the same as when it was added to the reserved pool. Because of this, elements lack any free poisoning that would detect use-after-free errors.

This patch adds free poisoning for elements backed by the slab allocator, which is possible because the mempool layer knows the object size of each element. When an element is added to the reserved pool, it is poisoned with POISON_FREE; when it is removed from the reserved pool, its contents are checked for POISON_FREE, and a mismatch emits a warning to the kernel log.

This is only effective for configs with CONFIG_DEBUG_SLAB or CONFIG_SLUB_DEBUG_ON.

[fabio.estevam@freescale.com: use '%zu' for printing 'size_t' variable]
[arnd@arndb.de: add missing include <linux/highmem.h>]

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Fabio Estevam <fabio.estevam@freescale.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent e244c9e commit bdfedb7
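
For context, a minimal sketch of a slab-backed mempool user that would exercise the new hooks. The struct foo name, the pool size of 4, and the foo_init()/foo_use()/foo_exit() functions are illustrative only; mempool_create_slab_pool(), mempool_alloc(), mempool_free(), and KMEM_CACHE() are the existing kernel APIs:

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct foo {
	int data[16];
};

static struct kmem_cache *foo_cache;
static mempool_t *foo_pool;

static void foo_use(void)
{
	/*
	 * If the underlying slab allocation fails and this falls back
	 * to the reserved pool, remove_element() verifies the poison
	 * bytes and logs "BUG: mempool element poison mismatch" if
	 * anything wrote to the element while it sat in the pool.
	 */
	struct foo *p = mempool_alloc(foo_pool, GFP_NOIO);

	if (p)
		mempool_free(p, foo_pool);	/* re-poisoned if it refills the pool */
}

static int __init foo_init(void)
{
	foo_cache = KMEM_CACHE(foo, 0);
	if (!foo_cache)
		return -ENOMEM;

	/*
	 * mempool_create_slab_pool() pre-allocates 4 reserved elements;
	 * with CONFIG_DEBUG_SLAB or CONFIG_SLUB_DEBUG_ON set, add_element()
	 * now poisons each of them with POISON_FREE plus a trailing
	 * POISON_END byte.
	 */
	foo_pool = mempool_create_slab_pool(4, foo_cache);
	if (!foo_pool) {
		kmem_cache_destroy(foo_cache);
		return -ENOMEM;
	}

	foo_use();
	return 0;
}

static void __exit foo_exit(void)
{
	mempool_destroy(foo_pool);	/* check_element() runs on each reserved element */
	kmem_cache_destroy(foo_cache);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Note that check_element() only runs when an allocation is actually satisfied from the reserved pool (or the pool is drained), and poison_element() only when a free actually refills it; elements served directly by the slab allocator are covered by slab's own debugging instead.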

File tree

1 file changed

+90 -2 lines changed


mm/mempool.c

Lines changed: 90 additions & 2 deletions
@@ -6,27 +6,115 @@
  * extreme VM load.
  *
  * started by Ingo Molnar, Copyright (C) 2001
+ * debugging by David Rientjes, Copyright (C) 2015
  */
 
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
 #include "slab.h"
 
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+static void poison_error(mempool_t *pool, void *element, size_t size,
+			 size_t byte)
+{
+	const int nr = pool->curr_nr;
+	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
+	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
+	int i;
+
+	pr_err("BUG: mempool element poison mismatch\n");
+	pr_err("Mempool %p size %zu\n", pool, size);
+	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
+	for (i = start; i < end; i++)
+		pr_cont("%x ", *(u8 *)(element + i));
+	pr_cont("%s\n", end < size ? "..." : "");
+	dump_stack();
+}
+
+static void __check_element(mempool_t *pool, void *element, size_t size)
+{
+	u8 *obj = element;
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
+
+		if (obj[i] != exp) {
+			poison_error(pool, element, size, i);
+			return;
+		}
+	}
+	memset(obj, POISON_INUSE, size);
+}
+
+static void check_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+		__check_element(pool, element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->free == mempool_free_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+
+static void __poison_element(void *element, size_t size)
+{
+	u8 *obj = element;
+
+	memset(obj, POISON_FREE, size - 1);
+	obj[size - 1] = POISON_END;
+}
+
+static void poison_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		__poison_element(element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->alloc == mempool_alloc_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static inline void check_element(mempool_t *pool, void *element)
+{
+}
+static inline void poison_element(mempool_t *pool, void *element)
+{
+}
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
+	poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
 static void *remove_element(mempool_t *pool)
 {
-	BUG_ON(pool->curr_nr <= 0);
-	return pool->elements[--pool->curr_nr];
+	void *element = pool->elements[--pool->curr_nr];
+
+	BUG_ON(pool->curr_nr < 0);
+	check_element(pool, element);
+	return element;
 }
 
 /**
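
In isolation, the element layout that __poison_element() writes and __check_element() expects can be demonstrated with a small standalone userspace sketch. The poison byte values match POISON_FREE (0x6b), POISON_END (0xa5), and POISON_INUSE (0x5a) from the kernel's include/linux/poison.h; the buffer size and corrupted offset below are made up for illustration:

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Values from the kernel's include/linux/poison.h */
#define POISON_INUSE	0x5a	/* for use-uninitialised poisoning */
#define POISON_FREE	0x6b	/* for use-after-free poisoning */
#define POISON_END	0xa5	/* end-byte of poisoning */

/* Mirrors __poison_element(): POISON_FREE everywhere, POISON_END last. */
static void poison(unsigned char *obj, size_t size)
{
	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

/*
 * Mirrors __check_element(): returns the offset of the first bad byte,
 * or -1 if the pattern is intact (in which case the element is
 * re-poisoned with POISON_INUSE, as the kernel code does).
 */
static long check(unsigned char *obj, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		unsigned char exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp)
			return (long)i;
	}
	memset(obj, POISON_INUSE, size);
	return -1;
}

int main(void)
{
	unsigned char elem[64];

	poison(elem, sizeof(elem));
	elem[17] = 0;	/* simulate a write-after-free */
	printf("mismatch at byte %ld\n", check(elem, sizeof(elem)));	/* prints 17 */
	return 0;
}

The kernel version additionally dumps a window of BITS_PER_LONG / 8 bytes on either side of the first mismatch (see poison_error() above) rather than just reporting the offset.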
