On some architectures, mapping the scatterlist may coalesce entries:
if that coalesced list is then used for freeing the pages afterwards,
there's a danger that pages may be doubly freed (and others leaked).
Fix InfiniBand's __ib_umem_release by freeing from a separate array of
saved page pointers placed beyond the scatterlist; IB_UMEM_MAX_PAGE_CHUNK
is lowered so that a chunk plus both arrays still fits in one page.
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Roland Dreier <[email protected]>
---
drivers/infiniband/core/uverbs_mem.c | 22 ++++++++++++++++------
include/rdma/ib_verbs.h | 3 +--
2 files changed, 17 insertions(+), 8 deletions(-)
46fc99a4a1429f843e3b6df8ed1f571944bef4e2
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index 36a32c3..87a363e 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -49,15 +49,18 @@ struct ib_umem_account_work {
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
struct ib_umem_chunk *chunk, *tmp;
+ struct page **sg_pages;
int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
dma_unmap_sg(dev->dma_device, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
+ /* Scatterlist may have been coalesced: free saved pagelist */
+ sg_pages = (struct page **) (chunk->page_list + chunk->nents);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
- set_page_dirty_lock(chunk->page_list[i].page);
- put_page(chunk->page_list[i].page);
+ set_page_dirty_lock(sg_pages[i]);
+ put_page(sg_pages[i]);
}
kfree(chunk);
@@ -69,11 +72,13 @@ int ib_umem_get(struct ib_device *dev, s
{
struct page **page_list;
struct ib_umem_chunk *chunk;
+ struct page **sg_pages;
unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
int ret = 0;
+ int nents;
int off;
int i;
@@ -121,16 +126,21 @@ int ib_umem_get(struct ib_device *dev, s
off = 0;
while (ret) {
- chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
- min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
+ nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ chunk = kmalloc(sizeof *chunk +
+ sizeof (struct scatterlist) * nents +
+ sizeof (struct page *) * nents,
GFP_KERNEL);
if (!chunk) {
ret = -ENOMEM;
goto out;
}
+ /* Save pages to be freed in array beyond scatterlist */
+ sg_pages = (struct page **) (chunk->page_list + nents);
- chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ chunk->nents = nents;
for (i = 0; i < chunk->nents; ++i) {
+ sg_pages[i] = page_list[i + off];
chunk->page_list[i].page = page_list[i + off];
chunk->page_list[i].offset = 0;
chunk->page_list[i].length = PAGE_SIZE;
@@ -142,7 +152,7 @@ int ib_umem_get(struct ib_device *dev, s
DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
- put_page(chunk->page_list[i].page);
+ put_page(sg_pages[i]);
kfree(chunk);
ret = -ENOMEM;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 22fc886..239c11d 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -696,8 +696,7 @@ struct ib_udata {
#define IB_UMEM_MAX_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
- ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
- (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+ (sizeof (struct scatterlist) + sizeof (struct page *)))
struct ib_umem_object {
struct ib_uobject uobject;
--
1.1.3
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
[Index of Archives]
[Kernel Newbies]
[Netfilter]
[Bugtraq]
[Photo]
[Stuff]
[Gimp]
[Yosemite News]
[MIPS Linux]
[ARM Linux]
[Linux Security]
[Linux RAID]
[Video 4 Linux]
[Linux for the blind]
[Linux Resources]