On some architectures, mapping a scatterlist with dma_map_sg() may
coalesce its entries: if that coalesced list is then used for freeing
the pages afterwards, there's a danger that pages may be doubly freed
(and others leaked).  Fix InfiniBand's __ib_umem_release() by freeing
from a separate array of page pointers kept beyond the scatterlist;
IB_UMEM_MAX_PAGE_CHUNK is lowered so that scatterlist and page array
together still fit in one page.
Signed-off-by: Hugh Dickins <[email protected]>
---
Warning: untested!  And please double-check the adjusted definition of
IB_UMEM_MAX_PAGE_CHUNK: the old definition computed the per-entry size
as the pointer difference between &page_list[1] and &page_list[0],
which is just sizeof(struct scatterlist) spelt without "sizeof", but I
don't understand why it went out of its way to avoid it.
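
For reviewers, a sketch (illustration only, not part of the patch) of
the chunk layout the fix relies on: each chunk is a single allocation,
with an array of bare struct page pointers placed immediately after
the scatterlist array.  dma_map_sg() may coalesce and rewrite the
scatterlist entries, but the saved pointers are never touched by the
mapping, so they remain safe to free from:

	/*
	 * One chunk allocation, at most PAGE_SIZE bytes:
	 *
	 *	struct ib_umem_chunk	header (list, nents, nmap)
	 *	struct scatterlist	page_list[nents]  <- dma_map_sg()
	 *						     may coalesce these
	 *	struct page	       *sg_pages[nents]   <- untouched by the
	 *						     mapping
	 */
	sg_pages = (struct page **) (chunk->page_list + nents);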
drivers/infiniband/core/uverbs_mem.c | 22 ++++++++++++++++------
include/rdma/ib_verbs.h | 3 +--
2 files changed, 17 insertions(+), 8 deletions(-)
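
To make the ib_verbs.h arithmetic concrete: with the new definition,
each chunk entry is charged for both its scatterlist slot and its
saved page pointer, so a maximal chunk still fits in a single page.
A sanity check (hypothetical, not part of the patch; BUILD_BUG_ON is
from <linux/kernel.h>) could read:

	/*
	 * A chunk of IB_UMEM_MAX_PAGE_CHUNK entries must fit in one
	 * page: the header, then one scatterlist slot plus one saved
	 * struct page pointer per entry.
	 */
	BUILD_BUG_ON(offsetof(struct ib_umem_chunk, page_list) +
		     IB_UMEM_MAX_PAGE_CHUNK *
		     (sizeof(struct scatterlist) + sizeof(struct page *)) >
		     PAGE_SIZE);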
--- 2.6.16-rc2/drivers/infiniband/core/uverbs_mem.c 2005-10-28 01:02:08.000000000 +0100
+++ linux/drivers/infiniband/core/uverbs_mem.c 2006-02-03 09:59:37.000000000 +0000
@@ -49,15 +49,18 @@ struct ib_umem_account_work {
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
struct ib_umem_chunk *chunk, *tmp;
+ struct page **sg_pages;
int i;
 
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
dma_unmap_sg(dev->dma_device, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
+ /* Scatterlist may have been coalesced: free saved pagelist */
+ sg_pages = (struct page **) (chunk->page_list + chunk->nents);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
- set_page_dirty_lock(chunk->page_list[i].page);
- put_page(chunk->page_list[i].page);
+ set_page_dirty_lock(sg_pages[i]);
+ put_page(sg_pages[i]);
}
 
kfree(chunk);
@@ -69,11 +72,13 @@ int ib_umem_get(struct ib_device *dev, s
{
struct page **page_list;
struct ib_umem_chunk *chunk;
+ struct page **sg_pages;
unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
int ret = 0;
+ int nents;
int off;
int i;
 
@@ -121,16 +126,21 @@ int ib_umem_get(struct ib_device *dev, s
off = 0;
 
while (ret) {
- chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
- min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
+ nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ chunk = kmalloc(sizeof *chunk +
+ sizeof (struct scatterlist) * nents +
+ sizeof (struct page *) * nents,
GFP_KERNEL);
if (!chunk) {
ret = -ENOMEM;
goto out;
}
 
+ /* Save pages to be freed in array beyond scatterlist */
+ sg_pages = (struct page **) (chunk->page_list + nents);
- chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+ chunk->nents = nents;
for (i = 0; i < chunk->nents; ++i) {
+ sg_pages[i] =
chunk->page_list[i].page = page_list[i + off];
chunk->page_list[i].offset = 0;
chunk->page_list[i].length = PAGE_SIZE;
@@ -142,7 +152,7 @@ int ib_umem_get(struct ib_device *dev, s
DMA_BIDIRECTIONAL);
if (chunk->nmap <= 0) {
for (i = 0; i < chunk->nents; ++i)
- put_page(chunk->page_list[i].page);
+ put_page(sg_pages[i]);
 
kfree(chunk);
ret = -ENOMEM;
--- 2.6.16-rc2/include/rdma/ib_verbs.h 2006-02-03 09:32:50.000000000 +0000
+++ linux/include/rdma/ib_verbs.h 2006-02-03 09:59:37.000000000 +0000
@@ -696,8 +696,7 @@ struct ib_udata {
 
#define IB_UMEM_MAX_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \
- ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] - \
- (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+ (sizeof(struct scatterlist) + sizeof(struct page *)))
 
struct ib_umem_object {
struct ib_uobject uobject;