This patch uses the mem_map pages to find the bigblock descriptor for
large allocations: the descriptor pointer is stashed in the allocation's
struct page (page->lru.next), so kfree() and ksize() can look it up
directly instead of walking the global bigblocks list, and that list and
its block_lock spinlock go away.
-- Steve
Signed-off-by: Steven Rostedt <[email protected]>
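
Illustration only, not part of the patch: a rough, self-contained userspace
sketch of the lookup scheme. The arena/page_block names, sizes and helpers
below are invented for the example; in the patch itself the per-page slot is
page->lru.next, reached through virt_to_page(), and the helpers are
set_slob_block(), get_slob_block() and zero_slob_block().

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE  4096UL
#define SKETCH_NR_PAGES   16

struct bigblock {
	int order;
	void *pages;
};

/* Toy stand-in for mem_map: one descriptor slot per page frame. */
static struct bigblock *page_block[SKETCH_NR_PAGES];

/* Toy "physical memory", page aligned so the masking below works. */
static char arena[SKETCH_NR_PAGES * SKETCH_PAGE_SIZE]
	__attribute__((aligned(4096)));

/* Page-frame number of an address inside the arena (plays virt_to_page()). */
static unsigned long pfn_of(const void *mem)
{
	uintptr_t base = (uintptr_t)mem & ~(SKETCH_PAGE_SIZE - 1);

	return (base - (uintptr_t)arena) / SKETCH_PAGE_SIZE;
}

/* Like set_slob_block(): remember which descriptor owns this page. */
static void set_block(const void *mem, struct bigblock *bb)
{
	page_block[pfn_of(mem)] = bb;
}

/* Like get_slob_block(): recover the descriptor from the pointer alone. */
static struct bigblock *get_block(const void *mem)
{
	return page_block[pfn_of(mem)];
}

int main(void)
{
	/* Pretend the third page of the arena was handed out as a bigblock. */
	struct bigblock bb = { .order = 0, .pages = &arena[2 * SKETCH_PAGE_SIZE] };

	set_block(bb.pages, &bb);

	/* Later, given only the pointer, find the descriptor in O(1). */
	struct bigblock *found = get_block(bb.pages);

	printf("order=%d, found descriptor: %s\n",
	       found->order, found == &bb ? "yes" : "no");
	return 0;
}

The point of the design is that the owning descriptor is reachable in O(1)
from the pointer alone, which is why kfree() and ksize() in the patch below
no longer need the bigblocks list or block_lock.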
Index: linux-2.6.15-rc5-rt2/mm/slob.c
===================================================================
--- linux-2.6.15-rc5-rt2.orig/mm/slob.c 2005-12-19 10:45:55.000000000 -0500
+++ linux-2.6.15-rc5-rt2/mm/slob.c 2005-12-19 14:12:08.000000000 -0500
@@ -50,15 +50,42 @@
struct bigblock {
int order;
void *pages;
- struct bigblock *next;
};
typedef struct bigblock bigblock_t;
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
-static bigblock_t *bigblocks;
static DEFINE_SPINLOCK(slob_lock);
-static DEFINE_SPINLOCK(block_lock);
+
+#define __get_slob_block(b) ((unsigned long)(b) & ~(PAGE_SIZE-1))
+
+static inline struct page *get_slob_page(const void *mem)
+{
+ void *virt = (void*)__get_slob_block(mem);
+
+ return virt_to_page(virt);
+}
+
+static inline void zero_slob_block(const void *b)
+{
+ struct page *page;
+ page = get_slob_page(b);
+ memset(&page->lru, 0, sizeof(page->lru));
+}
+
+static inline void *get_slob_block(const void *b)
+{
+ struct page *page;
+ page = get_slob_page(b);
+ return page->lru.next;
+}
+
+static inline void set_slob_block(const void *b, void *data)
+{
+ struct page *page;
+ page = get_slob_page(b);
+ page->lru.next = data;
+}
static void slob_free(void *b, int size);
@@ -108,6 +135,7 @@
if (!cur)
return 0;
+ zero_slob_block(cur);
slob_free(cur, PAGE_SIZE);
spin_lock_irqsave(&slob_lock, flags);
cur = slobfree;
@@ -162,7 +190,6 @@
{
slob_t *m;
bigblock_t *bb;
- unsigned long flags;
if (size < PAGE_SIZE - SLOB_UNIT) {
m = slob_alloc(size + SLOB_UNIT, gfp, 0);
@@ -177,10 +204,7 @@
bb->pages = (void *)__get_free_pages(gfp, bb->order);
if (bb->pages) {
- spin_lock_irqsave(&block_lock, flags);
- bb->next = bigblocks;
- bigblocks = bb;
- spin_unlock_irqrestore(&block_lock, flags);
+ set_slob_block(bb->pages, bb);
return bb->pages;
}
@@ -192,25 +216,16 @@
void kfree(const void *block)
{
- bigblock_t *bb, **last = &bigblocks;
- unsigned long flags;
+ bigblock_t *bb;
if (!block)
return;
- if (!((unsigned long)block & (PAGE_SIZE-1))) {
- /* might be on the big block list */
- spin_lock_irqsave(&block_lock, flags);
- for (bb = bigblocks; bb; last = &bb->next, bb = bb->next) {
- if (bb->pages == block) {
- *last = bb->next;
- spin_unlock_irqrestore(&block_lock, flags);
- free_pages((unsigned long)block, bb->order);
- slob_free(bb, sizeof(bigblock_t));
- return;
- }
- }
- spin_unlock_irqrestore(&block_lock, flags);
+ bb = get_slob_block(block);
+ if (bb) {
+ free_pages((unsigned long)block, bb->order);
+ slob_free(bb, sizeof(bigblock_t));
+ return;
}
slob_free((slob_t *)block - 1, 0);
@@ -222,20 +237,13 @@
unsigned int ksize(const void *block)
{
bigblock_t *bb;
- unsigned long flags;
if (!block)
return 0;
- if (!((unsigned long)block & (PAGE_SIZE-1))) {
- spin_lock_irqsave(&block_lock, flags);
- for (bb = bigblocks; bb; bb = bb->next)
- if (bb->pages == block) {
- spin_unlock_irqrestore(&slob_lock, flags);
- return PAGE_SIZE << bb->order;
- }
- spin_unlock_irqrestore(&block_lock, flags);
- }
+ bb = get_slob_block(block);
+ if (bb)
+ return PAGE_SIZE << bb->order;
return ((slob_t *)block - 1)->units * SLOB_UNIT;
}