[PATCH for review] [49/145] x86_64: Clean up and minor fixes to TLB flush

- Convert CR* accesses to dedicated inline functions and rewrite the rest
  as C inlines
- Don't do a double flush for global flushes (pointed out by Zach Amsden).
  This was a workaround for a bug in old CPUs that cannot run 64-bit code
  and is obsolete on x86-64.
- Add a proper memory clobber to invlpg (a short sketch of why follows below)
- Remove an unused extern (pgkern_mask)
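
The memory clobber matters because the asm statement only tells the
compiler about the operands it names; without the clobber, a preceding
page-table store can still be sitting in a register or be reordered past
the flush. A minimal sketch of the intended usage pattern (illustrative
only, not part of this patch; the caller below is hypothetical):

    /* New form of the macro, as introduced by this patch: */
    #define __flush_tlb_one(addr) \
            __asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")

    /* Hypothetical caller (not in the patch): the "memory" clobber keeps
     * the compiler from caching *pte in a register or sinking the store
     * below the INVLPG, so the flush sees the updated entry.
     */
    static void update_pte_and_flush(unsigned long *pte, unsigned long newval,
                                     unsigned long vaddr)
    {
            *pte = newval;          /* page-table store */
            __flush_tlb_one(vaddr); /* flush the stale translation */
    }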

Signed-off-by: Andi Kleen <[email protected]>

---
 include/asm-x86_64/pgtable.h  |    2 -
 include/asm-x86_64/tlbflush.h |   66 +++++++++++++++++++++---------------------
 2 files changed, 33 insertions(+), 35 deletions(-)

Index: linux/include/asm-x86_64/pgtable.h
===================================================================
--- linux.orig/include/asm-x86_64/pgtable.h
+++ linux/include/asm-x86_64/pgtable.h
@@ -25,8 +25,6 @@ extern int nonx_setup(char *str);
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
-extern unsigned long pgkern_mask;
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
Index: linux/include/asm-x86_64/tlbflush.h
===================================================================
--- linux.orig/include/asm-x86_64/tlbflush.h
+++ linux/include/asm-x86_64/tlbflush.h
@@ -4,44 +4,44 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()							\
-	do {								\
-		unsigned long tmpreg;					\
-									\
-		__asm__ __volatile__(					\
-			"movq %%cr3, %0;  # flush TLB \n"		\
-			"movq %0, %%cr3;              \n"		\
-			: "=r" (tmpreg)					\
-			:: "memory");					\
-	} while (0)
+static inline unsigned long get_cr3(void)
+{
+	unsigned long cr3;
+	asm volatile("mov %%cr3,%0" : "=r" (cr3));
+	return cr3;
+}
 
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global()						\
-	do {								\
-		unsigned long tmpreg, cr4, cr4_orig;			\
-									\
-		__asm__ __volatile__(					\
-			"movq %%cr4, %2;  # turn off PGE     \n"	\
-			"movq %2, %1;                        \n"	\
-			"andq %3, %1;                        \n"	\
-			"movq %1, %%cr4;                     \n"	\
-			"movq %%cr3, %0;  # flush TLB        \n"	\
-			"movq %0, %%cr3;                     \n"	\
-			"movq %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
-			: "i" (~X86_CR4_PGE)				\
-			: "memory");					\
-	} while (0)
+static inline void set_cr3(unsigned long cr3)
+{
+	asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
+}
+
+static inline void __flush_tlb(void)
+{
+	set_cr3(get_cr3());
+}
+
+static inline unsigned long get_cr4(void)
+{
+	unsigned long cr4;
+	asm volatile("mov %%cr4,%0" : "=r" (cr4));
+	return cr4;
+}
 
-extern unsigned long pgkern_mask;
+static inline void set_cr4(unsigned long cr4)
+{
+	asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
+}
 
-#define __flush_tlb_all() __flush_tlb_global()
+static inline void __flush_tlb_all(void)
+{
+	unsigned long cr4 = get_cr4();
+	set_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
+	set_cr4(cr4);			/* write old PGE again and flush TLBs */
+}
 
 #define __flush_tlb_one(addr) \
-	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
 
 
 /*