i386 Transparent Paravirtualization Patch #4
This change encapsulates the TLB flush accessors (__flush_tlb, __flush_tlb_global, and __flush_tlb_single) into the sub-architecture layer, moving them from include/asm-i386/tlbflush.h into mach-default/mach_tlbflush.h so that alternative sub-architectures can provide their own implementations.
Diffs against: linux-2.6.13-rc4-mm1
Signed-off-by: Zachary Amsden <[email protected]>
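
For illustration only, a paravirtualized sub-architecture could now supply its own
<mach_tlbflush.h> that routes these operations through the hypervisor instead of
rewriting CR3/CR4 directly. The sketch below is not part of this patch, and the
entry points hypervisor_flush_tlb(), hypervisor_flush_tlb_global() and
hypervisor_invlpg() are hypothetical placeholders for whatever interface the
sub-architecture actually exposes:

	#ifndef _MACH_TLBFLUSH_H
	#define _MACH_TLBFLUSH_H

	/* Hypothetical hypervisor entry points -- placeholders only. */
	extern void hypervisor_flush_tlb(void);
	extern void hypervisor_flush_tlb_global(void);
	extern void hypervisor_invlpg(unsigned long addr);

	/* Flush non-global TLB entries via the hypervisor. */
	#define __flush_tlb()		hypervisor_flush_tlb()

	/* Flush all TLB entries, including global pages. */
	#define __flush_tlb_global()	hypervisor_flush_tlb_global()

	/* Flush the TLB entry for a single linear address. */
	#define __flush_tlb_single(addr) \
		hypervisor_invlpg((unsigned long)(addr))

	#endif /* _MACH_TLBFLUSH_H */

Because generic code continues to include <mach_tlbflush.h> and use the same
three macros, no callers need to change when a sub-architecture substitutes its
own header.
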
Index: linux-2.6.13/include/asm-i386/tlbflush.h
===================================================================
--- linux-2.6.13.orig/include/asm-i386/tlbflush.h 2005-08-03 16:24:11.000000000 -0700
+++ linux-2.6.13/include/asm-i386/tlbflush.h 2005-08-03 16:29:58.000000000 -0700
@@ -4,38 +4,7 @@
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
-
-#define __flush_tlb() \
- do { \
- unsigned int tmpreg; \
- \
- __asm__ __volatile__( \
- "movl %%cr3, %0; \n" \
- "movl %0, %%cr3; # flush TLB \n" \
- : "=r" (tmpreg) \
- :: "memory"); \
- } while (0)
-
-/*
- * Global pages have to be flushed a bit differently. Not a real
- * performance problem because this does not happen often.
- */
-#define __flush_tlb_global() \
- do { \
- unsigned int tmpreg, cr4, cr4_orig; \
- \
- __asm__ __volatile__( \
- "movl %%cr4, %2; # turn off PGE \n" \
- "movl %2, %1; \n" \
- "andl %3, %1; \n" \
- "movl %1, %%cr4; \n" \
- "movl %%cr3, %0; \n" \
- "movl %0, %%cr3; # flush TLB \n" \
- "movl %2, %%cr4; # turn PGE back on \n" \
- : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
- : "i" (~X86_CR4_PGE) \
- : "memory"); \
- } while (0)
+#include <mach_tlbflush.h>
extern unsigned long pgkern_mask;
@@ -49,9 +18,6 @@
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-#define __flush_tlb_single(addr) \
- __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-
#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
Index: linux-2.6.13/include/asm-i386/mach-default/mach_tlbflush.h
===================================================================
--- linux-2.6.13.orig/include/asm-i386/mach-default/mach_tlbflush.h 2005-08-03 16:29:58.000000000 -0700
+++ linux-2.6.13/include/asm-i386/mach-default/mach_tlbflush.h 2005-08-03 16:31:31.000000000 -0700
@@ -0,0 +1,47 @@
+/*
+ * include/asm-i386/mach-default/mach_tlbflush.h
+ *
+ * Standard TLB accessors for running on real hardware
+ * Moved from include/asm-i386/tlbflush.h 07/05
+ *
+ */
+
+#ifndef _MACH_TLBFLUSH_H
+#define _MACH_TLBFLUSH_H
+
+#define __flush_tlb() \
+ do { \
+ unsigned int tmpreg; \
+ \
+ __asm__ __volatile__( \
+ "movl %%cr3, %0; \n" \
+ "movl %0, %%cr3; # flush TLB \n" \
+ : "=r" (tmpreg) \
+ :: "memory"); \
+ } while (0)
+
+/*
+ * Global pages have to be flushed a bit differently. Not a real
+ * performance problem because this does not happen often.
+ */
+#define __flush_tlb_global() \
+ do { \
+ unsigned int tmpreg, cr4, cr4_orig; \
+ \
+ __asm__ __volatile__( \
+ "movl %%cr4, %2; # turn off PGE \n" \
+ "movl %2, %1; \n" \
+ "andl %3, %1; \n" \
+ "movl %1, %%cr4; \n" \
+ "movl %%cr3, %0; \n" \
+ "movl %0, %%cr3; # flush TLB \n" \
+ "movl %2, %%cr4; # turn PGE back on \n" \
+ : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+ : "i" (~X86_CR4_PGE) \
+ : "memory"); \
+ } while (0)
+
+#define __flush_tlb_single(addr) \
+ __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
+
+#endif /* _MACH_TLBFLUSH_H */