[PATCH] set stack pointer in init_tss and init_thread

(Note: The patch is also attached because the inline version is certain
to get line-wrapped.)

Set the kernel stack pointer (rsp0) correctly in INIT_THREAD and
INIT_TSS, so that the init task's thread and the boot CPU's TSS point
at the top of init_stack instead of being left zero-initialized.

Signed-off-by: Jan Beulich <[email protected]>

diff -Npru 2.6.13/arch/x86_64/kernel/init_task.c 2.6.13-x86_64-init/arch/x86_64/kernel/init_task.c
--- 2.6.13/arch/x86_64/kernel/init_task.c	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-x86_64-init/arch/x86_64/kernel/init_task.c	2005-03-17 13:20:48.000000000 +0100
@@ -44,6 +44,6 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */ 
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
 
 #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff -Npru 2.6.13/include/asm-x86_64/processor.h 2.6.13-x86_64-init/include/asm-x86_64/processor.h
--- 2.6.13/include/asm-x86_64/processor.h	2005-08-29 01:41:01.000000000 +0200
+++ 2.6.13-x86_64-init/include/asm-x86_64/processor.h	2005-09-01 11:32:12.000000000 +0200
@@ -254,7 +254,13 @@ struct thread_struct {
 	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
 } __attribute__((aligned(16)));
 
-#define INIT_THREAD  {}
+#define INIT_THREAD  { \
+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
+
+#define INIT_TSS  { \
+	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+}
 
 #define INIT_MMAP \
 { &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
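
For reference, here is a minimal user-space sketch (not kernel code) of
why rsp0 is computed as (unsigned long)&init_stack + sizeof(init_stack):
on x86-64 the kernel stack grows downward, so the initial stack pointer
must be the address just past the end of the stack area. The names
STACK_SIZE and fake_init_stack below are illustrative stand-ins, not
kernel symbols.

#include <stdio.h>

#define STACK_SIZE 8192	/* stand-in for THREAD_SIZE */

/* stand-in for the init task's stack area (init_stack in the kernel) */
static unsigned long fake_init_stack[STACK_SIZE / sizeof(unsigned long)];

int main(void)
{
	unsigned long base = (unsigned long)&fake_init_stack;
	unsigned long top  = (unsigned long)&fake_init_stack
			     + sizeof(fake_init_stack);

	/* top - base == STACK_SIZE; the stack pointer starts at 'top'
	 * and moves toward 'base' as data is pushed */
	printf("base=%#lx top=%#lx size=%lu\n", base, top, top - base);
	return 0;
}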

Attachment: linux-2.6.13-x86_64-init.patch
Description: Binary data

