* Ingo Molnar <[email protected]> wrote:
> the way i fixed it in my tree was to add a trylock_kernel(), and to
> check for success in init/main.c. See the patch below.
i had a silly bug in the spinlock variant, and some extra unneeded
change from another debug patch - fixed patch is below. Tested on x86,
with and without CONFIG_PREEMPT_BKL.
Ingo
--
introduce trylock_kernel(), to be used by the early init code to acquire
the BKL in an atomic way.
Signed-off-by: Ingo Molnar <[email protected]>
---
include/linux/smp_lock.h | 1 +
init/main.c | 13 ++++++++-----
lib/kernel_lock.c | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+), 5 deletions(-)
Index: linux/include/linux/smp_lock.h
===================================================================
--- linux.orig/include/linux/smp_lock.h
+++ linux/include/linux/smp_lock.h
@@ -39,6 +39,7 @@ static inline int reacquire_kernel_lock(
}
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
+extern int __lockfunc trylock_kernel(void);
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
#else
Index: linux/init/main.c
===================================================================
--- linux.orig/init/main.c
+++ linux/init/main.c
@@ -443,11 +443,14 @@ asmlinkage void __init start_kernel(void
{
char * command_line;
extern struct kernel_param __start___param[], __stop___param[];
-/*
- * Interrupts are still disabled. Do necessary setups, then
- * enable them
- */
- lock_kernel();
+
+ /*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them. This is the first time we take the BKL, so
+ * it must succeed:
+ */
+ if (!trylock_kernel())
+ WARN_ON(1);
page_address_init();
printk(KERN_NOTICE);
printk(linux_banner);
Index: linux/lib/kernel_lock.c
===================================================================
--- linux.orig/lib/kernel_lock.c
+++ linux/lib/kernel_lock.c
@@ -76,6 +76,23 @@ void __lockfunc lock_kernel(void)
task->lock_depth = depth;
}
+/*
+ * Try to acquire the Big Kernel Lock without sleeping or spinning
+ * (CONFIG_PREEMPT_BKL variant: the BKL is a semaphore here).
+ *
+ * Returns 1 on success, 0 if the semaphore is already held by
+ * someone else. Safe to call with interrupts disabled, which is
+ * why early init code uses it instead of lock_kernel().
+ */
+int __lockfunc trylock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	/* lock_depth is -1 when the BKL is not held; depth == 0 means
+	 * this is the outermost acquisition and we must really take it.
+	 * A recursive re-entry (depth > 0) only bumps the counter. */
+	if (likely(!depth)) {
+		if (unlikely(down_trylock(&kernel_sem)))
+			return 0;
+		else
+			__acquire(kernel_sem);	/* sparse: annotate the acquire */
+	}
+
+	task->lock_depth = depth;
+	return 1;
+}
+
+
void __lockfunc unlock_kernel(void)
{
struct task_struct *task = current;
@@ -194,6 +211,22 @@ void __lockfunc lock_kernel(void)
current->lock_depth = depth;
}
+/*
+ * Try to acquire the Big Kernel Lock without spinning
+ * (!CONFIG_PREEMPT_BKL variant: the BKL is the kernel_flag spinlock).
+ *
+ * Returns 1 on success, 0 if the lock is contended. Safe with
+ * interrupts disabled, which is why early init code uses it.
+ */
+int __lockfunc trylock_kernel(void)
+{
+	struct task_struct *task = current;
+	int depth = task->lock_depth + 1;
+
+	/* depth == 0: outermost acquisition, really take the spinlock;
+	 * depth > 0: recursive re-entry, just bump the counter. */
+	if (likely(!depth)) {
+		if (unlikely(!spin_trylock(&kernel_flag)))
+			return 0;
+		else
+			__acquire(kernel_flag);	/* sparse: balance the trylock */
+	}
+
+	task->lock_depth = depth;
+	return 1;
+}
+
void __lockfunc unlock_kernel(void)
{
BUG_ON(current->lock_depth < 0);
@@ -204,5 +237,6 @@ void __lockfunc unlock_kernel(void)
#endif
EXPORT_SYMBOL(lock_kernel);
+/* we do not export trylock_kernel(). BKL code should shrink :-) */
EXPORT_SYMBOL(unlock_kernel);
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
[Index of Archives]
[Kernel Newbies]
[Netfilter]
[Bugtraq]
[Photo]
[Stuff]
[Gimp]
[Yosemite News]
[MIPS Linux]
[ARM Linux]
[Linux Security]
[Linux RAID]
[Video 4 Linux]
[Linux for the blind]
[Linux Resources]