[RFC][PATCH 1/3] Djprobe (Direct Jump Probe) for 2.6.14-rc5-mm1


Hi,

This patch enables get_insn_slot() to handle instruction slots of
different sizes.
Djprobe needs this change in order to work on machines that support
the NX bit, so that its jump stubs can be placed in executable pages.

---
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: [email protected]

Signed-off-by: Masami Hiramatsu <[email protected]>

 include/linux/kprobes.h |    5 ++++
 kernel/kprobes.c        |   58 +++++++++++++++++++++++++++++++-----------------
 2 files changed, 43 insertions(+), 20 deletions(-)
diff -Narup linux-2.6.14-rc5-mm1/include/linux/kprobes.h linux-2.6.14-rc5-mm1.djp.1/include/linux/kprobes.h
--- linux-2.6.14-rc5-mm1/include/linux/kprobes.h	2005-10-25 11:29:02.000000000 +0900
+++ linux-2.6.14-rc5-mm1.djp.1/include/linux/kprobes.h	2005-10-25 13:11:26.000000000 +0900
@@ -147,6 +147,11 @@ struct kretprobe_instance {
 	struct task_struct *task;
 };

+struct kprobe_insn_page_list {
+	struct hlist_head list;
+	int insn_size;		/* size of an instruction slot */
+};
+
 #ifdef CONFIG_KPROBES
 extern spinlock_t kretprobe_lock;
 extern int arch_prepare_kprobe(struct kprobe *p);
diff -Narup linux-2.6.14-rc5-mm1/kernel/kprobes.c linux-2.6.14-rc5-mm1.djp.1/kernel/kprobes.c
--- linux-2.6.14-rc5-mm1/kernel/kprobes.c	2005-10-25 11:29:02.000000000 +0900
+++ linux-2.6.14-rc5-mm1.djp.1/kernel/kprobes.c	2005-10-25 13:13:58.000000000 +0900
@@ -58,44 +58,50 @@ static DEFINE_PER_CPU(struct kprobe *, k
  * stepping on the instruction on a vmalloced/kmalloced/data page
  * is a recipe for disaster
  */
-#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+#define INSNS_PER_PAGE(size) (PAGE_SIZE/(size * sizeof(kprobe_opcode_t)))

 struct kprobe_insn_page {
 	struct hlist_node hlist;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
-	char slot_used[INSNS_PER_PAGE];
 	int nused;
+	char slot_used[1];
 };

-static struct hlist_head kprobe_insn_pages;
+static struct kprobe_insn_page_list kprobe_insn_pages = {
+	HLIST_HEAD_INIT, MAX_INSN_SIZE
+};

 /**
- * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-kprobe_opcode_t __kprobes *get_insn_slot(void)
+kprobe_opcode_t
+	__kprobes * __get_insn_slot(struct kprobe_insn_page_list *pages)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
+	int ninsns = INSNS_PER_PAGE(pages->insn_size);

-	hlist_for_each(pos, &kprobe_insn_pages) {
+	hlist_for_each(pos, &pages->list) {
 		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
-		if (kip->nused < INSNS_PER_PAGE) {
+		if (kip->nused < ninsns) {
 			int i;
-			for (i = 0; i < INSNS_PER_PAGE; i++) {
+			for (i = 0; i < ninsns; i++) {
 				if (!kip->slot_used[i]) {
 					kip->slot_used[i] = 1;
 					kip->nused++;
-					return kip->insns + (i * MAX_INSN_SIZE);
+					return kip->insns +
+					    (i * pages->insn_size);
 				}
 			}
 			/* Surprise!  No unused slots.  Fix kip->nused. */
-			kip->nused = INSNS_PER_PAGE;
+			kip->nused = ninsns;
 		}
 	}

-	/* All out of space.  Need to allocate a new page. Use slot 0.*/
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+	/* All out of space.  Need to allocate a new page. Use slot 0. */
+	kip = kmalloc(sizeof(struct kprobe_insn_page) +
+		    sizeof(char) * (ninsns - 1), GFP_ATOMIC);
 	if (!kip) {
 		return NULL;
 	}
@@ -111,23 +117,25 @@ kprobe_opcode_t __kprobes *get_insn_slot
 		return NULL;
 	}
 	INIT_HLIST_NODE(&kip->hlist);
-	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
-	memset(kip->slot_used, 0, INSNS_PER_PAGE);
+	hlist_add_head(&kip->hlist, &pages->list);
+	memset(kip->slot_used, 0, ninsns);
 	kip->slot_used[0] = 1;
 	kip->nused = 1;
 	return kip->insns;
 }

-void __kprobes free_insn_slot(kprobe_opcode_t *slot)
+void __kprobes __free_insn_slot(struct kprobe_insn_page_list *pages,
+				kprobe_opcode_t * slot)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
+	int ninsns = INSNS_PER_PAGE(pages->insn_size);

-	hlist_for_each(pos, &kprobe_insn_pages) {
+	hlist_for_each(pos, &pages->list) {
 		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
 		if (kip->insns <= slot &&
-		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
-			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+		    slot < kip->insns + (ninsns * pages->insn_size)) {
+			int i = (slot - kip->insns) / pages->insn_size;
 			kip->slot_used[i] = 0;
 			kip->nused--;
 			if (kip->nused == 0) {
@@ -138,10 +146,10 @@ void __kprobes free_insn_slot(kprobe_opc
 				 * next time somebody inserts a probe.
 				 */
 				hlist_del(&kip->hlist);
-				if (hlist_empty(&kprobe_insn_pages)) {
+				if (hlist_empty(&pages->list)) {
 					INIT_HLIST_NODE(&kip->hlist);
 					hlist_add_head(&kip->hlist,
-						&kprobe_insn_pages);
+						       &pages->list);
 				} else {
 					module_free(NULL, kip->insns);
 					kfree(kip);
@@ -152,6 +160,16 @@ void __kprobes free_insn_slot(kprobe_opc
 	}
 }

+kprobe_opcode_t __kprobes *get_insn_slot(void)
+{
+	return __get_insn_slot(&kprobe_insn_pages);
+}
+
+void __kprobes free_insn_slot(kprobe_opcode_t * slot)
+{
+	__free_insn_slot(&kprobe_insn_pages, slot);
+}
+
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
