Here is the reworked patch, preceded by a reply to one of the comments:
* Christoph Hellwig ([email protected]) wrote:
> > +void blk_probe_disconnect(void)
> > +{
> > + uint8_t i;
> > +
> > + for (i = 0; i < NUM_PROBES; i++) {
> > + marker_remove_probe(probe_array[i].name);
> > + }
> > + synchronize_sched(); /* Wait for probes to finish */
>
> kprobes does this kind of synchronization internally, so the marker
> wrapper should probably as well.
>
The problem appears on heavily loaded systems: doing 50
synchronize_sched() calls in a row can take up to a few seconds on a
4-way machine. This is why I prefer to do a single synchronize_sched()
in the module to which the callbacks belong, once all of its probes
have been removed.
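To make the trade-off concrete, here is a minimal sketch of the two
disarm strategies (hypothetical helper names; it assumes, as in the
patch below, that marker_remove_probe() only unlinks the probe and
leaves the RCU synchronization to the caller):

/* O(n) grace periods: one synchronize_sched() per probe, which is
 * roughly what a marker wrapper doing the synchronization internally
 * would amount to. */
static void disarm_one_by_one(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(probe_array); i++) {
                marker_remove_probe(probe_array[i].name);
                synchronize_sched();    /* one grace period per probe */
        }
}

/* O(1) grace periods: unlink all the probes first, then wait once
 * for in-flight probe callbacks to drain. */
static void disarm_batched(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(probe_array); i++)
                marker_remove_probe(probe_array[i].name);
        synchronize_sched();    /* single grace period for all probes */
}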
Here is the revised patch. It depends on a newer version of the markers
patch, which I'll send to Andrew soon.
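For reviewers who have not seen that markers version yet, the pattern
this patch relies on looks roughly as follows (a sketch only; the
signatures are inferred from the calls in this patch, not quoted from
a released header):

/* Instrumentation site: a static marker in the instrumented code,
 * close to a nop until a probe is connected to it at runtime. */
trace_mark(blk_request_insert, "%p %p", q, rq);

/* Consumer side: connect/disconnect a probe by marker name;
 * marker_set_probe() is assumed to return nonzero on success. */
result = marker_set_probe("blk_request_insert", "%p %p",
                blk_add_trace_rq, &probe_array[i]);
marker_remove_probe("blk_request_insert");

/* The probe receives its private data plus the marker's arguments,
 * which it decodes with va_arg() according to the format string: */
static void blk_add_trace_rq(const struct __mark_marker_data *mdata,
                const char *fmt, ...);

The format string therefore has to match on both sides, since the
probe decodes its arguments blindly with va_arg().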
Signed-off-by: Mathieu Desnoyers <[email protected]>
Index: linux-2.6-lttng/block/elevator.c
===================================================================
--- linux-2.6-lttng.orig/block/elevator.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/block/elevator.c 2007-05-03 12:54:58.000000000 -0400
@@ -32,7 +32,7 @@
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <linux/hash.h>
#include <asm/uaccess.h>
@@ -571,7 +571,7 @@
unsigned ordseq;
int unplug_it = 1;
- blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+ trace_mark(blk_request_insert, "%p %p", q, rq);
rq->q = q;
@@ -757,7 +757,7 @@
* not be passed by new incoming requests
*/
rq->cmd_flags |= REQ_STARTED;
- blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+ trace_mark(blk_request_issue, "%p %p", q, rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
Index: linux-2.6-lttng/block/ll_rw_blk.c
===================================================================
--- linux-2.6-lttng.orig/block/ll_rw_blk.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/block/ll_rw_blk.c 2007-05-03 12:54:58.000000000 -0400
@@ -28,6 +28,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
+#include <linux/marker.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
@@ -1551,7 +1552,7 @@
if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
- blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+ trace_mark(blk_plug_device, "%p %p %d", q, NULL, 0);
}
}
@@ -1617,7 +1618,7 @@
* devices don't necessarily have an ->unplug_fn defined
*/
if (q->unplug_fn) {
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+ trace_mark(blk_pdu_unplug_io, "%p %p %d", q, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
q->unplug_fn(q);
@@ -1628,7 +1629,7 @@
{
request_queue_t *q = container_of(work, request_queue_t, unplug_work);
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+ trace_mark(blk_pdu_unplug_io, "%p %p %d", q, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
q->unplug_fn(q);
@@ -1638,7 +1639,7 @@
{
request_queue_t *q = (request_queue_t *)data;
- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+ trace_mark(blk_pdu_unplug_timer, "%p %p %d", q, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
kblockd_schedule_work(&q->unplug_work);
@@ -2148,7 +2149,7 @@
rq_init(q, rq);
- blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+ trace_mark(blk_get_request, "%p %p %d", q, bio, rw);
out:
return rq;
}
@@ -2178,7 +2179,7 @@
if (!rq) {
struct io_context *ioc;
- blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+ trace_mark(blk_sleep_request, "%p %p %d", q, bio, rw);
__generic_unplug_device(q);
spin_unlock_irq(q->queue_lock);
@@ -2252,7 +2253,7 @@
*/
void blk_requeue_request(request_queue_t *q, struct request *rq)
{
- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+ trace_mark(blk_requeue, "%p %p", q, rq);
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
@@ -2937,7 +2938,7 @@
if (!ll_back_merge_fn(q, req, bio))
break;
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+ trace_mark(blk_bio_backmerge, "%p %p", q, bio);
req->biotail->bi_next = bio;
req->biotail = bio;
@@ -2954,7 +2955,7 @@
if (!ll_front_merge_fn(q, req, bio))
break;
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+ trace_mark(blk_bio_frontmerge, "%p %p", q, bio);
bio->bi_next = req->bio;
req->bio = bio;
@@ -3184,10 +3185,10 @@
blk_partition_remap(bio);
if (old_sector != -1)
- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
- old_sector);
+ trace_mark(blk_remap, "%p %p %u %llu %llu", q, bio, old_dev,
+ (u64)bio->bi_sector, (u64)old_sector);
- blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+ trace_mark(blk_bio_queue, "%p %p", q, bio);
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
@@ -3329,7 +3330,7 @@
int total_bytes, bio_nbytes, error, next_idx = 0;
struct bio *bio;
- blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+ trace_mark(blk_request_complete, "%p %p", req->q, req);
/*
* extend uptodate bool to allow < 0 value to be direct io error
Index: linux-2.6-lttng/block/Kconfig
===================================================================
--- linux-2.6-lttng.orig/block/Kconfig 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/block/Kconfig 2007-05-03 12:54:58.000000000 -0400
@@ -32,6 +32,7 @@
depends on SYSFS
select RELAY
select DEBUG_FS
+ select MARKERS
help
Say Y here, if you want to be able to trace the block layer actions
on a given queue. Tracing allows you to see any traffic happening
Index: linux-2.6-lttng/block/blktrace.c
===================================================================
--- linux-2.6-lttng.orig/block/blktrace.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/block/blktrace.c 2007-05-03 13:05:30.000000000 -0400
@@ -23,11 +23,19 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
+#include <linux/marker.h>
#include <asm/uaccess.h>
static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;
+/* Global reference count of probes */
+static DEFINE_MUTEX(blk_probe_mutex);
+static int blk_probes_ref;
+
+int blk_probe_arm(void);
+void blk_probe_disarm(void);
+
/*
* Send out a notify message.
*/
@@ -179,7 +187,7 @@
EXPORT_SYMBOL_GPL(__blk_add_trace);
static struct dentry *blk_tree_root;
-static struct mutex blk_tree_mutex;
+static DEFINE_MUTEX(blk_tree_mutex);
static unsigned int root_users;
static inline void blk_remove_root(void)
@@ -229,6 +237,10 @@
blk_remove_tree(bt->dir);
free_percpu(bt->sequence);
kfree(bt);
+ mutex_lock(&blk_probe_mutex);
+ if (--blk_probes_ref == 0)
+ blk_probe_disarm();
+ mutex_unlock(&blk_probe_mutex);
}
static int blk_trace_remove(request_queue_t *q)
@@ -386,6 +398,11 @@
goto err;
}
+ mutex_lock(&blk_probe_mutex);
+ if (!blk_probes_ref++)
+ blk_probe_arm();
+ mutex_unlock(&blk_probe_mutex);
+
return 0;
err:
if (dir)
@@ -549,9 +566,270 @@
#endif
}
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * Expected variable arguments:
+ * @q: queue the io is for
+ * @rq: the source request
+ *
+ * Description:
+ * Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_rq(const struct __mark_marker_data *mdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ u32 what;
+ struct blk_trace *bt;
+ int rw;
+ struct blk_probe_data *pinfo = mdata->pdata;
+ struct request_queue *q;
+ struct request *rq;
+
+ va_start(args, fmt);
+ q = va_arg(args, struct request_queue *);
+ rq = va_arg(args, struct request *);
+ va_end(args);
+
+ what = pinfo->flags;
+ bt = q->blk_trace;
+ rw = rq->cmd_flags & 0x03;
+
+ if (likely(!bt))
+ return;
+
+ if (blk_pc_request(rq)) {
+ what |= BLK_TC_ACT(BLK_TC_PC);
+ __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+ } else {
+ what |= BLK_TC_ACT(BLK_TC_FS);
+ __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+ }
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * Expected variable arguments:
+ * @q: queue the io is for
+ * @bio: the source bio
+ *
+ * Description:
+ * Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static void blk_add_trace_bio(const struct __mark_marker_data *mdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ u32 what;
+ struct blk_trace *bt;
+ struct blk_probe_data *pinfo = mdata->pdata;
+ struct request_queue *q;
+ struct bio *bio;
+
+ va_start(args, fmt);
+ q = va_arg(args, struct request_queue *);
+ bio = va_arg(args, struct bio *);
+ va_end(args);
+
+ what = pinfo->flags;
+ bt = q->blk_trace;
+
+ if (likely(!bt))
+ return;
+
+ __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+/**
+ * blk_add_trace_generic - Add a trace for a generic action
+ * Expected variable arguments:
+ * @q: queue the io is for
+ * @bio: the source bio
+ * @rw: the data direction
+ *
+ * Description:
+ * Records a simple trace
+ *
+ **/
+static void blk_add_trace_generic(const struct __mark_marker_data *mdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct blk_trace *bt;
+ u32 what;
+ struct blk_probe_data *pinfo = mdata->pdata;
+ struct request_queue *q;
+ struct bio *bio;
+ int rw;
+
+ va_start(args, fmt);
+ q = va_arg(args, struct request_queue *);
+ bio = va_arg(args, struct bio *);
+ rw = va_arg(args, int);
+ va_end(args);
+
+ what = pinfo->flags;
+ bt = q->blk_trace;
+
+ if (likely(!bt))
+ return;
+
+ if (bio)
+ blk_add_trace_bio(mdata, "%p %p", q, bio);
+ else
+ __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+/**
+ * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
+ * Expected variable arguments:
+ * @q: queue the io is for
+ * @bio: the source bio
+ * @pdu: the integer payload
+ *
+ * Description:
+ * Adds a trace with some integer payload. This might be an unplug
+ * option given as the action, with the depth at unplug time given
+ * as the payload
+ *
+ **/
+static void blk_add_trace_pdu_int(const struct __mark_marker_data *mdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct blk_trace *bt;
+ u32 what;
+ struct blk_probe_data *pinfo = mdata->pdata;
+ struct request_queue *q;
+ struct bio *bio;
+ unsigned int pdu;
+ __be64 rpdu;
+
+ va_start(args, fmt);
+ q = va_arg(args, struct request_queue *);
+ bio = va_arg(args, struct bio *);
+ pdu = va_arg(args, unsigned int);
+ va_end(args);
+
+ what = pinfo->flags;
+ bt = q->blk_trace;
+ rpdu = cpu_to_be64(pdu);
+
+ if (likely(!bt))
+ return;
+
+ if (bio)
+ __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+ else
+ __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * Expected variable arguments:
+ * @q: queue the io is for
+ * @bio: the source bio
+ * @dev: target device
+ * @from: source sector
+ * @to: target sector
+ *
+ * Description:
+ * Device mapper or raid target sometimes need to split a bio because
+ * it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static void blk_add_trace_remap(const struct __mark_marker_data *mdata,
+ const char *fmt, ...)
+{
+ va_list args;
+ struct blk_trace *bt;
+ struct blk_io_trace_remap r;
+ u32 what;
+ struct blk_probe_data *pinfo = mdata->pdata;
+ struct request_queue *q;
+ struct bio *bio;
+ u32 dev;
+ u64 from, to;
+
+ va_start(args, fmt);
+ q = va_arg(args, struct request_queue *);
+ bio = va_arg(args, struct bio *);
+ dev = va_arg(args, u32);
+ from = va_arg(args, u64);
+ to = va_arg(args, u64);
+ va_end(args);
+
+ what = pinfo->flags;
+ bt = q->blk_trace;
+
+ if (likely(!bt))
+ return;
+
+ r.device = cpu_to_be32(dev);
+ r.sector = cpu_to_be64(to);
+
+ __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+#define FACILITY_NAME "blk"
+
+static struct blk_probe_data probe_array[] =
+{
+ { "blk_bio_queue", "%p %p", BLK_TA_QUEUE, blk_add_trace_bio },
+ { "blk_bio_backmerge", "%p %p", BLK_TA_BACKMERGE, blk_add_trace_bio },
+ { "blk_bio_frontmerge", "%p %p", BLK_TA_FRONTMERGE, blk_add_trace_bio },
+ { "blk_get_request", "%p %p %d", BLK_TA_GETRQ, blk_add_trace_generic },
+ { "blk_sleep_request", "%p %p %d", BLK_TA_SLEEPRQ,
+ blk_add_trace_generic },
+ { "blk_requeue", "%p %p", BLK_TA_REQUEUE, blk_add_trace_rq },
+ { "blk_request_issue", "%p %p", BLK_TA_ISSUE, blk_add_trace_rq },
+ { "blk_request_complete", "%p %p", BLK_TA_COMPLETE, blk_add_trace_rq },
+ { "blk_plug_device", "%p %p %d", BLK_TA_PLUG, blk_add_trace_generic },
+ { "blk_pdu_unplug_io", "%p %p %d", BLK_TA_UNPLUG_IO,
+ blk_add_trace_pdu_int },
+ { "blk_pdu_unplug_timer", "%p %p %d", BLK_TA_UNPLUG_TIMER,
+ blk_add_trace_pdu_int },
+ { "blk_request_insert", "%p %p", BLK_TA_INSERT,
+ blk_add_trace_rq },
+ { "blk_pdu_split", "%p %p %d", BLK_TA_SPLIT,
+ blk_add_trace_pdu_int },
+ { "blk_bio_bounce", "%p %p", BLK_TA_BOUNCE, blk_add_trace_bio },
+ { "blk_remap", "%p %p %u %llu %llu", BLK_TA_REMAP,
+ blk_add_trace_remap },
+};
+
+
+int blk_probe_arm(void)
+{
+ int result;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probe_array); i++) {
+ result = marker_set_probe(probe_array[i].name,
+ probe_array[i].format,
+ probe_array[i].callback, &probe_array[i]);
+ if (!result)
+ printk(KERN_INFO
+ "blktrace unable to register probe %s\n",
+ probe_array[i].name);
+ }
+ return 0;
+}
+
+void blk_probe_disarm(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(probe_array); i++) {
+ marker_remove_probe(probe_array[i].name);
+ }
+ synchronize_sched(); /* Wait for probes to finish */
+}
+
+
static __init int blk_trace_init(void)
{
- mutex_init(&blk_tree_mutex);
on_each_cpu(blk_trace_check_cpu_time, NULL, 1, 1);
blk_trace_set_ht_offsets();
Index: linux-2.6-lttng/include/linux/blktrace_api.h
===================================================================
--- linux-2.6-lttng.orig/include/linux/blktrace_api.h 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/include/linux/blktrace_api.h 2007-05-03 12:54:58.000000000 -0400
@@ -3,6 +3,7 @@
#include <linux/blkdev.h>
#include <linux/relay.h>
+#include <linux/marker.h>
/*
* Trace categories
@@ -142,149 +143,24 @@
u32 pid;
};
+/* Probe data used for probe-marker connection */
+struct blk_probe_data {
+ const char *name;
+ const char *format;
+ u32 flags;
+ marker_probe_func *callback;
+};
+
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(request_queue_t *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
-
-/**
- * blk_add_trace_rq - Add a trace for a request oriented action
- * @q: queue the io is for
- * @rq: the source request
- * @what: the action
- *
- * Description:
- * Records an action against a request. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
- int rw = rq->cmd_flags & 0x03;
-
- if (likely(!bt))
- return;
-
- if (blk_pc_request(rq)) {
- what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
- } else {
- what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
- }
-}
-
-/**
- * blk_add_trace_bio - Add a trace for a bio oriented action
- * @q: queue the io is for
- * @bio: the source bio
- * @what: the action
- *
- * Description:
- * Records an action against a bio. Will log the bio offset + size.
- *
- **/
-static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
- u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
-}
-
-/**
- * blk_add_trace_generic - Add a trace for a generic action
- * @q: queue the io is for
- * @bio: the source bio
- * @rw: the data direction
- * @what: the action
- *
- * Description:
- * Records a simple trace
- *
- **/
-static inline void blk_add_trace_generic(struct request_queue *q,
- struct bio *bio, int rw, u32 what)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (likely(!bt))
- return;
-
- if (bio)
- blk_add_trace_bio(q, bio, what);
- else
- __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
-}
-
-/**
- * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
- * @q: queue the io is for
- * @what: the action
- * @bio: the source bio
- * @pdu: the integer payload
- *
- * Description:
- * Adds a trace with some integer payload. This might be an unplug
- * option given as the action, with the depth at unplug time given
- * as the payload
- *
- **/
-static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
- struct bio *bio, unsigned int pdu)
-{
- struct blk_trace *bt = q->blk_trace;
- __be64 rpdu = cpu_to_be64(pdu);
-
- if (likely(!bt))
- return;
-
- if (bio)
- __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
- else
- __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
-}
-
-/**
- * blk_add_trace_remap - Add a trace for a remap operation
- * @q: queue the io is for
- * @bio: the source bio
- * @dev: target device
- * @from: source sector
- * @to: target sector
- *
- * Description:
- * Device mapper or raid target sometimes need to split a bio because
- * it spans a stripe (or similar). Add a trace for that action.
- *
- **/
-static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t from, sector_t to)
-{
- struct blk_trace *bt = q->blk_trace;
- struct blk_io_trace_remap r;
-
- if (likely(!bt))
- return;
-
- r.device = cpu_to_be32(dev);
- r.sector = cpu_to_be64(to);
-
- __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
-}
+extern int blk_probe_arm(void);
+extern void blk_probe_disarm(void);
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)
-#define blk_add_trace_rq(q, rq, what) do { } while (0)
-#define blk_add_trace_bio(q, rq, what) do { } while (0)
-#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
-#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
-#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#endif
Index: linux-2.6-lttng/mm/bounce.c
===================================================================
--- linux-2.6-lttng.orig/mm/bounce.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/mm/bounce.c 2007-05-03 12:54:58.000000000 -0400
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <asm/tlbflush.h>
#define POOL_SIZE 64
@@ -237,7 +237,7 @@
if (!bio)
return;
- blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+ trace_mark(blk_bio_bounce, "%p %p", q, *bio_orig);
/*
* at least one page was bounced, fill in possible non-highmem
Index: linux-2.6-lttng/mm/highmem.c
===================================================================
--- linux-2.6-lttng.orig/mm/highmem.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/mm/highmem.c 2007-05-03 12:54:58.000000000 -0400
@@ -26,7 +26,7 @@
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <asm/tlbflush.h>
/*
Index: linux-2.6-lttng/fs/bio.c
===================================================================
--- linux-2.6-lttng.orig/fs/bio.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/fs/bio.c 2007-05-03 12:54:58.000000000 -0400
@@ -25,7 +25,7 @@
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <scsi/sg.h> /* for struct sg_iovec */
#define BIO_POOL_SIZE 2
@@ -1081,7 +1081,7 @@
if (!bp)
return bp;
- blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
- bi->bi_sector + first_sectors);
+ trace_mark(blk_pdu_split, "%p %p %d", bdev_get_queue(bi->bi_bdev), bi,
+ (unsigned int)(bi->bi_sector + first_sectors));
BUG_ON(bi->bi_vcnt != 1);
Index: linux-2.6-lttng/drivers/block/cciss.c
===================================================================
--- linux-2.6-lttng.orig/drivers/block/cciss.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/drivers/block/cciss.c 2007-05-03 12:54:58.000000000 -0400
@@ -37,7 +37,7 @@
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -2502,7 +2502,7 @@
}
cmd->rq->data_len = 0;
cmd->rq->completion_data = cmd;
- blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
+ trace_mark(blk_request_complete, "%p %p", cmd->rq->q, cmd->rq);
blk_complete_request(cmd->rq);
}
Index: linux-2.6-lttng/drivers/md/dm.c
===================================================================
--- linux-2.6-lttng.orig/drivers/md/dm.c 2007-05-03 12:27:12.000000000 -0400
+++ linux-2.6-lttng/drivers/md/dm.c 2007-05-03 12:54:58.000000000 -0400
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
-#include <linux/blktrace_api.h>
+#include <linux/marker.h>
#include <linux/smp_lock.h>
#define DM_MSG_PREFIX "core"
@@ -485,8 +485,8 @@
wake_up(&io->md->wait);
if (io->error != DM_ENDIO_REQUEUE) {
- blk_add_trace_bio(io->md->queue, io->bio,
- BLK_TA_COMPLETE);
+ trace_mark(blk_bio_complete, "%p %p",
+ io->md->queue, io->bio);
bio_endio(io->bio, io->bio->bi_size, io->error);
}
@@ -582,10 +582,10 @@
r = ti->type->map(ti, clone, &tio->info);
if (r == DM_MAPIO_REMAPPED) {
/* the bio has been remapped so dispatch it */
-
- blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev, sector,
- clone->bi_sector);
+ trace_mark(blk_remap, "%p %p %u %llu %llu",
+ bdev_get_queue(clone->bi_bdev), clone,
+ (u32)tio->io->bio->bi_bdev->bd_dev, (u64)sector,
+ (u64)clone->bi_sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
--
Mathieu Desnoyers
Computer Engineering Ph.D. Student, Ecole Polytechnique de Montreal
OpenPGP key fingerprint: 8CD5 52C3 8E3C 4140 715F BA06 3F25 A8FE 3BAE 9A68