From 5848b9fe9ad49151492cb2652a06921fa4b49031 Mon Sep 17 00:00:00 2001
From: Jerome Marchand
Date: Thu, 16 Aug 2018 14:58:56 +0200
Subject: [PATCH] Fix tools for RHEL 7

There are some differences on RHEL 7 that make some tools fail. This
patch fixes the following:
 - missing /sys/kernel/debug/kprobes/blacklist file
 - missing __vfs_read() function
 - aio_read/write methods replaced by read/write_iter in file_operations
 - changes in mnt_namespace structure
 - change in finish_task_switch() argument list
 - changes in sock_common struct
 - missing TCP_NEW_SYN_RECV TCP state
 - mm_page_alloc tracepoint returns page struct instead of PFN
 - iocb argument removed from tcp_sendmsg()
---
 src/python/bcc/__init__.py |  7 +++++--
 tools/btrfsdist.py         | 13 +++++++------
 tools/btrfsslower.py       | 13 +++++++------
 tools/cpudist.py           |  4 +++-
 tools/ext4dist.py          | 11 ++++++-----
 tools/ext4slower.py        | 13 +++++++------
 tools/fileslower.py        | 13 +++++++++----
 tools/memleak.py           | 12 ++++++++++--
 tools/mountsnoop.py        |  9 ++++-----
 tools/nfsslower.py         |  1 +
 tools/offcputime.py        |  4 +++-
 tools/offwaketime.py       |  4 +++-
 tools/oomkill.py           |  8 ++++----
 tools/runqlat.py           |  2 +-
 tools/runqslower.py        |  2 +-
 tools/solisten.py          |  2 +-
 tools/tcpsubnet.py         |  4 ++--
 tools/tcptracer.py         |  9 ++++-----
 tools/xfsdist.py           |  8 ++++----
 tools/xfsslower.py         | 11 ++++++-----
 20 files changed, 88 insertions(+), 62 deletions(-)

diff --git a/src/python/bcc/__init__.py b/src/python/bcc/__init__.py
index bff5f282..19933cf7 100644
--- a/src/python/bcc/__init__.py
+++ b/src/python/bcc/__init__.py
@@ -525,8 +525,11 @@ DEBUG_BTF = 0x20

     @staticmethod
     def get_kprobe_functions(event_re):
-        with open("%s/../kprobes/blacklist" % TRACEFS, "rb") as blacklist_f:
-            blacklist = set([line.rstrip().split()[1] for line in blacklist_f])
+        try:
+            with open("%s/../kprobes/blacklist" % TRACEFS, "rb") as blacklist_f:
+                blacklist = set([line.rstrip().split()[1] for line in blacklist_f])
+        except:
+            blacklist = set()
         fns = []

         in_init_section = 0
diff --git a/tools/btrfsdist.py b/tools/btrfsdist.py
index 4659ab46..3326b67a 100755
--- a/tools/btrfsdist.py
+++ b/tools/btrfsdist.py
@@ -60,6 +60,7 @@ debug = 0
 bpf_text = """
 #include <uapi/linux/ptrace.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/sched.h>

 #define OP_NAME_LEN 8
@@ -81,7 +82,7 @@ int trace_entry(struct pt_regs *ctx)
     return 0;
 }

-// The current btrfs (Linux 4.5) uses generic_file_read_iter() instead of it's
+// btrfs uses generic_file_aio_read() instead of its
 // own read function. So we need to trace that and then filter on btrfs, which
 // I do by checking file->f_op.
 int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb)
@@ -193,13 +194,13 @@ bpf_text = bpf_text.replace('FACTOR', str(factor))
 # load BPF program
 b = BPF(text=bpf_text)

-# Common file functions. See earlier comment about generic_file_read_iter().
-b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry")
-b.attach_kprobe(event="btrfs_file_write_iter", fn_name="trace_entry")
+# Common file functions. See earlier comment about generic_file_aio_read().
+b.attach_kprobe(event="generic_file_aio_read", fn_name="trace_read_entry") +b.attach_kprobe(event="btrfs_file_aio_write", fn_name="trace_entry") b.attach_kprobe(event="generic_file_open", fn_name="trace_open_entry") b.attach_kprobe(event="btrfs_sync_file", fn_name="trace_entry") -b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return") -b.attach_kretprobe(event="btrfs_file_write_iter", fn_name="trace_write_return") +b.attach_kretprobe(event="generic_file_aio_read", fn_name="trace_read_return") +b.attach_kretprobe(event="btrfs_file_aio_write", fn_name="trace_write_return") b.attach_kretprobe(event="generic_file_open", fn_name="trace_open_return") b.attach_kretprobe(event="btrfs_sync_file", fn_name="trace_fsync_return") diff --git a/tools/btrfsslower.py b/tools/btrfsslower.py index bacbc06a..b650cea0 100755 --- a/tools/btrfsslower.py +++ b/tools/btrfsslower.py @@ -62,6 +62,7 @@ debug = 0 bpf_text = """ #include #include +#include #include #include @@ -96,7 +97,7 @@ BPF_PERF_OUTPUT(events); // Store timestamp and size on entry // -// The current btrfs (Linux 4.5) uses generic_file_read_iter() instead of it's +// btrfs uses generic_file_aio_read() instead of it's // own read function. So we need to trace that and then filter on btrfs, which // I do by checking file->f_op. int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb) @@ -123,7 +124,7 @@ int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb) return 0; } -// btrfs_file_write_iter(): +// btrfs_file_aio_write(): int trace_write_entry(struct pt_regs *ctx, struct kiocb *iocb) { u64 id = bpf_get_current_pid_tgid(); @@ -313,12 +314,12 @@ int trace_fsync_return(struct pt_regs *ctx) b = BPF(text=bpf_text) # Common file functions. See earlier comment about generic_*(). 
-b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry") -b.attach_kprobe(event="btrfs_file_write_iter", fn_name="trace_write_entry") +b.attach_kprobe(event="generic_file_aio_read", fn_name="trace_read_entry") +b.attach_kprobe(event="btrfs_file_aio_write", fn_name="trace_write_entry") b.attach_kprobe(event="generic_file_open", fn_name="trace_open_entry") b.attach_kprobe(event="btrfs_sync_file", fn_name="trace_fsync_entry") -b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return") -b.attach_kretprobe(event="btrfs_file_write_iter", fn_name="trace_write_return") +b.attach_kretprobe(event="generic_file_aio_read", fn_name="trace_read_return") +b.attach_kretprobe(event="btrfs_file_aio_write", fn_name="trace_write_return") b.attach_kretprobe(event="generic_file_open", fn_name="trace_open_return") b.attach_kretprobe(event="btrfs_sync_file", fn_name="trace_fsync_return") diff --git a/tools/cpudist.py b/tools/cpudist.py index 4d7c9eb4..ddb675e2 100755 --- a/tools/cpudist.py +++ b/tools/cpudist.py @@ -94,7 +94,9 @@ static inline void update_hist(u32 tgid, u32 pid, u64 ts) STORE } -int sched_switch(struct pt_regs *ctx, struct task_struct *prev) +struct rq; + +int sched_switch(struct pt_regs *ctx, struct rq *rq, struct task_struct *prev) { u64 ts = bpf_ktime_get_ns(); u64 pid_tgid = bpf_get_current_pid_tgid(); diff --git a/tools/ext4dist.py b/tools/ext4dist.py index 384a4c14..559e4a5f 100755 --- a/tools/ext4dist.py +++ b/tools/ext4dist.py @@ -60,6 +60,7 @@ debug = 0 bpf_text = """ #include #include +#include #include #define OP_NAME_LEN 8 @@ -136,18 +137,18 @@ int trace_fsync_return(struct pt_regs *ctx) """ # Starting from Linux 4.10 ext4_file_operations.read_iter has been changed from -# using generic_file_read_iter() to its own ext4_file_read_iter(). +# using generic_file_aio_read() to its own ext4_file_read_iter(). # # To detect the proper function to trace check if ext4_file_read_iter() is # defined in /proc/kallsyms, if it's defined attach to that function, otherwise -# use generic_file_read_iter() and inside the trace hook filter on ext4 read +# use generic_file_aio_read() and inside the trace hook filter on ext4 read # events (checking if file->f_op == ext4_file_operations). 
 if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
     ext4_read_fn = 'ext4_file_read_iter'
     ext4_trace_read_fn = 'trace_entry'
     ext4_trace_read_code = ''
 else:
-    ext4_read_fn = 'generic_file_read_iter'
+    ext4_read_fn = 'generic_file_aio_read'
     ext4_trace_read_fn = 'trace_read_entry'
     ext4_file_ops_addr = ''
     with open(kallsyms) as syms:
@@ -194,11 +195,11 @@ bpf_text = bpf_text.replace('FACTOR', str(factor))
 b = BPF(text=bpf_text)

 b.attach_kprobe(event=ext4_read_fn, fn_name=ext4_trace_read_fn)
-b.attach_kprobe(event="ext4_file_write_iter", fn_name="trace_entry")
+b.attach_kprobe(event="ext4_file_write", fn_name="trace_entry")
 b.attach_kprobe(event="ext4_file_open", fn_name="trace_entry")
 b.attach_kprobe(event="ext4_sync_file", fn_name="trace_entry")
 b.attach_kretprobe(event=ext4_read_fn, fn_name='trace_read_return')
-b.attach_kretprobe(event="ext4_file_write_iter", fn_name="trace_write_return")
+b.attach_kretprobe(event="ext4_file_write", fn_name="trace_write_return")
 b.attach_kretprobe(event="ext4_file_open", fn_name="trace_open_return")
 b.attach_kretprobe(event="ext4_sync_file", fn_name="trace_fsync_return")
diff --git a/tools/ext4slower.py b/tools/ext4slower.py
index 0524f22e..d5fbb661 100755
--- a/tools/ext4slower.py
+++ b/tools/ext4slower.py
@@ -63,6 +63,7 @@ debug = 0
 bpf_text = """
 #include <uapi/linux/ptrace.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/sched.h>
 #include <linux/dcache.h>

@@ -97,7 +98,7 @@ BPF_PERF_OUTPUT(events);

 // Store timestamp and size on entry
 //
-// The current ext4 (Linux 4.5) uses generic_file_read_iter(), instead of it's
+// ext4 uses generic_file_aio_read(), instead of its
 // own function, for reads. So we need to trace that and then filter on ext4,
 // which I do by checking file->f_op.
 // The new Linux version (since form 4.10) uses ext4_file_read_iter(), And if the 'CONFIG_FS_DAX'
@@ -127,7 +128,7 @@ int trace_read_entry(struct pt_regs *ctx, struct kiocb *iocb)
     return 0;
 }

-// ext4_file_write_iter():
+// ext4_file_write():
 int trace_write_entry(struct pt_regs *ctx, struct kiocb *iocb)
 {
     u64 id = bpf_get_current_pid_tgid();
@@ -314,15 +315,15 @@ b = BPF(text=bpf_text)
 if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
     b.attach_kprobe(event="ext4_file_read_iter", fn_name="trace_read_entry")
 else:
-    b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry")
-b.attach_kprobe(event="ext4_file_write_iter", fn_name="trace_write_entry")
+    b.attach_kprobe(event="generic_file_aio_read", fn_name="trace_read_entry")
+b.attach_kprobe(event="ext4_file_write", fn_name="trace_write_entry")
 b.attach_kprobe(event="ext4_file_open", fn_name="trace_open_entry")
 b.attach_kprobe(event="ext4_sync_file", fn_name="trace_fsync_entry")
 if BPF.get_kprobe_functions(b'ext4_file_read_iter'):
     b.attach_kretprobe(event="ext4_file_read_iter", fn_name="trace_read_return")
 else:
-    b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return")
-b.attach_kretprobe(event="ext4_file_write_iter", fn_name="trace_write_return")
+    b.attach_kretprobe(event="generic_file_aio_read", fn_name="trace_read_return")
+b.attach_kretprobe(event="ext4_file_write", fn_name="trace_write_return")
 b.attach_kretprobe(event="ext4_file_open", fn_name="trace_open_return")
 b.attach_kretprobe(event="ext4_sync_file", fn_name="trace_fsync_return")
diff --git a/tools/fileslower.py b/tools/fileslower.py
index 31e3adf9..05582c9e 100755
--- a/tools/fileslower.py
+++ b/tools/fileslower.py
@@ -123,7 +123,7 @@ int trace_read_entry(struct pt_regs *ctx, struct file *file,
     char __user *buf, size_t count)
 {
     // skip non-sync I/O; see kernel code for __vfs_read()
-    if (!(file->f_op->read_iter))
+    if (!(file->f_op->aio_read))
         return 0;
     return trace_rw_entry(ctx, file, buf, count);
 }
@@ -132,7 +132,7 @@ int trace_write_entry(struct pt_regs *ctx, struct file *file,
     char __user *buf, size_t count)
 {
     // skip non-sync I/O; see kernel code for __vfs_write()
-    if (!(file->f_op->write_iter))
+    if (!(file->f_op->aio_write))
         return 0;
     return trace_rw_entry(ctx, file, buf, count);
 }
@@ -199,8 +199,13 @@ b = BPF(text=bpf_text)
 # do_sync_read/do_sync_write), but those became static. So trace these from
 # the parent functions, at the cost of more overhead, instead.
 # Ultimately, we should be using [V]FS tracepoints.
-b.attach_kprobe(event="__vfs_read", fn_name="trace_read_entry")
-b.attach_kretprobe(event="__vfs_read", fn_name="trace_read_return")
+try:
+    b.attach_kprobe(event="__vfs_read", fn_name="trace_read_entry")
+    b.attach_kretprobe(event="__vfs_read", fn_name="trace_read_return")
+except:
+    # older kernels don't have __vfs_read so try vfs_read instead
+    b.attach_kprobe(event="vfs_read", fn_name="trace_read_entry")
+    b.attach_kretprobe(event="vfs_read", fn_name="trace_read_return")
 try:
     b.attach_kprobe(event="__vfs_write", fn_name="trace_write_entry")
     b.attach_kretprobe(event="__vfs_write", fn_name="trace_write_return")
diff --git a/tools/memleak.py b/tools/memleak.py
index fd08bc4d..db8b927a 100755
--- a/tools/memleak.py
+++ b/tools/memleak.py
@@ -357,13 +357,21 @@ TRACEPOINT_PROBE(kmem, kmem_cache_free) {
         return gen_free_enter((struct pt_regs *)args, (void *)args->ptr);
 }

+/*
+ * Upstream reads the PFN here, but on the RHEL7 kernel this is not available
+ * and the address of the page struct is returned instead. This value is
+ * used as the key in a hash to identify each allocation. No other allocation
+ * should return an address belonging to mem_map, so there's no risk of
+ * collision.
+ */
+
 TRACEPOINT_PROBE(kmem, mm_page_alloc) {
         gen_alloc_enter((struct pt_regs *)args, PAGE_SIZE << args->order);
-        return gen_alloc_exit2((struct pt_regs *)args, args->pfn);
+        return gen_alloc_exit2((struct pt_regs *)args, (size_t)args->page);
 }

 TRACEPOINT_PROBE(kmem, mm_page_free) {
-        return gen_free_enter((struct pt_regs *)args, (void *)args->pfn);
+        return gen_free_enter((struct pt_regs *)args, (void *)args->page);
 }
 """
diff --git a/tools/mountsnoop.py b/tools/mountsnoop.py
index eefb4ec7..5f3678b3 100755
--- a/tools/mountsnoop.py
+++ b/tools/mountsnoop.py
@@ -24,7 +24,6 @@ bpf_text = r"""

 #include <linux/sched.h>
 #include <linux/nsproxy.h>
-#include <linux/ns_common.h>

 /*
  * XXX: struct mnt_namespace is defined in fs/mount.h, which is private to the
@@ -34,7 +33,7 @@ bpf_text = r"""
  */
 struct mnt_namespace {
     atomic_t count;
-    struct ns_common ns;
+    unsigned int proc_inum;
 };

 /*
@@ -69,7 +68,7 @@ struct data_t {
     union {
         /* EVENT_MOUNT, EVENT_UMOUNT */
         struct {
-            /* current->nsproxy->mnt_ns->ns.inum */
+            /* current->nsproxy->mnt_ns->proc_inum */
             unsigned int mnt_ns;
             char comm[TASK_COMM_LEN];
             unsigned long flags;
@@ -106,7 +105,7 @@ int syscall__mount(struct pt_regs *ctx, char __user *source,
     task = (struct task_struct *)bpf_get_current_task();
     nsproxy = task->nsproxy;
     mnt_ns = nsproxy->mnt_ns;
-    event.enter.mnt_ns = mnt_ns->ns.inum;
+    event.enter.mnt_ns = mnt_ns->proc_inum;
     events.perf_submit(ctx, &event, sizeof(event));

     event.type = EVENT_MOUNT_SOURCE;
@@ -161,7 +160,7 @@ int syscall__umount(struct pt_regs *ctx, char __user *target, int flags)
     task = (struct task_struct *)bpf_get_current_task();
     nsproxy = task->nsproxy;
     mnt_ns = nsproxy->mnt_ns;
-    event.enter.mnt_ns = mnt_ns->ns.inum;
+    event.enter.mnt_ns = mnt_ns->proc_inum;
     events.perf_submit(ctx, &event, sizeof(event));

     event.type = EVENT_UMOUNT_TARGET;
diff --git a/tools/nfsslower.py b/tools/nfsslower.py
index 36918ca0..9c6d8f6c 100755
--- a/tools/nfsslower.py
+++ b/tools/nfsslower.py
@@ -64,6 +64,7 @@
 bpf_text = """
 #include <uapi/linux/ptrace.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/sched.h>
 #include <linux/dcache.h>
diff --git a/tools/offcputime.py b/tools/offcputime.py
index ac3b7281..ea36b264 100755
--- a/tools/offcputime.py
+++ b/tools/offcputime.py
@@ -128,7 +128,9 @@ BPF_HASH(counts, struct key_t);
 BPF_HASH(start, u32);
 BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);

-int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
+struct rq;
+
+int oncpu(struct pt_regs *ctx, struct rq *rq, struct task_struct *prev) {
     u32 pid = prev->pid;
     u32 tgid = prev->tgid;
     u64 ts, *tsp;
diff --git a/tools/offwaketime.py b/tools/offwaketime.py
index 4a1cebab..baa28801 100755
--- a/tools/offwaketime.py
+++ b/tools/offwaketime.py
@@ -163,7 +163,9 @@ int waker(struct pt_regs *ctx, struct task_struct *p) {
     return 0;
 }

-int oncpu(struct pt_regs *ctx, struct task_struct *p) {
+struct rq;
+
+int oncpu(struct pt_regs *ctx, struct rq *rq, struct task_struct *p) {
     // PID and TGID of the previous Process (Process going into waiting)
     u32 pid = p->pid;
     u32 tgid = p->tgid;
diff --git a/tools/oomkill.py b/tools/oomkill.py
index 4f3b6ce7..546c2dc6 100755
--- a/tools/oomkill.py
+++ b/tools/oomkill.py
@@ -35,15 +35,15 @@ struct data_t {

 BPF_PERF_OUTPUT(events);

-void kprobe__oom_kill_process(struct pt_regs *ctx, struct oom_control *oc, const char *message)
+void kprobe__oom_kill_process(struct pt_regs *ctx, struct task_struct *p,
+    gfp_t gfp_mask, int order, unsigned int points,
+    unsigned long totalpages, struct mem_cgroup *memcg)
 {
-    unsigned long totalpages;
-    struct task_struct *p = oc->chosen;
     struct data_t data = {};
     u32 pid = bpf_get_current_pid_tgid();
     data.fpid = pid;
     data.tpid = p->pid;
-    data.pages = oc->totalpages;
+    data.pages = totalpages;
     bpf_get_current_comm(&data.fcomm, sizeof(data.fcomm));
     bpf_probe_read(&data.tcomm, sizeof(data.tcomm), p->comm);
     events.perf_submit(ctx, &data, sizeof(data));
diff --git a/tools/runqlat.py b/tools/runqlat.py
index 9fd40642..0c9bb1c4 100755
--- a/tools/runqlat.py
+++ b/tools/runqlat.py
@@ -111,7 +111,7 @@ int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct
 }

 // calculate latency
-int trace_run(struct pt_regs *ctx, struct task_struct *prev)
+int trace_run(struct pt_regs *ctx, struct rq *rq, struct task_struct *prev)
 {
     u32 pid, tgid;
diff --git a/tools/runqslower.py b/tools/runqslower.py
index 1d48be8a..16a5a4c2 100755
--- a/tools/runqslower.py
+++ b/tools/runqslower.py
@@ -97,7 +97,7 @@ int trace_ttwu_do_wakeup(struct pt_regs *ctx, struct rq *rq, struct task_struct
 }

 // calculate latency
-int trace_run(struct pt_regs *ctx, struct task_struct *prev)
+int trace_run(struct pt_regs *ctx, struct rq *rq, struct task_struct *prev)
 {
     u32 pid, tgid;
diff --git a/tools/solisten.py b/tools/solisten.py
index f2a0a342..16872212 100755
--- a/tools/solisten.py
+++ b/tools/solisten.py
@@ -100,7 +100,7 @@ int kprobe__inet_listen(struct pt_regs *ctx, struct socket *sock, int backlog)

     // Get network namespace id, if kernel supports it
 #ifdef CONFIG_NET_NS
-    evt.netns = sk->__sk_common.skc_net.net->ns.inum;
+    evt.netns = sk->__sk_common.skc_net->proc_inum;
 #else
     evt.netns = 0;
 #endif
diff --git a/tools/tcpsubnet.py b/tools/tcpsubnet.py
index 5f2a8062..1ba275bc 100755
--- a/tools/tcpsubnet.py
+++ b/tools/tcpsubnet.py
@@ -110,8 +110,8 @@ struct index_key_t {
 BPF_HASH(ipv4_send_bytes,
     struct index_key_t);

-int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct sock *sk,
-    struct msghdr *msg, size_t size)
+int kprobe__tcp_sendmsg(struct pt_regs *ctx, struct kiocb *iocb,
+    struct sock *sk, struct msghdr *msg, size_t size)
 {
     u16 family = sk->__sk_common.skc_family;
diff --git a/tools/tcptracer.py b/tools/tcptracer.py
index e61fe9ba..f00f0d19 100755
--- a/tools/tcptracer.py
+++ b/tools/tcptracer.py
@@ -115,7 +115,7 @@ static int read_ipv4_tuple(struct ipv4_tuple_t *tuple, struct sock *skp)
     u16 sport = sockp->inet_sport;
     u16 dport = skp->__sk_common.skc_dport;
 #ifdef CONFIG_NET_NS
-    net_ns_inum = skp->__sk_common.skc_net.net->ns.inum;
+    net_ns_inum = skp->__sk_common.skc_net->proc_inum;
 #endif

     ##FILTER_NETNS##
@@ -142,7 +142,7 @@ static int read_ipv6_tuple(struct ipv6_tuple_t *tuple, struct sock *skp)
     u16 sport = sockp->inet_sport;
     u16 dport = skp->__sk_common.skc_dport;
 #ifdef CONFIG_NET_NS
-    net_ns_inum = skp->__sk_common.skc_net.net->ns.inum;
+    net_ns_inum = skp->__sk_common.skc_net->proc_inum;
 #endif
     bpf_probe_read(&saddr, sizeof(saddr),
                    skp->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
@@ -360,8 +360,7 @@ int trace_close_entry(struct pt_regs *ctx, struct sock *skp)
     // Don't generate close events for connections that were never
     // established in the first place.
     if (oldstate == TCP_SYN_SENT ||
-        oldstate == TCP_SYN_RECV ||
-        oldstate == TCP_NEW_SYN_RECV)
+        oldstate == TCP_SYN_RECV )
         return 0;

     u8 ipver = 0;
@@ -432,7 +431,7 @@ int trace_accept_return(struct pt_regs *ctx)

     // Get network namespace id, if kernel supports it
 #ifdef CONFIG_NET_NS
-    net_ns_inum = newsk->__sk_common.skc_net.net->ns.inum;
+    net_ns_inum = newsk->__sk_common.skc_net->proc_inum;
 #endif

     ##FILTER_NETNS##
diff --git a/tools/xfsdist.py b/tools/xfsdist.py
index f409f90d..2976f9e2 100755
--- a/tools/xfsdist.py
+++ b/tools/xfsdist.py
@@ -137,12 +137,12 @@ bpf_text = bpf_text.replace('FACTOR', str(factor))
 b = BPF(text=bpf_text)

 # common file functions
-b.attach_kprobe(event="xfs_file_read_iter", fn_name="trace_entry")
-b.attach_kprobe(event="xfs_file_write_iter", fn_name="trace_entry")
+b.attach_kprobe(event="xfs_file_aio_read", fn_name="trace_entry")
+b.attach_kprobe(event="xfs_file_aio_write", fn_name="trace_entry")
 b.attach_kprobe(event="xfs_file_open", fn_name="trace_entry")
 b.attach_kprobe(event="xfs_file_fsync", fn_name="trace_entry")
-b.attach_kretprobe(event="xfs_file_read_iter", fn_name="trace_read_return")
-b.attach_kretprobe(event="xfs_file_write_iter", fn_name="trace_write_return")
+b.attach_kretprobe(event="xfs_file_aio_read", fn_name="trace_read_return")
+b.attach_kretprobe(event="xfs_file_aio_write", fn_name="trace_write_return")
 b.attach_kretprobe(event="xfs_file_open", fn_name="trace_open_return")
 b.attach_kretprobe(event="xfs_file_fsync", fn_name="trace_fsync_return")
diff --git a/tools/xfsslower.py b/tools/xfsslower.py
index 9fa12566..0ef50f6e 100755
--- a/tools/xfsslower.py
+++ b/tools/xfsslower.py
@@ -59,6 +59,7 @@ debug = 0
 bpf_text = """
 #include <uapi/linux/ptrace.h>
 #include <linux/fs.h>
+#include <linux/aio.h>
 #include <linux/sched.h>
 #include <linux/dcache.h>

@@ -93,7 +94,7 @@ BPF_PERF_OUTPUT(events);

 // Store timestamp and size on entry
 //
-// xfs_file_read_iter(), xfs_file_write_iter():
+// xfs_file_aio_read(), xfs_file_aio_write():
 int trace_rw_entry(struct pt_regs *ctx, struct kiocb *iocb)
 {
     u64 id = bpf_get_current_pid_tgid();
@@ -264,12 +265,12 @@ int trace_fsync_return(struct pt_regs *ctx)
 b = BPF(text=bpf_text)

 # common file functions
-b.attach_kprobe(event="xfs_file_read_iter", fn_name="trace_rw_entry")
fn_name="trace_rw_entry") +b.attach_kprobe(event="xfs_file_aio_read", fn_name="trace_rw_entry") +b.attach_kprobe(event="xfs_file_aio_write", fn_name="trace_rw_entry") b.attach_kprobe(event="xfs_file_open", fn_name="trace_open_entry") b.attach_kprobe(event="xfs_file_fsync", fn_name="trace_fsync_entry") -b.attach_kretprobe(event="xfs_file_read_iter", fn_name="trace_read_return") -b.attach_kretprobe(event="xfs_file_write_iter", fn_name="trace_write_return") +b.attach_kretprobe(event="xfs_file_aio_read", fn_name="trace_read_return") +b.attach_kretprobe(event="xfs_file_aio_write", fn_name="trace_write_return") b.attach_kretprobe(event="xfs_file_open", fn_name="trace_open_return") b.attach_kretprobe(event="xfs_file_fsync", fn_name="trace_fsync_return") -- 2.20.1