def _attach(self):
    """Attach the shared tracepoint machinery, then each probe specifier.

    With --verbose, dump the kernel's view of currently open u/kprobes.
    """
    Tracepoint.attach(self.bpf)
    for spec in self.specifiers:
        spec.attach(self.bpf)
    if self.args.verbose:
        print("open uprobes: %s" % BPF.open_uprobes())
        print("open kprobes: %s" % BPF.open_kprobes())
def attach(self, bpf):
    """Attach this probe to the given BPF module.

    User probes are verified by counting open uprobes before and after the
    attach (the bcc attach call does not always raise on failure); kernel
    probes are verified the same way against the kprobe count.
    """
    self.bpf = bpf
    n_uprobes = len(BPF.open_uprobes())
    n_kprobes = len(BPF.open_kprobes())
    if self.is_user:
        # "r" selects the return probe; anything else is an entry probe.
        attach_fn = (bpf.attach_uretprobe if self.probe_type == "r"
                     else bpf.attach_uprobe)
        attach_fn(name=self.library, sym=self.function,
                  fn_name=self.probe_func_name, pid=self.pid or -1)
        if len(BPF.open_uprobes()) != n_uprobes + 1:
            self._bail("error attaching probe")
    else:
        # Both return probes ("r") and tracepoints ("t") use kretprobes here.
        if self.probe_type in ("r", "t"):
            bpf.attach_kretprobe(event=self.function,
                                 fn_name=self.probe_func_name)
        else:
            bpf.attach_kprobe(event=self.function,
                              fn_name=self.probe_func_name)
        if len(BPF.open_kprobes()) != n_kprobes + 1:
            self._bail("error attaching probe")
    if self.entry_probe_required:
        self._attach_entry_probe()
def test_ext_ptr_maps(self):
    """Compile a program that stores external pointers (struct sock *) in a
    map and dereferences them on lookup; both probes must load cleanly."""
    source = """
#include <uapi/linux/ptrace.h>
#include <net/sock.h>
#include <bcc/proto.h>

BPF_HASH(currsock, u32, struct sock *);

int trace_entry(struct pt_regs *ctx, struct sock *sk,
                struct sockaddr *uaddr, int addr_len) {
    u32 pid = bpf_get_current_pid_tgid();
    currsock.update(&pid, &sk);
    return 0;
};

int trace_exit(struct pt_regs *ctx) {
    u32 pid = bpf_get_current_pid_tgid();
    struct sock **skpp;
    skpp = currsock.lookup(&pid);
    if (skpp) {
        struct sock *skp = *skpp;
        return skp->__sk_common.skc_dport;
    }
    return 0;
}
"""
    module = BPF(text=source)
    module.load_func("trace_entry", BPF.KPROBE)
    module.load_func("trace_exit", BPF.KPROBE)
class patch_panel(object):
    """Owns the patch-panel datapath: compiles it, and exposes its program
    table (vnf_prog) and forwarding table (forwarder_vi) for wiring VNFs."""

    def __init__(self):
        # Monotonic id handed out to each registered VNF program.
        self.vnf_counter = 0
        self.dataplane = BPF(src_file="core/patch_panel/patch_panel.c")
        # Loading Tables from db
        self.vnf_map = self.dataplane.get_table("vnf_prog")
        self.forwarder = self.dataplane.get_table("forwarder_vi")
        # Loading functions from db
        self.func_linker = self.dataplane.load_func("linker", BPF.SCHED_CLS)

    def get_fd(self):
        # File descriptor of the linker classifier program.
        return self.func_linker.fd

    # TODO: delete vnf code.
    def add_new_vnf(self, vnf_function_fd):
        """Register a VNF program fd in the tail-call map; returns its slot id."""
        # TODO: add code to check if vnf_fd already exists.
        self.vnf_map[self.vnf_map.Key(
            self.vnf_counter)] = self.vnf_map.Leaf(vnf_function_fd)
        self.vnf_counter = self.vnf_counter + 1
        return self.vnf_counter - 1

    def link_interfaces(self, vnf_from_iface, is_virtual, vnf_to_iface, vnf_to_fd):
        """Program the forwarder so traffic from one interface is steered to
        the target interface/program."""
        self.forwarder[self.forwarder.Key(
            vnf_from_iface)] = self.forwarder.Leaf(
                is_virtual, vnf_to_iface, vnf_to_fd)
def test_brb2(self):
    """End-to-end bridge/router/PEM datapath test.

    Builds two VM namespaces connected through a router namespace and two
    bridges, then exercises the path with ping, iperf, and netperf.  All
    created interfaces are torn down in the finally block even on failure.
    """
    try:
        b = BPF(src_file=arg1, debug=0)
        self.pem_fn = b.load_func("pem", BPF.SCHED_CLS)
        self.pem_dest= b.get_table("pem_dest")
        self.pem_stats = b.get_table("pem_stats")

        # set up the topology
        self.set_default_const()
        # Each VM namespace gets the pem classifier on its veth, dropping
        # traffic not explicitly forwarded by the program.
        (ns1_ipdb, self.ns1_eth_out, _) = sim._create_ns(self.ns1,
            ipaddr=self.vm1_ip+'/24', fn=self.pem_fn, action='drop',
            disable_ipv6=True)
        (ns2_ipdb, self.ns2_eth_out, _) = sim._create_ns(self.ns2,
            ipaddr=self.vm2_ip+'/24', fn=self.pem_fn, action='drop',
            disable_ipv6=True)
        ns1_ipdb.routes.add({'dst': self.vm2_rtr_mask,
                             'gateway': self.vm1_rtr_ip}).commit()
        ns2_ipdb.routes.add({'dst': self.vm1_rtr_mask,
                             'gateway': self.vm2_rtr_ip}).commit()
        (_, self.nsrtr_eth0_out, _) = sim._create_ns(self.ns_router,
            ipaddr=self.vm1_rtr_ip+'/24', disable_ipv6=True)
        (rt_ipdb, self.nsrtr_eth1_out, _) = sim._ns_add_ifc(self.ns_router,
            "eth1", "ns_router2", ipaddr=self.vm2_rtr_ip+'/24',
            disable_ipv6=True)
        # enable ip forwarding in router ns
        nsp = NSPopen(rt_ipdb.nl.netns,
                      ["sysctl", "-w", "net.ipv4.ip_forward=1"])
        nsp.wait(); nsp.release()

        # for each VM connecting to pem, there will be a corresponding veth
        # connecting to the bridge
        self.setup_br(self.br1, self.nsrtr_eth0_out.ifname,
                      self.veth_pem_2_br1, self.veth_br1_2_pem)
        self.setup_br(self.br2, self.nsrtr_eth1_out.ifname,
                      self.veth_pem_2_br2, self.veth_br2_2_pem)

        # load the program and configure maps
        self.config_maps()

        # ping
        nsp = NSPopen(ns1_ipdb.nl.netns,
                      ["ping", self.vm2_ip, "-c", "2"])
        nsp.wait(); nsp.release()
        # one arp request/reply, 2 icmp request/reply per VM, total 6 packets
        # per VM, 12 packets total
        self.assertEqual(self.pem_stats[c_uint(0)].value, 12)

        # Throughput check: iperf client/server across the namespaces.
        nsp_server = NSPopen(ns2_ipdb.nl.netns, ["iperf", "-s", "-xSC"])
        sleep(1)
        nsp = NSPopen(ns1_ipdb.nl.netns,
                      ["iperf", "-c", self.vm2_ip, "-t", "1", "-xSC"])
        nsp.wait(); nsp.release()
        nsp_server.kill(); nsp_server.wait(); nsp_server.release()

        # Latency/throughput checks with netperf (stream then request/response).
        nsp_server = NSPopen(ns2_ipdb.nl.netns, ["netserver", "-D"])
        sleep(1)
        nsp = NSPopen(ns1_ipdb.nl.netns,
                      ["netperf", "-l", "1", "-H", self.vm2_ip, "--",
                       "-m", "65160"])
        nsp.wait(); nsp.release()
        nsp = NSPopen(ns1_ipdb.nl.netns,
                      ["netperf", "-l", "1", "-H", self.vm2_ip,
                       "-t", "TCP_RR"])
        nsp.wait(); nsp.release()
        nsp_server.kill(); nsp_server.wait(); nsp_server.release()
    finally:
        # Remove every interface we may have created; guards make cleanup
        # safe even if setup failed partway through.
        if self.br1 in ipdb.interfaces:
            ipdb.interfaces[self.br1].remove().commit()
        if self.br2 in ipdb.interfaces:
            ipdb.interfaces[self.br2].remove().commit()
        if self.veth_pem_2_br1 in ipdb.interfaces:
            ipdb.interfaces[self.veth_pem_2_br1].remove().commit()
        if self.veth_pem_2_br2 in ipdb.interfaces:
            ipdb.interfaces[self.veth_pem_2_br2].remove().commit()
        sim.release()
        ipdb.release()
def test_sscanf_string(self):
    """Round-trip a leaf with nested char-array structs through
    leaf_sprintf() and leaf_scanf()."""
    text = """
struct Symbol {
    char name[128];
    char path[128];
};
struct Event {
    uint32_t pid;
    uint32_t tid;
    struct Symbol stack[64];
};
BPF_TABLE("array", int, struct Event, comms, 1);
"""
    b = BPF(text=text)
    table = b.get_table("comms")

    # An untouched leaf prints zeros and empty strings in all 64 slots.
    fill = b' { "" "" }' * 63
    zero_str = table.leaf_sprintf(table[0])
    self.assertEqual(zero_str, b'{ 0x0 0x0 [ { "" "" }%s ] }' % fill)

    # Fill the first stack slot and print again.
    leaf = table.Leaf(1, 2)
    name = b"libxyz"
    path = b"/usr/lib/libxyz.so"
    leaf.stack[0].name = name
    leaf.stack[0].path = path
    printed = table.leaf_sprintf(leaf)
    self.assertEqual(printed,
                     b'{ 0x1 0x2 [ { "%s" "%s" }%s ] }' % (name, path, fill))

    # Scanning the printed form must reconstruct the same field values.
    parsed = table.leaf_scanf(printed)
    self.assertEqual(parsed.pid, 1)
    self.assertEqual(parsed.tid, 2)
    self.assertEqual(parsed.stack[0].name, name)
    self.assertEqual(parsed.stack[0].path, path)
def test_attach1(self):
    """Enable the USDT probe in the target process and verify that values
    1, 2 and 3 were all observed and nothing else was."""
    # enable USDT probe from given PID and verifier generated BPF programs
    usdt = USDT(pid=int(self.app.pid))
    usdt.enable_probe(probe="probe", fn_name="do_trace")
    b = BPF(text=self.bpf_text, usdt_contexts=[usdt])

    # Flags flipped by the perf-buffer callback, one per expected value.
    self.probe_value_1 = 0
    self.probe_value_2 = 0
    self.probe_value_3 = 0
    self.probe_value_other = 0

    def print_event(cpu, data, size):
        value = ct.cast(data, ct.POINTER(ct.c_int)).contents.value
        if value == 1:
            self.probe_value_1 = 1
        elif value == 2:
            self.probe_value_2 = 1
        elif value == 3:
            self.probe_value_3 = 1
        else:
            self.probe_value_other = 1

    b["event"].open_perf_buffer(print_event)
    for _ in range(10):
        b.perf_buffer_poll()

    self.assertTrue(self.probe_value_1 != 0)
    self.assertTrue(self.probe_value_2 != 0)
    self.assertTrue(self.probe_value_3 != 0)
    self.assertTrue(self.probe_value_other == 0)
def run(pid):
    """Trace mutex contention in process `pid`, printing per-mutex init
    stacks and per-thread wait/hold statistics every 5 seconds.

    Fixes: `bpf.sy(...)` was a typo for `bpf.sym(...)`; the TODO body is
    filled in with the same formatted line used by the finished variant of
    this tool.
    """
    bpf = BPF(text=text)
    attach(bpf, pid)
    init_stacks = bpf["init_stacks"]
    stacks = bpf["stacks"]
    locks = bpf["locks"]
    mutex_lock_hist = bpf["mutex_lock_hist"]
    mutex_wait_hist = bpf["mutex_wait_hist"]
    while True:
        sleep(5)
        # Assign a short "#N" id to every mutex we saw initialized.
        mutex_ids = {}
        next_mutex_id = 1
        for k, v in init_stacks.items():
            mutex_id = "#%d" % next_mutex_id
            next_mutex_id += 1
            mutex_ids[k.value] = mutex_id
            print("init stack for mutex %x (%s)" % (k.value, mutex_id))
            print_stack(bpf, pid, stacks, v.value)
            print("")
        # Group lock records by owning thread; groupby needs sorted input.
        # (Portable lambda form; tuple-parameter lambdas are Python-2-only.)
        grouper = lambda kv: kv[0].tid
        sorted_by_thread = sorted(locks.items(), key=grouper)
        locks_by_thread = itertools.groupby(sorted_by_thread, grouper)
        for tid, items in locks_by_thread:
            print("thread %d" % tid)
            for k, v in sorted(items, key=lambda kv: -kv[1].wait_time_ns):
                # Prefer the "#N" id; fall back to symbol resolution.
                # BUGFIX: was bpf.sy(...), a typo for bpf.sym(...).
                mutex_descr = mutex_ids[k.mtx] if k.mtx in mutex_ids \
                    else bpf.sym(k.mtx, pid)
                print("\tmutex %s ::: wait time %.2fus ::: "
                      "hold time %.2fus ::: enter count %d" %
                      (mutex_descr, v.wait_time_ns / 1000.0,
                       v.lock_time_ns / 1000.0, v.enter_count))
                print_stack(bpf, pid, stacks, k.lock_stack_id)
                print("")
        mutex_wait_hist.print_log2_hist(val_type="wait time (us)")
        mutex_lock_hist.print_log2_hist(val_type="hold time (us)")
def _attach(self):
    """Attach tracepoint support code, then every configured probe.

    With --verbose, print the kernel's lists of open u/kprobes afterwards.
    """
    Tracepoint.attach(self.bpf)
    for probe_obj in self.probes:
        probe_obj.attach(self.bpf)
    if self.args.verbose:
        print("open uprobes: %s" % BPF.open_uprobes())
        print("open kprobes: %s" % BPF.open_kprobes())
def test_func(self):
    # Verify disassemble_func() and decode_table() output for a trivial
    # program whose map key is a packed struct containing bitfields.
    b = BPF(text="""
struct key_t {int a; short b; struct {int c:4; int d:8;} e;} __attribute__((__packed__));
BPF_HASH(test_map, struct key_t);
int test_func(void)
{
        return 1;
}""")
    # The program body compiles to a single constant load plus exit.
    self.assertEqual(
        """Disassemble of BPF program test_func:
   0: (b7) r0 = 1
   1: (95) exit""",
        b.disassemble_func("test_func"))
    # decode_table() must reproduce the struct layout, bitfields included.
    self.assertEqual(
        """Layout of BPF map test_map (type HASH, FD 3, ID 0):
  struct {
    int a;
    short b;
    struct {
      int c:4;
      int d:8;
    } e;
  } key;
  unsigned long long value;""",
        b.decode_table("test_map"))
def test_perf_buffer_for_each_cpu(self):
    """Trigger nanosleep on every online CPU and expect at least one perf
    event per CPU to arrive."""
    self.events = []

    class Data(ct.Structure):
        _fields_ = [("cpu", ct.c_ulonglong)]

    def collect(cpu, data, size):
        self.assertGreater(size, ct.sizeof(Data))
        self.events.append(ct.cast(data, ct.POINTER(Data)).contents)

    prog = """
BPF_PERF_OUTPUT(events);
int kprobe__sys_nanosleep(void *ctx) {
    struct {
        u64 cpu;
    } data = {bpf_get_smp_processor_id()};
    events.perf_submit(ctx, &data, sizeof(data));
    return 0;
}
"""
    b = BPF(text=prog)
    b["events"].open_perf_buffer(collect)
    online_cpus = get_online_cpus()
    # Pin a short sleep to each CPU so every CPU submits an event.
    for cpu in online_cpus:
        subprocess.call(['taskset', '-c', str(cpu), 'sleep', '0.1'])
    b.kprobe_poll()
    b.cleanup()
    self.assertGreaterEqual(
        len(self.events), len(online_cpus),
        'Received only {}/{} events'.format(len(self.events),
                                            len(online_cpus)))
def test_multiple_key(self):
    """Exercise print_log2_hist() with a composite two-field hash key."""
    b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <uapi/linux/fs.h>

struct hist_s_key {
    u64 key_1;
    u64 key_2;
};

struct hist_key {
    struct hist_s_key s_key;
    u64 slot;
};

BPF_HISTOGRAM(mk_hist, struct hist_key, 1024);

int kprobe__vfs_read(struct pt_regs *ctx, struct file *file,
                     char __user *buf, size_t count) {
    struct hist_key key = {.slot = bpf_log2l(count)};
    key.s_key.key_1 = (unsigned long)buf & 0x70;
    key.s_key.key_2 = (unsigned long)buf & 0x7;
    mk_hist.increment(key);
    return 0;
}
""")

    def bucket_sort(buckets):
        buckets.sort()
        return buckets

    def fmt_section(bucket):
        # Render the (key_1, key_2) tuple as the section heading.
        return "%3d %d" % (bucket[0], bucket[1])

    def split_key(bucket):
        return (bucket.key_1, bucket.key_2)

    # Let vfs_read fire for a while so the histogram has data.
    for _ in range(0, 100):
        time.sleep(0.01)

    b["mk_hist"].print_log2_hist("size", "k_1 & k_2",
                                 section_print_fn=fmt_section,
                                 bucket_fn=split_key,
                                 strip_leading_zero=True,
                                 bucket_sort_fn=bucket_sort)
    b.cleanup()
def test_struct_custom_func(self): test_prog2 = """ typedef struct counter { u32 c1; u32 c2; } counter; BPF_TABLE("percpu_hash", u32, counter, stats, 1); int hello_world(void *ctx) { u32 key=0; counter value = {0,0}, *val; val = stats.lookup_or_init(&key, &value); val->c1 += 1; val->c2 += 1; return 0; } """ self.addCleanup(self.cleanup) bpf_code = BPF(text=test_prog2) stats_map = bpf_code.get_table("stats", reducer=lambda x,y: stats_map.sLeaf(x.c1+y.c1)) bpf_code.attach_kprobe(event="sys_clone", fn_name="hello_world") ini = stats_map.Leaf() for i in ini: i = stats_map.sLeaf(0,0) stats_map[ stats_map.Key(0) ] = ini f = os.popen("hostname") f.close() self.assertEqual(len(stats_map),1) k = stats_map[ stats_map.Key(0) ] self.assertGreater(k.c1, 0L)
def test_u32(self): test_prog1 = """ BPF_TABLE("percpu_array", u32, u32, stats, 1); int hello_world(void *ctx) { u32 key=0; u32 value = 0, *val; val = stats.lookup_or_init(&key, &value); *val += 1; return 0; } """ self.addCleanup(self.cleanup) bpf_code = BPF(text=test_prog1) stats_map = bpf_code.get_table("stats") bpf_code.attach_kprobe(event="sys_clone", fn_name="hello_world") ini = stats_map.Leaf() for i in range(0, multiprocessing.cpu_count()): ini[i] = 0 stats_map[ stats_map.Key(0) ] = ini f = os.popen("hostname") f.close() self.assertEqual(len(stats_map),1) val = stats_map[ stats_map.Key(0) ] sum = stats_map.sum(stats_map.Key(0)) avg = stats_map.average(stats_map.Key(0)) max = stats_map.max(stats_map.Key(0)) self.assertGreater(sum.value, 0L) self.assertGreater(max.value, 0L)
class rv_manager(object):
    """Owns the rendezvous datapath: compiles it and exposes its routing
    tables plus the phy<->virt classifier programs."""

    def __init__(self):
        self.ipr = IPRoute()
        self.dataplane = BPF(src_file="core/rv_manager/rv_manager.c")
        # Loading Tables from dp
        self.next = self.dataplane.get_table("next_hop")
        self.ifc2vi = self.dataplane.get_table("rvm_ifc2vi")
        self.vi2ifc = self.dataplane.get_table("rvm_vi2ifc")
        # Loading Functions from db
        self.func_phy2virt = self.dataplane.load_func(
            "rvm_function_p2v", BPF.SCHED_CLS)
        self.func_virt2phy = self.dataplane.load_func(
            "rvm_function_v2p", BPF.SCHED_CLS)

    def set_next_hop(self, next_vnf):
        # Single-entry table: slot 0 always holds the next VNF id.
        self.next[self.next.Key(0)] = self.next.Leaf(next_vnf)

    def get_fd(self):
        # File descriptor of the virt->phy classifier program.
        return self.func_virt2phy.fd

    def set_bpf_ingress(self, ifc_index, func):
        """Install `func` as an ingress tc classifier on the interface."""
        self.ipr.tc("add", "ingress", ifc_index, "ffff:")
        self.ipr.tc("add-filter", "bpf", ifc_index, ":1", fd=func.fd,
            name=func.name, parent="ffff:", action="drop", classid=1)

    def add_new_workload(self, phy_iface_index, virt_iface_index):
        """Register the phy<->virt interface mapping and hook the physical
        interface's ingress with the phy->virt program."""
        self.ifc2vi[self.ifc2vi.Key(phy_iface_index)] = self.ifc2vi.Leaf(
            virt_iface_index)
        self.vi2ifc[self.vi2ifc.Key(virt_iface_index)] = self.vi2ifc.Leaf(
            phy_iface_index)
        self.set_bpf_ingress(phy_iface_index, self.func_phy2virt)
def run(pid):
    """Attach to process `pid` and print mutex init stacks plus per-thread
    wait/hold statistics every five seconds, forever."""
    bpf = BPF(text=text)
    attach(bpf, pid)
    init_stacks = bpf["init_stacks"]
    stacks = bpf["stacks"]
    locks = bpf["locks"]
    mutex_lock_hist = bpf["mutex_lock_hist"]
    mutex_wait_hist = bpf["mutex_wait_hist"]
    while True:
        sleep(5)
        # Hand out a short "#N" id per initialized mutex and dump its stack.
        mutex_ids = {}
        next_mutex_id = 1
        for k, v in init_stacks.items():
            mutex_id = "#%d" % next_mutex_id
            next_mutex_id += 1
            mutex_ids[k.value] = mutex_id
            print("init stack for mutex %x (%s)" % (k.value, mutex_id))
            print_stack(bpf, pid, stacks, v.value)
            print("")
        # Group lock records by thread id (groupby needs sorted input).
        by_tid = lambda kv: kv[0].tid
        sorted_by_thread = sorted(locks.items(), key=by_tid)
        for tid, items in itertools.groupby(sorted_by_thread, by_tid):
            print("thread %d" % tid)
            # Worst waits first within each thread.
            for k, v in sorted(items, key=lambda kv: -kv[1].wait_time_ns):
                if k.mtx in mutex_ids:
                    mutex_descr = mutex_ids[k.mtx]
                else:
                    mutex_descr = bpf.sym(k.mtx, pid)
                print("\tmutex %s ::: wait time %.2fus ::: hold time %.2fus ::: enter count %d"
                      % (mutex_descr, v.wait_time_ns / 1000.0,
                         v.lock_time_ns / 1000.0, v.enter_count))
                print_stack(bpf, pid, stacks, k.lock_stack_id)
                print("")
        mutex_wait_hist.print_log2_hist(val_type="wait time (us)")
        mutex_lock_hist.print_log2_hist(val_type="hold time (us)")
class TestProbeQuota(TestCase):
    """Attaching more kprobes than the quota allows must raise."""

    def setUp(self):
        # Minimal program whose only role is to be attached everywhere.
        self.b = BPF(text="""int count(void *ctx) { return 0; }""")

    def test_probe_quota(self):
        # A match-everything regex necessarily exceeds the probe quota.
        with self.assertRaises(Exception):
            self.b.attach_kprobe(event_re=".*", fn_name="count")
def test_perf_buffer(self):
    """A nanosleep-triggered perf event must invoke the callback at least once."""
    self.counter = 0

    class Data(ct.Structure):
        _fields_ = [("ts", ct.c_ulonglong)]

    def on_event(cpu, data, size):
        self.assertGreater(size, ct.sizeof(Data))
        event = ct.cast(data, ct.POINTER(Data)).contents
        self.counter += 1

    prog = """
BPF_PERF_OUTPUT(events);
int kprobe__sys_nanosleep(void *ctx) {
    struct {
        u64 ts;
    } data = {bpf_ktime_get_ns()};
    events.perf_submit(ctx, &data, sizeof(data));
    return 0;
}
"""
    b = BPF(text=prog)
    b["events"].open_perf_buffer(on_event)
    # Our own sleep triggers the nanosleep kprobe.
    time.sleep(0.1)
    b.kprobe_poll()
    self.assertGreater(self.counter, 0)
def test_log_no_debug(self):
    # With debug=0, the error text from a failed load must still end up in
    # the captured stream (self.fp, set up by the fixture).
    b = BPF(text=text, debug=0)
    try:
        ingress = b.load_func("sim_port",BPF.SCHED_CLS)
    except Exception:
        self.fp.flush()
        self.fp.seek(0)
        # NOTE(review): if load_func unexpectedly succeeds, this test passes
        # without asserting anything -- confirm that silent pass is intended.
        self.assertEqual(error_msg in self.fp.read(), True)
def test_probe_read_array_accesses8(self):
    """Array access through a nested struct member must compile and load."""
    source = """
#include <linux/mm_types.h>
int test(struct pt_regs *ctx, struct mm_struct *mm) {
    return mm->rss_stat.count[MM_ANONPAGES].counter;
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
def test_probe_read_array_accesses5(self):
    """Indexing through a dereferenced double pointer must compile and load."""
    source = """
#include <linux/ptrace.h>
int test(struct pt_regs *ctx, char **name) {
    return (*name)[1];
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
def test_probe_read_array_accesses7(self):
    """Array access inside a union member chain must compile and load."""
    source = """
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *sk) {
    return sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32[0];
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
def test_probe_read2(self):
    """A probe comparing two scalar arguments must compile and load."""
    source = """
#include <linux/sched.h>
#include <uapi/linux/ptrace.h>
int count_foo(struct pt_regs *ctx, unsigned long a, unsigned long b) {
    return (a != b);
}
"""
    bpf = BPF(text=source, debug=0)
    func = bpf.load_func("count_foo", BPF.KPROBE)
def test_unary_operator(self):
    """Logical-not on a probe-read member must compile and attach."""
    source = """
#include <linux/fs.h>
#include <uapi/linux/ptrace.h>
int trace_read_entry(struct pt_regs *ctx, struct file *file) {
    return !file->f_op->read_iter;
}
"""
    bpf = BPF(text=source)
    bpf.attach_kprobe(event="__vfs_read", fn_name="trace_read_entry")
def test_probe_read_array_accesses3(self):
    """Indexing a parenthesized member expression must compile and load."""
    source = """
#include <linux/ptrace.h>
#include <linux/dcache.h>
int test(struct pt_regs *ctx, const struct qstr *name) {
    return (name->name)[1];
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
def test_paren_probe_read(self):
    """A member read through a parenthesized cast must compile and load."""
    source = """
#include <net/inet_sock.h>
int trace_entry(struct pt_regs *ctx, struct sock *sk) {
    u16 sport = ((struct inet_sock *)sk)->inet_sport;
    return sport;
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("trace_entry", BPF.KPROBE)
def test_ext_ptr_from_helper(self):
    """Dereferencing a pointer returned by a BPF helper must compile and load."""
    source = """
#include <linux/sched.h>
int test(struct pt_regs *ctx) {
    struct task_struct *task = (struct task_struct *)bpf_get_current_task();
    return task->prio;
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
def test_probe_read_nested_member1(self):
    """Taking the address of a nested member, then dereferencing it,
    must compile and load."""
    source = """
#include <net/inet_sock.h>
int test(struct pt_regs *ctx, struct sock *skp) {
    u32 *daddr = &skp->sk_daddr;
    return *daddr;
}
"""
    bpf = BPF(text=source)
    func = bpf.load_func("test", BPF.KPROBE)
class TestProbeNotExist(TestCase):
    """Attaching to a nonexistent kernel symbol must raise."""

    def setUp(self):
        self.b = BPF(text="""int count(void *ctx) { return 0; }""")

    def test_not_exist(self):
        with self.assertRaises(Exception):
            # BUGFIX: was `b.attach_kprobe(...)` -- `b` is undefined in this
            # scope, so assertRaises was satisfied by a NameError instead of
            # the attach failure this test is meant to exercise.
            self.b.attach_kprobe(event="___doesnotexist", fn_name="count")

    def tearDown(self):
        self.b.cleanup()
def test_probe_read1(self):
    """Reading a member of a probe argument struct must compile and load."""
    source = """
#include <linux/sched.h>
#include <uapi/linux/ptrace.h>
int count_sched(struct pt_regs *ctx, struct task_struct *prev) {
    pid_t p = prev->pid;
    return (p != -1);
}
"""
    bpf = BPF(text=source, debug=0)
    func = bpf.load_func("count_sched", BPF.KPROBE)
unsigned int irq_handler(struct pt_regs *regs) { struct irq_desc *desc; int irq_num; unsigned int vector = ~regs->orig_ax; bpf_trace_printk("VECTOR: %d \\n ", vector); desc = __this_cpu_read(vector_irq[vector]); return 0; } """ # load BPF program b = BPF(text=prog) # b.attach_kprobe(event="sys_clone", fn_name="hello") b.attach_kprobe(event="do_IRQ", fn_name="irq_handler") # header # print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "MESSAGE")) # format output while 1: try: (task, pid, cpu, flags, ts, msg) = b.trace_fields() except ValueError: continue print("%-18.9f %-16s %-6d %s" % (ts, task, pid, msg))
# Copyright (c) PLUMgrid, Inc. # Licensed under the Apache License, Version 2.0 (the "License") from bcc import BPF from ctypes import c_uint, c_int, c_ulonglong, Structure import json from netaddr import IPAddress from os import rename from pyroute2 import IPRoute, NetNS, IPDB, NSPopen import sys from time import sleep ipr = IPRoute() ipdb = IPDB(nl=ipr) b = BPF(src_file="monitor.c", debug=0) ingress_fn = b.load_func("handle_ingress", BPF.SCHED_CLS) egress_fn = b.load_func("handle_egress", BPF.SCHED_CLS) outer_fn = b.load_func("handle_outer", BPF.SCHED_CLS) inner_fn = b.load_func("handle_inner", BPF.SCHED_CLS) stats = b.get_table("stats") # using jump table for inner and outer packet split parser = b.get_table("parser") parser[c_int(1)] = c_int(outer_fn.fd) parser[c_int(2)] = c_int(inner_fn.fd) ifc = ipdb.interfaces.eth0 ipr.tc("add", "ingress", ifc.index, "ffff:") ipr.tc("add-filter", "bpf",
def setUp(self):
    """Build the scheduler-switch counter and keep a handle on its stats map."""
    module = BPF(text=text, debug=0)
    self.stats = module.get_table("stats")
    module.attach_kprobe(event="finish_task_switch", fn_name="count_sched")
"ruby_creturn", "bpf_usdt_readarg(1, ctx, &clazz);", "bpf_usdt_readarg(2, ctx, &method);", is_return=True) else: print("No language detected; use -l to trace a language.") exit(1) if args.ebpf or args.verbose: if args.verbose: print(usdt.get_text()) print(program) if args.ebpf: exit() bpf = BPF(text=program, usdt_contexts=[usdt]) print("Tracing method calls in %s process %d... Ctrl-C to quit." % (language, args.pid)) print("%-3s %-6s %-6s %-8s %s" % ("CPU", "PID", "TID", "TIME(us)", "METHOD")) class CallEvent(ct.Structure): _fields_ = [("depth", ct.c_ulonglong), ("pid", ct.c_ulonglong), ("clazz", ct.c_char * 80), ("method", ct.c_char * 80)] start_ts = time.time() def print_event(cpu, data, size): event = ct.cast(data, ct.POINTER(CallEvent)).contents
} """ if check_runnable_weight_field(): bpf_text = bpf_text.replace('RUNNABLE_WEIGHT_FIELD', 'unsigned long runnable_weight;') else: bpf_text = bpf_text.replace('RUNNABLE_WEIGHT_FIELD', '') # code substitutions if debug or args.ebpf: print(bpf_text) if args.ebpf: exit() # initialize BPF & perf_events b = BPF(text=bpf_text) # TODO: check for HW counters first and use if more accurate b.attach_perf_event(ev_type=PerfType.SOFTWARE, ev_config=PerfSWConfig.TASK_CLOCK, fn_name="do_perf_event", sample_period=0, sample_freq=frequency) if args.csv: if args.timestamp: print("TIME", end=",") print("TIMESTAMP_ns", end=",") print(",".join("CPU" + str(c) for c in range(ncpu)), end="") if args.fullcsv: print(",", end="") print(",".join("OFFSET_ns_CPU" + str(c) for c in range(ncpu)), end="") print() else:
from builtins import input
from ctypes import c_int, c_uint
from http.server import HTTPServer, SimpleHTTPRequestHandler
import json
from netaddr import EUI, IPAddress
from pyroute2 import IPRoute, NetNS, IPDB, NSPopen
from socket import htons, AF_INET
from threading import Thread
from subprocess import call, Popen, PIPE

# Command line: <num_hosts> <host_id> <dhcp> <gretap>
# (argv is presumably imported from sys earlier in the file -- confirm.)
num_hosts = int(argv[1])
host_id = int(argv[2])
dhcp = int(argv[3])
gretap = int(argv[4])

# Compile the tunnel-mesh datapath and pull out its handlers and maps.
b = BPF(src_file="tunnel_mesh.c")
ingress_fn = b.load_func("handle_ingress", BPF.SCHED_CLS)
egress_fn = b.load_func("handle_egress", BPF.SCHED_CLS)
tunkey2if = b.get_table("tunkey2if")
if2tunkey = b.get_table("if2tunkey")
conf = b.get_table("conf")

ipr = IPRoute()
ipdb = IPDB(nl=ipr)
ifc = ipdb.interfaces.eth0

# ifcs to cleanup at the end
ifc_gc = []

# dhcp server and client processes
if (mep == 0) { bpf_get_current_comm(&data6.task, sizeof(data6.task)); } else { bpf_probe_read(&data6.task, sizeof(data6.task), (void *)mep->task); } ipv6_events.perf_submit(args, &data6, sizeof(data6)); } if (mep != 0) whoami.delete(&sk); return 0; } """ if (BPF.tracepoint_exists("sock", "inet_sock_set_state")): bpf_text += bpf_text_tracepoint else: bpf_text += bpf_text_kprobe # code substitutions if args.pid: bpf_text = bpf_text.replace('FILTER_PID', 'if (pid != %s) { return 0; }' % args.pid) if args.remoteport: dports = [int(dport) for dport in args.remoteport.split(',')] dports_if = ' && '.join(['dport != %d' % dport for dport in dports]) bpf_text = bpf_text.replace( 'FILTER_DPORT', 'if (%s) { birth.delete(&sk); return 0; }' % dports_if) if args.localport: lports = [int(lport) for lport in args.localport.split(',')]
pid = "pid" section = "tid" bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist, pid_key_t);') bpf_text = bpf_text.replace( 'STORE', 'pid_key_t key = {.id = ' + pid + ', .slot = bpf_log2l(delta)}; ' + 'dist.increment(key);') else: section = "" bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);') bpf_text = bpf_text.replace('STORE', 'dist.increment(bpf_log2l(delta));') if debug or args.ebpf: print(bpf_text) if args.ebpf: exit() b = BPF(text=bpf_text) b.attach_kprobe(event="finish_task_switch", fn_name="sched_switch") print("Tracing %s-CPU time... Hit Ctrl-C to end." % ("off" if args.offcpu else "on")) exiting = 0 if args.interval else 1 dist = b.get_table("dist") while (1): try: sleep(int(args.interval)) except KeyboardInterrupt: exiting = 1 print() if args.timestamp:
def print_ipv6_event(cpu, data, size):
    # Decode one IPv6 connect event from the perf buffer and print a row;
    # with -t, prefix a timestamp relative to the first event seen.
    event = ct.cast(data, ct.POINTER(Data_ipv6)).contents
    global start_ts
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="")
    print("%-6d %-12.12s %-2d %-16s %-16s %-4d" % (event.pid,
        event.task.decode('utf-8', 'replace'), event.ip,
        inet_ntop(AF_INET6, event.saddr),
        inet_ntop(AF_INET6, event.daddr), event.dport))

# initialize BPF
b = BPF(text=bpf_text)
# Entry probes record the attempt; return probes emit the completed event.
b.attach_kprobe(event="tcp_v4_connect", fn_name="trace_connect_entry")
b.attach_kprobe(event="tcp_v6_connect", fn_name="trace_connect_entry")
b.attach_kretprobe(event="tcp_v4_connect", fn_name="trace_connect_v4_return")
b.attach_kretprobe(event="tcp_v6_connect", fn_name="trace_connect_v6_return")

# header
if args.timestamp:
    print("%-9s" % ("TIME(s)"), end="")
print("%-6s %-12s %-2s %-16s %-16s %-4s" % ("PID", "COMM", "IP", "SADDR",
    "DADDR", "DPORT"))

start_ts = 0

# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
from bcc import BPF

# Trace every dev_queue_xmit() call and log the skb length and protocol
# through the shared trace pipe.
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <bcc/proto.h>
#include <linux/skbuff.h>

int kprobe__dev_queue_xmit(struct pt_regs *ctx, struct sk_buff *skb)
{
    bpf_trace_printk("send a packet! len = %d protocol = %x\\n",
                     skb->len, skb->protocol);
    return 0;
}
""", debug=0)

print("tracing...")
while True:
    try:
        fields = b.trace_fields()
    except ValueError:
        # Partial/garbled trace line; keep reading.
        continue
    # trace_fields() yields (task, pid, cpu, flags, ts, msg); print the msg.
    print(fields[5])
type = 'O' elif event.type == 3: type = 'S' if (csv): print("%d,%s,%d,%s,%d,%d,%d,%s" % (event.ts_us, event.task, event.pid, type, event.size, event.offset, event.delta_us, event.file)) return print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"), event.task, event.pid, type, event.size, event.offset / 1024, float(event.delta_us) / 1000, event.file)) # initialize BPF b = BPF(text=bpf_text) # common file functions b.attach_kprobe(event="xfs_file_read_iter", fn_name="trace_rw_entry") b.attach_kprobe(event="xfs_file_write_iter", fn_name="trace_rw_entry") b.attach_kprobe(event="xfs_file_open", fn_name="trace_open_entry") b.attach_kprobe(event="xfs_file_fsync", fn_name="trace_fsync_entry") b.attach_kretprobe(event="xfs_file_read_iter", fn_name="trace_read_return") b.attach_kretprobe(event="xfs_file_write_iter", fn_name="trace_write_return") b.attach_kretprobe(event="xfs_file_open", fn_name="trace_open_return") b.attach_kretprobe(event="xfs_file_fsync", fn_name="trace_fsync_return") # header if (csv): print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE") else:
start.update(&req, &ts); } void trace_completion(struct pt_regs *ctx, struct request *req){ u64 *tsp, delta; tsp = start.lookup(&req); if(tsp != 0){ delta = bpf_ktime_get_ns(); bpf_trace_printk("%d %x %d\\n", req->__data_len, req->cmd_flags, delta / 1000); start.delete(&req); } } """ bpf = BPF(text = bpf_code) if BPF.get_kprobe_functions(b'blk_start_request'): bpf.attach_kprobe(event="blk_start_request", fn_name="trace_start") bpf.attach_kprobe(event="blk_mq_start_request", fn_name="trace_start") bpf.attach_kprobe(event="blk_account_io_completion", fn_name="trace_completion") print("%-18s %-2s %-7s %8s" % ("TIME(s)", "T", "BYTES", "LAT(ms)")) while 1: try: (task, pid, cpu, flags, ts, msg) = bpf.trace_fields() (bytes_s, bflags_s, us_s) = msg.split() if int(bflags_s, 16) & REQ_WRITE: type_s = b"w" elif bytes_s == "0":
from bcc.utils import printb
import commands
import argparse
import sys

# -c/--command restricts output to processes matching the given name.
parser = argparse.ArgumentParser(
    description='/sys/kernel/debug/tracing/events')
parser.add_argument("-c", "--command")
args = parser.parse_args()

# load BPF program
b = BPF(text="""
TRACEPOINT_PROBE(net, net_dev_xmit)
{
    // args is from /sys/kernel/debug/tracing/events/net/net_dev_xmit/format
    bpf_trace_printk("%d\\n", args->len);
    return 0;
}
""")

if args.command:
    # Resolve the command name to a pid up front; bail out if absent.
    # (`commands` is the Python-2 stdlib module.)
    cmd = "pgrep " + args.command
    (status, output) = commands.getstatusoutput(cmd)
    if status != 0:
        exit(args.command + " not found")

# header
print("%-18s %-16s %-6s %s" % ("TIME(s)", "COMM", "PID", "LEN"))

# format output
while 1:
class datastruct(ct.Structure): _fields_ = [("pid", ct.c_uint), ("uid", ct.c_uint), ("gid", ct.c_uint), ("loginuid", ct.c_uint), ("ret", ct.c_uint), ("ev_type", ct.c_uint), ("comm", ct.c_char * TASK_COMM_LEN), ("ipset_name", ct.c_char * IPSET_MAXNAMELEN), ("ipset_newname", ct.c_char * IPSET_MAXNAMELEN), ("ipset_type", ct.c_char * IPSET_MAXNAMELEN)] # OUTPUT def callback(cpu, data, size): assert size >= ct.sizeof(datastruct) event = ct.cast(data, ct.POINTER(datastruct)).contents if event.ev_type == EXCHANGE_CREATE: print("%s (pid: %d) (auid: %d) - CREATE %s (type: %s)" % (event.comm, event.pid, event.loginuid, event.ipset_name, event.ipset_type)) # MAIN b = BPF(src_file="mine.bpf.c") b["events"].open_perf_buffer(callback) print("Tracing... Hit Ctrl-C to end.") while 1: b.kprobe_poll()
mode = BPF.XDP #mode = BPF.SCHED_CLS if mode == BPF.XDP: ret = "XDP_PASS" ctxtype = "xdp_md" else: ret = "TC_ACT_SHOT" ctxtype = "__sk_buff" # load BPF program b = BPF(src_file="ebpf_ros2_xdp.c", cflags=[ "-w", "-DRETURNCODE=%s" % ret, "-DCTXTYPE=%s" % ctxtype, "-DMAPTYPE=\"%s\"" % maptype ], device=offload_device) fn = b.load_func("xdp_prog", mode, offload_device) if mode == BPF.XDP: b.attach_xdp(device, fn, flags) else: ip = pyroute2.IPRoute() ipdb = pyroute2.IPDB(nl=ip) idx = ipdb.interfaces[device].index ip.tc("add", "clsact", idx) ip.tc("add-filter", "bpf",
laddr=inet_ntop(AF_INET, pack("I", k.saddr)), lport=k.lport, daddr=inet_ntop(AF_INET, pack("I", k.daddr)), dport=k.dport) def get_ipv6_session_key(k): return TCPSessionKey(pid=k.pid, laddr=inet_ntop(AF_INET6, k.saddr), lport=k.lport, daddr=inet_ntop(AF_INET6, k.daddr), dport=k.dport) # initialize BPF b = BPF(text=bpf_text) ipv4_send_bytes = b["ipv4_send_bytes"] ipv4_recv_bytes = b["ipv4_recv_bytes"] # output i = 0 while i != args.count: try: sleep(args.interval) except KeyboardInterrupt: break # header if not args.noclear:
u64 *last_ts = ts.lookup(&zero); if (!last_ts) return 0; if ((now-*last_ts) < MIN_INTERVAL) { return 0; } *last_ts = now; ttl_event_t event; event.in_src = in_ip->saddr; event.in_dst = in_ip->daddr; event.out_src = out_ip->saddr; event.out_dst = out_ip->daddr; event.pid = bpf_get_current_pid_tgid(); ttl_event.perf_submit(ctx, &event, sizeof(event)); } } return 0; } int kprobe__ip_rcv(struct pt_regs *ctx,struct sk_buff *skb){ return chk_ttl(ctx, skb); } int kprobe__ip_forward(struct pt_regs *ctx,struct sk_buff *skb){ return chk_ttl(ctx, skb); } """ b = BPF(text=prog) b["ttl_event"].open_perf_buffer(cb) while True: b.kprobe_poll()
return 0; pid = bpf_get_current_pid_tgid(); data.pid = pid; bpf_probe_read_user(&data.str, sizeof(data.str), (void *)PT_REGS_RC(ctx)); bpf_get_current_comm(&comm, sizeof(comm)); if (comm[0] == 'b' && comm[1] == 'a' && comm[2] == 's' && comm[3] == 'h' && comm[4] == 0 ) { events.perf_submit(ctx,&data,sizeof(data)); } return 0; }; """ b = BPF(text=bpf_text) b.attach_uretprobe(name=name, sym="readline", fn_name="printret") # header print("%-9s %-6s %s" % ("TIME", "PID", "COMMAND")) def print_event(cpu, data, size): event = b["events"].event(data) print("%-9s %-6d %s" % (strftime("%H:%M:%S"), event.pid, event.str.decode('utf-8', 'replace'))) b["events"].open_perf_buffer(print_event) while 1: try:
type = 'O' elif event.type == 3: type = 'S' if (csv): print("%d,%s,%d,%s,%d,%d,%d,%s" % (event.ts_us, event.task, event.pid, type, event.size, event.offset, event.delta_us, event.file)) return print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"), event.task, event.pid, type, event.size, event.offset / 1024, float(event.delta_us) / 1000, event.file)) # initialize BPF b = BPF(text=bpf_text) # Common file functions. See earlier comment about generic_*(). b.attach_kprobe(event="generic_file_read_iter", fn_name="trace_read_entry") b.attach_kprobe(event="btrfs_file_write_iter", fn_name="trace_write_entry") b.attach_kprobe(event="generic_file_open", fn_name="trace_open_entry") b.attach_kprobe(event="btrfs_sync_file", fn_name="trace_fsync_entry") b.attach_kretprobe(event="generic_file_read_iter", fn_name="trace_read_return") b.attach_kretprobe(event="btrfs_file_write_iter", fn_name="trace_write_return") b.attach_kretprobe(event="generic_file_open", fn_name="trace_open_return") b.attach_kretprobe(event="btrfs_sync_file", fn_name="trace_fsync_return") # header if (csv): print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE") else:
global start_ts if args.time: print("%-9s" % strftime("%H:%M:%S"), end="") if args.timestamp: if start_ts == 0: start_ts = event.ts_us print("%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), end="") printb(b"%-7d %-12.12s %-2d %-16s %-5d %-16s %-5d" % (event.pid, event.task, event.ip, inet_ntop(AF_INET6, event.daddr).encode(), event.dport, inet_ntop(AF_INET6, event.saddr).encode(), event.lport)) # initialize BPF b = BPF(text=bpf_text) # header if args.time: print("%-9s" % ("TIME"), end="") if args.timestamp: print("%-9s" % ("TIME(s)"), end="") print("%-7s %-12s %-2s %-16s %-5s %-16s %-5s" % ("PID", "COMM", "IP", "RADDR", "RPORT", "LADDR", "LPORT")) start_ts = 0 # read events b["ipv4_events"].open_perf_buffer(print_ipv4_event) b["ipv6_events"].open_perf_buffer(print_ipv6_event) while 1:
    int key = stack_traces.get_stackid(ctx, BPF_F_REUSE_STACKID);
    u64 zero = 0;
    u64 *val = counts.lookup_or_init(&key, &zero);
    (*val)++;
    return 0;
}
"""

# Optionally restrict tracing to a single process; the substituted C snippet
# drops events from any other pid.
if args.pid:
    bpf_text = bpf_text.replace(
        'FILTER', ('u32 pid; pid = bpf_get_current_pid_tgid(); ' +
        'if (pid != %s) { return 0; }') % (args.pid))
else:
    bpf_text = bpf_text.replace('FILTER', '')
if debug:
    print(bpf_text)

b = BPF(text=bpf_text)
# One kprobe per kernel function matching the pattern (regex attach).
b.attach_kprobe(event_re=pattern, fn_name="trace_count")
matched = b.num_open_kprobes()
if matched == 0:
    print("0 functions matched by \"%s\". Exiting." % args.pattern)
    exit()

# header
print("Tracing %d functions for \"%s\"... Hit Ctrl-C to end." %
    (matched, args.pattern))


def print_frame(addr):
    # Print one stack-frame line; with verbose, prefix the raw address.
    # NOTE(review): function continues past this chunk.
    print(" ", end="")
    if verbose:
        print("%-16x " % addr, end="")
def setUp(self):
    """Compile a trivial no-op BPF program so each test gets a fresh handle."""
    program = """int count(void *ctx) { return 0; }"""
    self.b = BPF(text=program)
        (inet_ntop(AF_INET, pack('I', event.saddr)), event.lport),
        type[event.type],
        "%s:%s" % (inet_ntop(AF_INET, pack('I', event.daddr)), event.dport),
        tcpstate[event.state]))


def print_ipv6_event(cpu, data, size):
    # Perf-buffer callback for IPv6 retransmit/TLP events: decode the raw
    # perf record into Data_ipv6 and print one formatted line.
    event = ct.cast(data, ct.POINTER(Data_ipv6)).contents
    print("%-8s %-6d %-2d %-20s %1s> %-20s %s" % (strftime("%H:%M:%S"),
        event.pid, event.ip,
        "%s:%d" % (inet_ntop(AF_INET6, event.saddr), event.lport),
        type[event.type],
        "%s:%d" % (inet_ntop(AF_INET6, event.daddr), event.dport),
        tcpstate[event.state]))

# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="tcp_retransmit_skb", fn_name="trace_retransmit")
# Tail-loss probes are traced only on request (-l).
if args.lossprobe:
    b.attach_kprobe(event="tcp_send_loss_probe", fn_name="trace_tlp")

# header
print("%-8s %-6s %-2s %-20s %1s> %-20s %-4s" % ("TIME", "PID", "IP",
    "LADDR:LPORT", "T", "RADDR:RPORT", "STATE"))

# read events
b["ipv4_events"].open_perf_buffer(print_ipv4_event)
b["ipv6_events"].open_perf_buffer(print_ipv6_event)
while 1:
    b.kprobe_poll()
class Tool(object):
    # Command-line driver for the trace tool: parses arguments and probe
    # specifiers, generates one BPF C program covering all probes, attaches
    # them, and polls for events until interrupted.

    examples = """
EXAMPLES:
trace do_sys_open
        Trace the open syscall and print a default trace message when entered
trace 'do_sys_open "%s", arg2'
        Trace the open syscall and print the filename being opened
trace 'sys_read (arg3 > 20000) "read %d bytes", arg3'
        Trace the read syscall and print a message for reads >20000 bytes
trace 'r::do_sys_return "%llx", retval'
        Trace the return from the open syscall and print the return value
trace 'c:open (arg2 == 42) "%s %d", arg1, arg2'
        Trace the open() call from libc only if the flags (arg2) argument is 42
trace 'c:malloc "size = %d", arg1'
        Trace malloc calls and print the size being allocated
trace 'p:c:write (arg1 == 1) "writing %d bytes to STDOUT", arg3'
        Trace the write() call from libc to monitor writes to STDOUT
trace 'r::__kmalloc (retval == 0) "kmalloc failed!"
        Trace returns from __kmalloc which returned a null pointer
trace 'r:c:malloc (retval) "allocated = %p", retval
        Trace returns from malloc and print non-NULL allocated buffers
trace 't:block:block_rq_complete "sectors=%d", tp.nr_sector'
        Trace the block_rq_complete kernel tracepoint and print # of tx sectors
trace 'u:pthread:pthread_create (arg4 != 0)'
        Trace the USDT probe pthread_create when its 4th argument is non-zero
"""

    def __init__(self):
        """Parse the command line into self.args."""
        parser = argparse.ArgumentParser(
            description="Attach to functions and print trace messages.",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog=Tool.examples)
        parser.add_argument("-p", "--pid", type=int,
            help="id of the process to trace (optional)")
        parser.add_argument(
            "-v", "--verbose", action="store_true",
            help="print resulting BPF program code before executing")
        parser.add_argument("-Z", "--string-size", type=int, default=80,
            help="maximum size to read from strings")
        parser.add_argument(
            "-S", "--include-self", action="store_true",
            help="do not filter trace's own pid from the trace")
        parser.add_argument("-M", "--max-events", type=int,
            help="number of events to print before quitting")
        parser.add_argument("-o", "--offset", action="store_true",
            help="use relative time from first traced message")
        parser.add_argument("-K", "--kernel-stack", action="store_true",
            help="output kernel stack trace")
        parser.add_argument("-U", "--user_stack", action="store_true",
            help="output user stack trace")
        parser.add_argument(metavar="probe", dest="probes", nargs="+",
            help="probe specifier (see examples)")
        self.args = parser.parse_args()

    def _create_probes(self):
        """Build one Probe object per probe specifier from the command line."""
        Probe.configure(self.args)
        self.probes = []
        for probe_spec in self.args.probes:
            self.probes.append(
                Probe(probe_spec, self.args.string_size,
                      self.args.kernel_stack, self.args.user_stack))

    def _generate_program(self):
        """Concatenate the generated C source for every probe into
        self.program, plus shared includes and tracepoint support code."""
        self.program = """
#include <linux/ptrace.h>
#include <linux/sched.h>        /* For TASK_COMM_LEN */

"""
        self.program += BPF.generate_auto_includes(
            map(lambda p: p.raw_probe, self.probes))
        self.program += Tracepoint.generate_decl()
        self.program += Tracepoint.generate_entry_probe()
        for probe in self.probes:
            self.program += probe.generate_program(self.args.include_self)

        if self.args.verbose:
            print(self.program)

    def _attach_probes(self):
        """Compile the program and attach every kernel/user/USDT probe."""
        usdt_contexts = []
        for probe in self.probes:
            if probe.usdt:
                # USDT probes must be enabled before the BPF object
                # is initialized, because that's where the actual
                # uprobe is being attached.
                probe.usdt.enable_probe(probe.usdt_name, probe.probe_name)
                usdt_contexts.append(probe.usdt)
        self.bpf = BPF(text=self.program, usdt_contexts=usdt_contexts)
        Tracepoint.attach(self.bpf)
        for probe in self.probes:
            if self.args.verbose:
                print(probe)
            probe.attach(self.bpf, self.args.verbose)

    def _main_loop(self):
        """Print the header, then poll kprobe events forever."""
        all_probes_trivial = all(map(Probe.is_default_action, self.probes))

        # Print header
        print("%-8s %-6s %-12s %-16s %s" % \
            ("TIME", "PID", "COMM", "FUNC",
            "-" if not all_probes_trivial else ""))

        while True:
            self.bpf.kprobe_poll()

    def run(self):
        """Entry point: run the full pipeline, reporting failures tersely
        unless -v was given."""
        try:
            self._create_probes()
            self._generate_program()
            self._attach_probes()
            self._main_loop()
        except:
            # NOTE(review): bare except appears deliberate -- the handler
            # re-inspects sys.exc_info() and deliberately stays silent for
            # SystemExit; confirm before narrowing.
            if self.args.verbose:
                traceback.print_exc()
            elif sys.exc_info()[0] is not SystemExit:
                print(sys.exc_info()[1])
    bpf_text = EXTENDED + bpf_text
else:
    bpf_text = NO_EXTENDED + bpf_text

# Optional pid filter; the substituted snippet compares against the tgid
# half of bpf_get_current_pid_tgid() (id >> 32).
if args.pid:
    bpf_text = bpf_text.replace("PID_FILTER",
        "if (id >> 32 != %s) { return 0; }" % args.pid)
else:
    bpf_text = bpf_text.replace("PID_FILTER", "")
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# load BPF program
b = BPF(text=bpf_text)
b.attach_kprobe(event="compact_zone", fn_name="trace_compact_zone_entry")
b.attach_kretprobe(event="compact_zone", fn_name="trace_compact_zone_return")
b.attach_kprobe(event="compaction_suitable",
    fn_name="trace_compaction_suitable_entry")
b.attach_kretprobe(event="fragmentation_index",
    fn_name="trace_fragmentation_index_return")
b.attach_kretprobe(event="compaction_suitable",
    fn_name="trace_compaction_suitable_return")

stack_traces = b.get_table("stack_traces")
initial_ts = 0


def zone_idx_to_str(idx):
    # from include/linux/mmzone.h
if args.tgid:
    bpf_text = bpf_text.replace('TGID_FILTER', 'tgid != %d' % args.tgid)
else:
    bpf_text = bpf_text.replace('TGID_FILTER', '0')
if args.all_files:
    bpf_text = bpf_text.replace('TYPE_FILTER', '0')
else:
    # Default: only regular files (skips sockets, FIFOs, devices, ...).
    bpf_text = bpf_text.replace('TYPE_FILTER', '!S_ISREG(mode)')

if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="vfs_read", fn_name="trace_read_entry")
b.attach_kprobe(event="vfs_write", fn_name="trace_write_entry")

DNAME_INLINE_LEN = 32  # linux/dcache.h

print('Tracing... Output every %d secs. Hit Ctrl-C to end' % interval)


def sort_fn(counts):
    # Sort key for one (key, value) table entry: total activity for "all",
    # otherwise the single column named by --sort.
    if args.sort == "all":
        return (counts[1].rbytes + counts[1].wbytes + counts[1].reads +
            counts[1].writes)
    else:
        return getattr(counts[1], args.sort)
    else
        data.count = count;
    events.perf_submit(ctx, &data, sizeof(data));
    return 0;
};
"""

# Substitute the inode number of the target pts device into the C program.
bpf_text = bpf_text.replace('PTS', str(pi.st_ino))
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# initialize BPF
b = BPF(text=bpf_text)

BUFSIZE = 256


class Data(ct.Structure):
    # ctypes mirror of the C event struct submitted through the perf buffer.
    _fields_ = [("count", ct.c_int),
                ("buf", ct.c_char * BUFSIZE)]

if not args.noclear:
    call("clear")

# process event
def print_event(cpu, data, size):
    # Perf-buffer callback; continues past this chunk.
    event = ct.cast(data, ct.POINTER(Data)).contents
from bcc import BPF

# BPF program: on every return from execve, report the calling process's comm
# and the syscall's return value through the shared kernel trace pipe.
program_text = """
int ret_sys_execve(struct pt_regs *ctx) {
    int return_value;
    char comm[16];
    bpf_get_current_comm(&comm, sizeof(comm));
    return_value = PT_REGS_RC(ctx);
    bpf_trace_printk("program: %s, return: %d", comm, return_value);
    return 0;
}
"""

bpf_handle = BPF(text=program_text)
# Resolve the kernel's (arch- and version-specific) execve symbol name.
syscall_name = bpf_handle.get_syscall_fnname("execve")
bpf_handle.attach_kretprobe(event=syscall_name, fn_name="ret_sys_execve")
# Stream trace-pipe output until interrupted.
bpf_handle.trace_print()
elif min_size is not None:
    size_filter = "if (size < %d) return 0;" % min_size
elif max_size is not None:
    size_filter = "if (size > %d) return 0;" % max_size
bpf_source = bpf_source.replace("SIZE_FILTER", size_filter)

# Kernel-mode traces collect kernel stacks only; user-mode traces add the
# user-stack flag.
stack_flags = "BPF_F_REUSE_STACKID"
if not kernel_trace:
    stack_flags += "|BPF_F_USER_STACK"
bpf_source = bpf_source.replace("STACK_FLAGS", stack_flags)

if args.ebpf:
    print(bpf_source)
    exit()

bpf = BPF(text=bpf_source)

if not kernel_trace:
    print("Attaching to pid %d, Ctrl+C to quit." % pid)

    def attach_probes(sym, fn_prefix=None, can_fail=False):
        # Attach entry + return uprobes for one allocator symbol in the
        # target process; continues past this chunk.
        if fn_prefix is None:
            fn_prefix = sym

        try:
            bpf.attach_uprobe(name=obj, sym=sym,
                fn_name=fn_prefix + "_enter",
                pid=pid)
            bpf.attach_uretprobe(name=obj, sym=sym,
bpf_text = bpf_text.replace( 'STORE', 'pid_key_t key = {.id = ' + pid + ', .slot = bpf_log2l(delta)}; ' + 'dist.increment(key);') else: section = "" bpf_text = bpf_text.replace('STORAGE', 'BPF_HISTOGRAM(dist);') bpf_text = bpf_text.replace('STORE', 'dist.atomic_increment(bpf_log2l(delta));') if debug or args.ebpf: print(bpf_text) if args.ebpf: exit() max_pid = int(open("/proc/sys/kernel/pid_max").read()) b = BPF(text=bpf_text, cflags=["-DMAX_PID=%d" % max_pid]) b.attach_kprobe(event_re="^finish_task_switch$|^finish_task_switch\.isra\.\d$", fn_name="sched_switch") print("Tracing %s-CPU time... Hit Ctrl-C to end." % ("off" if args.offcpu else "on")) exiting = 0 if args.interval else 1 dist = b.get_table("dist") while (1): try: sleep(int(args.interval)) except KeyboardInterrupt: exiting = 1 print()