def print_ipv6_event(cpu, data, size):
    """Handle one IPv6 connection-lifespan event: print the optional
    time/timestamp columns, then push the connection record to InfluxDB."""
    event = b["ipv6_events"].event(data)
    global start_ts
    if args.time:
        # Wall-clock column: comma-separated for CSV, padded for table output.
        print(("%s," if args.csv else "%-8s ") % strftime("%H:%M:%S"), end="")
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        elapsed_s = (float(event.ts_us) - start_ts) / 1000000
        print(("%.6f," if args.csv else "%-9.6f ") % elapsed_s, end="")
    # Build the InfluxDB point; event.ports packs (lport << 32) | dport.
    record = lmp_data(datetime.now().isoformat(), 'glob', event.pid,
                      event.task.decode('utf-8', 'replace'),
                      "6" if args.wide or args.csv else "",
                      inet_ntop(AF_INET6, event.saddr), event.ports >> 32,
                      inet_ntop(AF_INET6, event.daddr),
                      event.ports & 0xffffffff,
                      event.tx_b / 1024, event.rx_b / 1024,
                      float(event.span_us) / 1000)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_ipv6_event(cpu, data, size):
    """Handle one IPv6 connect event: optional timestamp/uid columns, then
    either an InfluxDB write (when --lport is set) or a plain printb line."""
    event = b["ipv6_events"].event(data)
    global start_ts
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_us
        printb(b"%-9.3f" % ((float(event.ts_us) - start_ts) / 1000000), nl="")
    if args.print_uid:
        printb(b"%-6d" % event.uid, nl="")
    dest_ip = inet_ntop(AF_INET6, event.daddr).encode()
    if not args.lport:
        printb(b"%-6d %-12.12s %-2d %-16s %-16s %-6d %s" % (
            event.pid, event.task, event.ip,
            inet_ntop(AF_INET6, event.saddr).encode(),
            dest_ip, event.dport, print_dns(dest_ip)))
        return
    # --lport: persist the record (including the local port) to InfluxDB.
    record = lmp_data(datetime.now().isoformat(), 'glob', event.pid,
                      event.task, event.ip,
                      inet_ntop(AF_INET6, event.saddr).encode(),
                      event.lport, dest_ip, event.dport)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_event(cpu, data, size):
    """For every fd carried by the event, write a SEND/RECV row to InfluxDB."""
    global initial_ts
    event = b["events"].event(data)
    tid = event.id & 0xffffffff
    cnt = min(MAX_FD, event.fd_cnt)
    # Optional filter on the command name.
    if args.name and bytes(args.name) not in event.comm:
        return
    for i in range(cnt):
        if not initial_ts:
            initial_ts = event.ts
        if args.timestamp:
            print("%-14.9f" % (float(event.ts - initial_ts) / 1000000), end="")
        action = "SEND" if event.action == ACTION_SEND else "RECV"
        comm = event.comm.decode()
        sock = "%d:%s" % (event.sock_fd, get_file(tid, event.sock_fd))
        fd = event.fd[i]
        # Resolve the passed fd's path only on the sending side.
        fd_file = get_file(tid, fd) if event.action == ACTION_SEND else ""
        record = lmp_data(datetime.now().isoformat(), 'glob',
                          action, tid, comm, sock, fd, fd_file)
        write2db(data_struct, record, influx_client,
                 DatabaseType.INFLUXDB.value)
def print_outstanding():
    """Aggregate outstanding allocations by stack id and write the largest
    `top_stacks` totals to InfluxDB."""
    alloc_info = {}
    allocs = bpf["allocs"]
    stack_traces = bpf["stack_traces"]
    for address, info in sorted(allocs.items(), key=lambda a: a[1].size):
        # Skip allocations younger than min_age_ns (may still be in flight).
        if BPF.monotonic_time() - min_age_ns < info.timestamp_ns:
            continue
        if info.stack_id < 0:
            continue
        if info.stack_id in alloc_info:
            alloc_info[info.stack_id].update(info.size)
        else:
            # First sighting of this stack: symbolize its frames once.
            frames = [bpf.sym(addr, pid, show_module=True, show_offset=True)
                      for addr in stack_traces.walk(info.stack_id)]
            alloc_info[info.stack_id] = Allocation(frames, info.size)
        if args.show_allocs:
            print("\taddr = %x size = %s" % (address.value, info.size))
    # Keep only the stacks with the largest outstanding totals.
    for alloc in sorted(alloc_info.values(),
                        key=lambda a: a.size)[-top_stacks:]:
        record = lmp_data(datetime.now().isoformat(), 'glob', alloc.size,
                          alloc.count, b''.join(alloc.stack).decode("ascii"))
        write2db(data_struct, record, influx_client,
                 DatabaseType.INFLUXDB.value)
def print_event(cpu, data, size):
    """Record one block-I/O event, tracking a cumulative time delta between
    consecutive events via the start_ts/prev_ts/delta globals."""
    global start_ts
    global prev_ts
    global delta
    event = b["events"].event(data)
    val = -1
    # NOTE: rwflg stays unbound if rwflag is neither 0 nor 1 (as in original).
    if event.rwflag == 1:
        rwflg = "W"
    elif event.rwflag == 0:
        rwflg = "R"
    if not re.match(b'\?', event.name):
        val = event.sector
    if start_ts == 0:
        prev_ts = start_ts
    if start_ts == 1:
        delta = float(delta) + (event.ts - prev_ts)
    record = lmp_data(datetime.now().isoformat(), 'glob',
                      event.name.decode('utf-8', 'replace'), event.pid,
                      event.disk_name.decode('utf-8', 'replace'), rwflg,
                      event.len, float(event.delta) / 1000000)
    write2db(data_struct, record, client)
    prev_ts = event.ts
    start_ts = 1
def print_event(cpu, data, size):
    """Forward one latency event (task, pid, delta_us) to InfluxDB."""
    event = b["events"].event(data)
    record = lmp_data(datetime.now().isoformat(), 'glob',
                      event.task, event.pid, event.delta_us)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_event(cpu, data, size):
    """Store an OOM-kill event together with the current load-average line."""
    event = b["events"].event(data)
    with open(loadavg) as stats:
        avgline = stats.read().rstrip()
    record = lmp_data(datetime.now().isoformat(), 'glob', event.fpid,
                      event.tpid, event.pages,
                      event.fcomm.decode('utf-8', 'replace'),
                      event.tcomm.decode('utf-8', 'replace'), avgline)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_ipv6_event(cpu, data, size):
    """Print and persist one IPv6 RTT sample, skipping our own infrastructure
    processes (influxd / docker-proxy) to avoid feedback loops."""
    event = b["ipv6_events"].event(data)
    task = event.task.decode('utf-8', 'replace')
    if task in ('influxd', 'docker-proxy'):
        return
    record = lmp_data('glob', 'ipv6', task, event.pid, event.srtt)
    write2db(data_struct, record, client)
    print("%-6d %-12.12s %-2d %-20s > %-20s %d" % (
        event.pid, task, event.ip,
        "%s:%d" % (inet_ntop(AF_INET6, event.saddr), event.sport),
        "%s:%d" % (inet_ntop(AF_INET6, event.daddr), event.dport),
        event.srtt))
def print_event(cpu, data, size):
    """Send one file-access event (pid, comm, mode, filename) to InfluxDB."""
    event = b["events"].event(data)
    record = lmp_data(datetime.now().isoformat(), 'glob', event.pid,
                      event.comm.decode('utf-8', 'replace'),
                      mode_s[event.type],
                      event.filename.decode('utf-8', 'replace'))
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_event(cpu, data, size):
    """Print one slow-file-operation event and write it to InfluxDB.

    Fix: the database record now carries a full ISO-8601 timestamp — as every
    other writer in this project does — instead of a bare "%H:%M:%S" string,
    which is not a usable InfluxDB point time.
    """
    event = b["events"].event(data)
    comm = event.comm.decode('utf-8', 'replace')
    fname = event.fname.decode('utf-8', 'replace')
    latency_ms = float(event.delta) / 1000  # event.delta is in microseconds
    print("%-8s %-6d %-16s %-7.2f %s" % (strftime("%H:%M:%S"), event.pid,
                                         comm, latency_ms, fname))
    # write to influxdb
    test_data = lmp_data(datetime.now().isoformat(), 'glob', event.pid,
                         comm, latency_ms, fname)
    write2db(data_struct, test_data, influx_client,
             DatabaseType.INFLUXDB.value)
def zone_info(thread_name, delay):
    """Sample /proc/zoneinfo once per second, compute pages_free/managed for
    the first three zones (DMA, DMA32, Normal), print the ratios and store
    them in InfluxDB. Runs until interrupted."""
    path = "/proc/zoneinfo"
    title = ['DMA', 'DMA32', 'Normal']
    data = ['0', '0', '0']
    while 1:
        try:
            sleep(1)
        except KeyboardInterrupt:
            exit()
        pages_free = '0'
        managed = '0'
        count = 0
        i = 0
        k = 0
        with open(path) as f:
            for line in f:
                if ':' in line:
                    line = line.replace(':', '')
                fields = line.split()
                if fields[0] == 'pages':
                    pages_free = fields[2]
                    count += 1
                if fields[0] == 'managed':
                    managed = fields[1]
                    count += 1
                # Both values seen for a zone: record its free/managed ratio.
                if pages_free != '0' and managed != '0' and count == 2:
                    ratio = float(pages_free) / float(managed)
                    if i <= 2:
                        data[i] = "%.4f" % ratio
                    i += 1
                    count = 0
        print("%-9s%-9s%-9s" % (data[0], data[1], data[2]))
        record = lmp_data(datetime.now().isoformat(), 'glob',
                          data[0], data[1], data[2])
        write2db(data_struct, record, influx_client,
                 DatabaseType.INFLUXDB.value)
def print_ipv4_event(cpu, data, size):
    """Handle one IPv4 TCP-tracer event: optional timestamp column, write the
    connection tuple to InfluxDB, then the trailing netns column."""
    event = b["tcp_ipv4_event"].event(data)
    global start_ts
    if args.timestamp:
        if start_ts == 0:
            start_ts = event.ts_ns
        if args.verbose:
            print("%-14d" % (event.ts_ns - start_ts), end="")
        else:
            print("%-9.3f" % ((event.ts_ns - start_ts) / 1000000000.0),
                  end="")
    # Map the numeric event type to its one-letter code (U = unknown).
    type_codes = {1: "C", 2: "A", 3: "X"}
    type_str = type_codes.get(event.type, "U")
    record = lmp_data(datetime.now().isoformat(), 'glob',
                      verbose_types[type_str], event.pid,
                      event.comm.decode('utf-8', 'replace'), event.ip,
                      inet_ntop(AF_INET, pack("I", event.saddr)),
                      inet_ntop(AF_INET, pack("I", event.daddr)),
                      event.sport, event.dport)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
    if args.verbose and not args.netns:
        print(" %-8d" % event.netns)
    else:
        print()
def print_ipv4_event(cpu, data, size):
    """Write one IPv4 TCP state-change event to InfluxDB."""
    event = b["ipv4_events"].event(data)
    saddr = inet_ntop(AF_INET, pack('I', event.saddr))
    daddr = inet_ntop(AF_INET, pack('I', event.daddr))
    # `type` and `tcpstate` are module-level lookup tables.
    record = lmp_data(datetime.now().isoformat(), 'glob', event.pid,
                      event.ip, saddr, event.lport, type[event.type],
                      daddr, event.dport, tcpstate[event.state])
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
def print_event(cpu, data, size):
    """Record one file-I/O latency event; names clipped by the kernel to
    DNAME_INLINE_LEN are marked with a trailing ellipsis."""
    event = b["events"].event(data)
    us = event.delta_us
    name = event.name.decode('utf-8', 'replace')
    if event.name_len > DNAME_INLINE_LEN:
        # Kernel truncated the dentry name: make the truncation visible.
        name = name[:-3] + "..."
    record = lmp_data(datetime.now().isoformat(), 'glob',
                      event.comm.decode('utf-8', 'replace'), event.pid,
                      mode_s[event.mode], event.sz, us, name)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
'pid', ], "fields": ['duration'] } b = BPF(text=bpf_text) b.attach_kretprobe(event="pick_next_task_fair", fn_name="switch_start") b.attach_kretprobe(event="pick_next_task_idle", fn_name="switch_start") b.attach_kretprobe(event="pick_next_task_rt", fn_name="switch_start") b.attach_kretprobe(event="pick_next_task_dl", fn_name="switch_start") b.attach_kretprobe(event="pick_next_task_stop", fn_name="switch_start") b.attach_kprobe(event="finish_task_switch", fn_name="switch_end") dist = b.get_table("dist") #print("%-6s%-6s%-6s%-6s" % ("CPU", "PID", "TGID", "TIME(ns)")) while (1): try: sleep(1) for k, v in dist.items(): #print("%-6d%-6d%-6d%-6d" % (k.cpu, k.pid, k.tgid, v.value)) test_data = lmp_data(datetime.now().isoformat(), 'glob', k.cpu, k.pid, v.value) write2db(data_struct, test_data, influx_client, DatabaseType.INFLUXDB.value) dist.items() except KeyboardInterrupt: exit()
# data structure from template
class lmp_data(object):
    """One swap_pagefault measurement row: timestamp, 'glob' tag, duration."""

    def __init__(self, a, b, c):
        self.time = a
        self.glob = b
        self.duration = c


data_struct = {
    "measurement": 'swap_pagefault',
    "time": [],
    "tags": ['glob'],
    "fields": ['duration'],
}

timer = b.get_table("timer")
while (1):
    try:
        sleep(1)
        for k, v in timer.items():
            # v.value / 1000: presumably ns -> us — TODO confirm against the
            # BPF program that fills the "timer" table.
            record = lmp_data(datetime.now().isoformat(), 'glob',
                              v.value / 1000)
            write2db(data_struct, record, client)
        # Reset per-interval counters.
        timer.clear()
    except KeyboardInterrupt:
        exit()
def print_event(cpu, data, size):
    """Persist the length field of one BPF 'result' event."""
    global start
    event = b["result"].event(data)
    record = lmp_data('glob', event.len)
    write2db(data_struct, record, client)
# Sample data used for testing.
class lmp_data(object):
    """Demo record: two tag values (pid, name) and three fields."""

    def __init__(self, a, b, c, d, e):
        self.pid = a
        self.name = b
        self.data = c
        self.tid = d
        self.address = e


test_data = lmp_data(31256, 'watchdog', 'male', 0, 'xiyou')
data_struct = {
    "measurement": 'lmpdata',
    "tags": ['pid', 'name'],
    "fields": ['data', 'tid', 'address'],
}

# Write the sample point to the database.
write2db(data_struct, test_data, influx_client, 1)

# Deleting data:
# delete(client,measurement,require='xx')
# Deletion currently only supports filtering by tag name or by timestamp,
# written as require="'tag_name'='value'" or require="time>time_stamp".
# Example — delete points with a timestamp greater than 1590223086102172994:
# delete_db(client,'test',require='time>1590223086102172994')
#result = client.query("select * from test;")
#print("Result: {0}".format(result))
def print_event(cpu, data, size):
    """Write the length field of one BPF 'result' event to InfluxDB."""
    global start
    event = b["result"].event(data)
    record = lmp_data('glob', event.len)
    write2db(data_struct, record, influx_client, DatabaseType.INFLUXDB.value)
i += 1
if i > count:
    exit()
try:
    sleep(interval)
except KeyboardInterrupt:
    pass
    exit()

# Collect the per-interval VFS counters into fixed slots, then persist them.
vfs_list = [0, 0, 0, 0, 0, 0]
times = 1
for stype in stat_types.keys():
    idx = stat_types[stype]
    print(idx)
    try:
        val = b["stats"][c_int(idx)].value / interval
    except:
        val = 0
    vfs_list[times] = val
    times += 1
    if times == 5:
        times = 0
data = test_data('glob', vfs_list[1], vfs_list[2], vfs_list[3],
                 vfs_list[4], vfs_list[5])
write2db(data_struct, data, client)
# Reset the kernel-side counters for the next interval.
b["stats"].clear()
def handle_loop(stdscr, args):
    """Curses main loop for the page-cache top tool.

    Attaches kprobes to the page-cache functions, then repeatedly samples
    per-process hit/miss statistics, renders them on *stdscr*, and pushes a
    summary record to InfluxDB, until the user presses 'q'.
    """
    # don't wait on key press
    stdscr.nodelay(1)
    # set default sorting field
    sort_field = FIELDS.index(DEFAULT_FIELD)
    sort_reverse = True

    # load BPF program: count calls per (ip, pid, uid, comm) key.
    bpf_text = """
    #include <uapi/linux/ptrace.h>
    struct key_t {
        u64 ip;
        u32 pid;
        u32 uid;
        char comm[16];
    };

    BPF_HASH(counts, struct key_t);

    int do_count(struct pt_regs *ctx) {
        struct key_t key = {};
        u64 pid = bpf_get_current_pid_tgid();
        u32 uid = bpf_get_current_uid_gid();
        key.ip = PT_REGS_IP(ctx);
        key.pid = pid & 0xFFFFFFFF;
        key.uid = uid & 0xFFFFFFFF;
        bpf_get_current_comm(&(key.comm), 16);
        counts.increment(key);
        return 0;
    }
    """
    b = BPF(text=bpf_text)
    b.attach_kprobe(event="add_to_page_cache_lru", fn_name="do_count")
    b.attach_kprobe(event="mark_page_accessed", fn_name="do_count")
    b.attach_kprobe(event="account_page_dirtied", fn_name="do_count")
    b.attach_kprobe(event="mark_buffer_dirty", fn_name="do_count")

    exiting = 0
    while 1:
        # Keyboard handling: q quits, r flips order, < / > move sort column.
        s = stdscr.getch()
        if s == ord('q'):
            exiting = 1
        elif s == ord('r'):
            sort_reverse = not sort_reverse
        elif s == ord('<'):
            sort_field = max(0, sort_field - 1)
        elif s == ord('>'):
            sort_field = min(len(FIELDS) - 1, sort_field + 1)
        try:
            sleep(args.interval)
        except KeyboardInterrupt:
            exiting = 1
            # as cleanup can take many seconds, trap Ctrl-C:
            signal.signal(signal.SIGINT, signal_ignore)

        # Get memory info (values from /proc/meminfo are in KiB -> MiB here).
        mem = get_meminfo()
        cached = int(mem["Cached"]) / 1024
        buff = int(mem["Buffers"]) / 1024

        process_stats = get_processes_stats(b, sort_field=sort_field,
                                            sort_reverse=sort_reverse)
        stdscr.clear()
        stdscr.addstr(
            0, 0,
            "%-8s Buffers MB: %.0f / Cached MB: %.0f "
            "/ Sort: %s / Order: %s" % (strftime("%H:%M:%S"), buff, cached,
                                        FIELDS[sort_field],
                                        sort_reverse and "descending"
                                        or "ascending"))
        # NOTE(review): `_pid`, `uid`, `comm`, `access`, `misses`, `mbd`,
        # `rhits`, `whits` are not defined at this point in the visible code
        # (they presumably come from the per-process loop below), so this call
        # would raise NameError as written — confirm intended placement.
        test_data = lmp_data(datetime.now().isoformat(), 'glob', buff, cached,
                             int(_pid), uid, comm, access, misses, mbd,
                             rhits, whits)
        write2db(data_struct, test_data, influx_client,
                 DatabaseType.INFLUXDB.value)

        # header
        # NOTE(review): this format string was split across a line break in
        # the extracted source; reconstructed here as one literal — verify.
        stdscr.addstr(
            1, 0, "{0:8} {1:8} {2:16} {3:8} {4:8} {5:8} {6:10} "
            "{7:10}".format(*FIELDS),
            curses.A_REVERSE)
        (height, width) = stdscr.getmaxyx()
        for i, stat in enumerate(process_stats):
            uid = int(stat[1])
            try:
                username = pwd.getpwuid(uid)[0]
            except KeyError:
                # `pwd` throws a KeyError if the user cannot be found. This can
                # happen e.g. when the process is running in a cgroup that has
                # different users from the host.
                # NOTE(review): the literal below looks like a redacted
                # placeholder for a fallback username format — verify.
                username = '******'.format(uid)
            stdscr.addstr(
                i + 2, 0,
                "{0:8} {username:8.8} {2:16} {3:8} {4:8} "
                "{5:8} {6:9.1f}% {7:9.1f}%".format(*stat, username=username))
            # Stop before running past the bottom of the terminal.
            if i > height - 4:
                break
        stdscr.refresh()
        if exiting:
            print("Detaching...")
            return