def _get_fd_save(self, fd_data, timestamp):
    """Cache one fd-count sample in memory and append it to the fd CSV file.

    A dict payload is a control message carrying the target csv path under
    'fd_file'; a list payload is a sample shaped like
    [collection_time, packagename, pid, fd_num].
    """
    if isinstance(fd_data, dict):
        # Control message: remember where subsequent fd samples are written.
        self.fd_file = fd_data['fd_file']
        logger.debug("fd_file: " + str(self.fd_file))
        return
    try:
        # fd_data layout: ("datatime", "pckagename", "pid", "fd num")
        fd_data[0] = timestamp
        sample = {
            "time": fd_data[0] * 1000,  # milliseconds for the in-memory report
            "package": fd_data[1],
            "pid": fd_data[2],
            "fd": fd_data[3]
        }
        self.perf_data['fd'].append(sample)
        with open(self.fd_file, 'a+') as out:
            logger.debug(
                "write fd data in dataworker。。。。。。 fd timestamp: " +
                str(fd_data[0]))
            # CSV rows carry a human-readable time instead of the raw epoch.
            if isinstance(fd_data[0], float):
                fd_data[0] = TimeUtils.formatTimeStamp(fd_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = fd_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(fd_data)
    except Exception:
        logger.error('fd save error')
        logger.debug(traceback.format_exc())
def _get_activity_save(self, activity_data, timestamp):
    """Cache one current-activity sample and append it to current_activity.csv.

    On the first call the csv path is initialized and the header row written.
    activity_data is [collection_time, current_activity_name].

    BUG FIX: the original put the save logic in the ``else`` branch of the
    ``first_time`` check, so the sample that arrived with the very first call
    was silently dropped. The header setup and the save are now sequential,
    so every sample (including the first) is recorded.
    """
    if self.first_time:
        activity_title = ("datetime", "current_activity")
        self.first_time = False
        self.activity_file = os.path.join(RuntimeData.package_save_path,
                                          'current_activity.csv')
        try:
            with open(self.activity_file, 'a+') as af:
                csv.writer(af, lineterminator='\n').writerow(activity_title)
        except Exception:
            logger.error("file not found: " + str(self.activity_file))
    try:
        activity_data[0] = timestamp
        dic = {
            "time": activity_data[0] * 1000,  # ms for the in-memory report
            "name": activity_data[1]
        }
        self.perf_data['activity'].append(dic)
        with open(self.activity_file, 'a+') as writer:
            # CSV rows carry a human-readable time instead of the raw epoch.
            if isinstance(activity_data[0], float):
                activity_data[0] = TimeUtils.formatTimeStamp(
                    activity_data[0])
            tmp_dic = copy.deepcopy(dic)
            tmp_dic["time"] = activity_data[0]
            logger.debug(tmp_dic)
            csv.writer(writer, lineterminator='\n').writerow(activity_data)
    except Exception:
        logger.error("activity save error ")
        logger.debug(traceback.format_exc())
def _get_fps_save(self, fps_data, timestamp):
    """Cache one fps/jank sample in memory and append it to the fps CSV file.

    A dict payload is a control message carrying the csv path under
    'fps_file'; a list payload is [collection_time, activity, fps, jank].
    """
    if isinstance(fps_data, dict):
        # Control message: remember the target csv path for later samples.
        self.fps_filename = fps_data['fps_file']
        logger.debug("fps_filename: " + str(self.fps_filename))
        return
    try:
        # fps_data layout: ("datetime", "activity", "fps", "jank")
        fps_data[0] = timestamp
        sample = {
            "time": fps_data[0] * 1000,  # ms for the in-memory report
            "activity": fps_data[1],
            "fps": fps_data[2],
            "jank": fps_data[3]
        }
        self.perf_data['fluency'].append(sample)
        with open(self.fps_filename, 'a+') as out:
            logger.debug(
                "dataworker write fps data in dataworker。fps timestamp: " +
                str(fps_data[0]))
            # Unconditionally formatted here (unlike the other savers).
            fps_data[0] = TimeUtils.formatTimeStamp(fps_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = fps_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(fps_data)
    except Exception:
        # Original order: full stack trace first, then the short marker.
        logger.error(traceback.format_exc())
        logger.error("fps save error")
def _collect_power_thread(self, start_time):
    '''Worker thread that periodically samples battery state.

    Writes a header row (and, when a consumer queue exists, a control
    message with the csv path) to powerinfo.csv, then loops until the stop
    event is set or the timeout elapses, pushing each sample onto
    self.power_queue, or — when running standalone with no queue — writing
    it straight to the csv.
    :return:
    '''
    end_time = time.time() + self._timeout
    power_list_titile = ("datetime", "level", "voltage(V)", "tempreture(C)", "current(mA)")
    power_device_file = os.path.join(RuntimeData.package_save_path, 'powerinfo.csv')
    try:
        with open(power_device_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(power_list_titile)
            if self.power_queue:
                # Control message: tells the consumer where samples are saved.
                power_file_dic = {'power_file': power_device_file}
                self.power_queue.put(power_file_dic)
    except RuntimeError as e:
        logger.error(e)
    while not self._stop_event.is_set() and time.time() < end_time:
        try:
            before = time.time()
            logger.debug("------------into _collect_power_thread loop thread is : " + str(threading.current_thread().name))
            device_power_info = self._get_battaryproperties()
            if device_power_info.source == '':
                # No battery data available — stop sampling entirely.
                logger.debug("can't get power info , break!")
                break
            device_power_info = self.trim_data(device_power_info)  # debug
            collection_time = time.time()
            logger.debug(" collection time in powerconsumption is : " + str(collection_time))
            power_tmp_list = [collection_time, device_power_info.level,
                              device_power_info.voltage, device_power_info.temp,
                              device_power_info.current]
            if self.power_queue:
                self.power_queue.put(power_tmp_list)
            if not self.power_queue:  # standalone run of this single script
                power_tmp_list[0] = TimeUtils.formatTimeStamp(power_tmp_list[0])
                try:
                    with open(power_device_file, 'a+', encoding="utf-8") as writer:
                        writer_p = csv.writer(writer, lineterminator='\n')
                        writer_p.writerow(power_tmp_list)
                except RuntimeError as e:
                    logger.error(e)
            after = time.time()
            time_consume = after - before
            # Sleep only for the remainder of the interval so the sampling
            # cadence is not skewed by how long the collection itself took.
            delta_inter = self._interval - time_consume
            if delta_inter > 0:
                time.sleep(delta_inter)
        except:
            logger.error("an exception hanpend in powerconsumption thread , reason unkown!")
            s = traceback.format_exc()
            logger.debug(s)
    if self.power_queue:
        self.power_queue.task_done()
def _get_traffic_save(self, traffic_data, timestamp):
    """Cache one uid-level traffic sample and append it to the traffic CSV.

    A dict payload is a control message carrying the csv path under
    'traffic_file'. A list payload is
    [collection_time, packagename, uid, uid_total, uid_total_packets,
     rx, rx_packets, tx, tx_packets, fg, bg, lo] with sizes in KB.
    """
    if isinstance(traffic_data, dict):
        # Control message: remember the target csv path for later samples.
        self.traffic_filename = traffic_data['traffic_file']
        logger.debug("dataworker traffic_filename: " + str(self.traffic_filename))
        return
    try:
        traffic_data[0] = timestamp
        sample = {
            "time": traffic_data[0] * 1000,  # ms for the in-memory report
            "total": traffic_data[3],
            "total_packets": traffic_data[4],
            "rx": traffic_data[5],
            "rx_packets": traffic_data[6],
            "tx": traffic_data[7],
            "tx_packets": traffic_data[8],
            "fg": traffic_data[9],
            "bg": traffic_data[10],
            "lo": traffic_data[11]
        }
        self.perf_data['traffic'].append(sample)
        with open(self.traffic_filename, 'a+') as out:
            logger.debug(
                "write traffic data in dataworker traffic data timestamp: " +
                str(traffic_data[0]))
            # CSV rows carry a human-readable time instead of the raw epoch.
            if isinstance(traffic_data[0], float):
                traffic_data[0] = TimeUtils.formatTimeStamp(traffic_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = traffic_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(traffic_data)
    except Exception:
        logger.error("traffic save error")
        logger.debug(traceback.format_exc())
def handle_launchtime(self, log_line):
    '''Callback invoked for every logcat line that may report a launch time.

    Recognizes two event-log tags: "am_activity_launch_time" (a normal
    launch) and "am_activity_fully_drawn_time" (a reportFullyDrawn launch,
    which measures up to the app's own "fully drawn" point). When one is
    seen, the bracketed value list at the end of the line is parsed and the
    fields [2:5] plus a tag are handed to update_launch_list.

    Example line tail:
    ... am_activity_launch_time ... [...,...,this_time,total_time,...]

    :param log_line: the most recent logcat line
    :return: void
    '''
    # Classify the line; anything else is ignored.
    ltag = ""
    if "am_activity_launch_time" in log_line:
        ltag = "normal launch"
    elif "am_activity_fully_drawn_time" in log_line:
        ltag = "fullydrawn launch"
    if not ltag:
        return
    logger.debug("launchtime log:" + log_line)
    timestamp = time.time()
    # Last whitespace-separated token looks like "[v0,v1,v2,v3,v4,...]";
    # strip the brackets and keep fields 2..4.
    fields = log_line.split()[-1].replace("[", "").replace("]", "").split(',')[2:5]
    content = [TimeUtils.formatTimeStamp(timestamp)] + fields + [ltag]
    logger.debug("Launch Info: " + str(content))
    # Only a complete row (time + 3 fields + tag) is recorded.
    if len(content) == 5:
        content = self.trim_value(content)
        if content:
            self.update_launch_list(content, timestamp)
def _get_power_save(self, power_data, timestamp):
    """Cache one battery sample in memory and append it to the power CSV.

    A dict payload is a control message carrying the csv path under
    'power_file'; a list payload is
    [collection_time, level, voltage(V), temperature(C), current(mA)].
    """
    if isinstance(power_data, dict):
        # Control message: remember the target csv path for later samples.
        self.power_filename = power_data['power_file']
        logger.debug("dataworker power_filename: " + str(self.power_filename))
        return
    try:
        power_data[0] = timestamp
        sample = {
            "time": power_data[0] * 1000,  # ms for the in-memory report
            "level": power_data[1],
            "vol": power_data[2],
            "temp": power_data[3],
            "current": power_data[4]
        }
        self.perf_data['power'].append(sample)
        with open(self.power_filename, 'a+') as out:
            logger.debug(
                "write power data in dataworker。。。。。。 timestamp:" +
                str(power_data[0]))
            # CSV rows carry a human-readable time instead of the raw epoch.
            if isinstance(power_data[0], float):
                power_data[0] = TimeUtils.formatTimeStamp(power_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = power_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(power_data)
    except Exception:
        logger.error('power save error')
        logger.debug(traceback.format_exc())
def _get_cpu_save(self, cpu_data, timestamp):
    """Cache one cpu sample in memory and append it to the cpu CSV file.

    A dict payload is a control message carrying the csv path under
    'cpu_file'. A list payload is
    [collection_time, cpu_rate%, user%, system%, all_jiffies,
     packagename, pid, uid, pck_jiffies, pid_cpu%].
    """
    if isinstance(cpu_data, dict):
        # Control message: remember the target csv path for later samples.
        self.cpu_filename = cpu_data['cpu_file']
        logger.debug("cpu_filename: " + str(self.cpu_filename))
        return
    try:
        cpu_data[0] = timestamp
        sample = {
            "time": cpu_data[0] * 1000,  # ms for the in-memory report
            "total": cpu_data[1],
            "cpu_jiffies": cpu_data[4],
            "user": cpu_data[2],
            "sys": cpu_data[3],
            "pck_jiffies": cpu_data[8],
            "pid_cpu": cpu_data[9]
        }
        self.perf_data['cpu'].append(sample)
        with open(self.cpu_filename, 'a+') as out:
            logger.debug(
                "write cpu data in dataworker mem timestamp: " +
                str(cpu_data[0]))
            # Unconditionally formatted here (unlike the other savers).
            cpu_data[0] = TimeUtils.formatTimeStamp(cpu_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = cpu_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(cpu_data)
    except Exception:
        logger.error('cpu save error')
        # Original logs the stack trace at error level for this saver.
        logger.error(traceback.format_exc())
def _get_mem_save(self, mem_data, timestamp):
    """Cache one memory sample in memory and append it to the mem CSV file.

    A dict payload is a control message carrying the csv path under
    'mem_file'. A list payload is
    [collection_time, total_ram(KB), free_ram(KB), packagename, pid,
     pid_pss(KB), pid_alloc_heap(KB)].
    """
    if isinstance(mem_data, dict):
        # Control message: remember the target csv path for later samples.
        self.mem_filename = mem_data['mem_file']
        logger.debug("mem_filename: " + str(self.mem_filename))
        return
    try:
        mem_data[0] = timestamp
        sample = {
            "time": mem_data[0] * 1000,  # ms for the in-memory report
            "total": mem_data[1],
            "free": mem_data[2],
            "pss": mem_data[5],
            "heap": mem_data[6]
        }
        self.perf_data['mem'].append(sample)
        with open(self.mem_filename, 'a+') as out:
            logger.debug(
                "write mem data in dataworker。。。。。。 mem timestamp: " +
                str(mem_data[0]))
            # CSV rows carry a human-readable time instead of the raw epoch.
            if isinstance(mem_data[0], float):
                mem_data[0] = TimeUtils.formatTimeStamp(mem_data[0])
            readable = copy.deepcopy(sample)
            readable["time"] = mem_data[0]
            logger.debug(readable)
            csv.writer(out, lineterminator='\n').writerow(mem_data)
    except Exception:
        logger.error('mem save error')
        logger.debug(traceback.format_exc())
def _calculator_thread(self, start_time):
    '''Consume surfaceflinger frame data from self.data_queue and compute FPS.

    Two modes:
      * legacy — data is a dict with 'timestamp' and 'page_flip_count';
        FPS is flips-per-second, clamped to 60.
      * modern — data is (refresh_period, frame timestamps, collect_time);
        FPS and jank come from self._calculate_results.
    Results go onto self.fps_queue (preceded by a control message with the
    csv path) or, when no queue exists, straight into fps.csv.
    The loop exits when the literal string 'Stop' is dequeued.
    '''
    fps_file = os.path.join(RuntimeData.package_save_path, 'fps.csv')
    if self.use_legacy_method:
        fps_title = ['datetime', 'fps']
    else:
        fps_title = ['datetime', "activity window", 'fps', 'jank']
    try:
        with open(fps_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(fps_title)
            if self.fps_queue:
                # Control message: tells the consumer where fps data is saved.
                fps_file_dic = {'fps_file': fps_file}
                self.fps_queue.put(fps_file_dic)
    except RuntimeError as e:
        logger.exception(e)
    while True:
        try:
            data = self.data_queue.get()
            if isinstance(data, str) and data == 'Stop':
                break
            before = time.time()
            if self.use_legacy_method:
                # Elapsed wall time between the two surface snapshots.
                td = data['timestamp'] - self.surface_before['timestamp']
                seconds = td.seconds + td.microseconds / 1e6
                frame_count = (data['page_flip_count'] -
                               self.surface_before['page_flip_count'])
                fps = int(round(frame_count / seconds))
                # Clamp: display refresh is assumed to cap at 60Hz here.
                if fps > 60:
                    fps = 60
                self.surface_before = data
                logger.debug('FPS:%2s' % fps)
                tmp_list = [TimeUtils.getCurrentTimeUnderline(), fps]
                try:
                    with open(fps_file, 'a+') as f:
                        # tmp_list[0] = TimeUtils.formatTimeStamp(tmp_list[0])
                        csv.writer(f, lineterminator='\n').writerow(tmp_list)
                except RuntimeError as e:
                    logger.exception(e)
            else:
                refresh_period = data[0]
                timestamps = data[1]
                collect_time = data[2]
                fps, jank = self._calculate_results(
                    refresh_period, timestamps)
                logger.debug('FPS:%2s Jank:%s' % (fps, jank))
                fps_list = [collect_time, self.focus_window, fps, jank]
                if self.fps_queue:
                    self.fps_queue.put(fps_list)
                if not self.fps_queue:  # save locally when run as a single script
                    try:
                        with open(fps_file, 'a+') as f:
                            tmp_list = copy.deepcopy(fps_list)
                            tmp_list[0] = TimeUtils.formatTimeStamp(
                                tmp_list[0])
                            csv.writer(
                                f, lineterminator='\n').writerow(tmp_list)
                    except RuntimeError as e:
                        logger.exception(e)
            # Sleep for the remainder of the period so cadence is kept.
            time_consume = time.time() - before
            delta_inter = self.frequency - time_consume
            if delta_inter > 0:
                time.sleep(delta_inter)
        except:
            logger.error(
                "an exception hanpend in fps _calculator_thread ,reason unkown!"
            )
            s = traceback.format_exc()
            logger.debug(s)
    if self.fps_queue:
        self.fps_queue.task_done()
def _collect_cpu_thread(self, start_time):
    """Worker loop that samples device- and package-level cpu usage.

    Writes a header (and, when a consumer queue exists, a control message
    with the csv path) to cpuinfo.csv, then samples every self._interval
    seconds until self.stop_device_event is set or the timeout elapses.

    BUG FIXES vs. original:
      * ``cpu_title.append("package", "pid", "pid_cpu%")`` raised TypeError
        (list.append takes exactly one argument) — now ``extend``.
      * ``gather_list.append()`` was called with NO arguments, raising
        TypeError on every iteration, so the per-package columns were never
        produced and the row never written — now filled from cpu_pck_info.
      * the row was put on the queue BEFORE the per-package columns were
        appended, so the consumer saw a list still being mutated — the put
        now happens after the row is complete.
    """
    end_time = time.time() + self._timeout
    cpu_title = ["datetime", "device_cpu_rate%", "user%", "system%"]
    cpu_file = os.path.join(RuntimeData.package_save_path, 'cpuinfo.csv')
    for i in range(0, len(self.packages)):
        cpu_title.extend(["package", "pid", "pid_cpu%"])
    cpu_title.append("total_pid_cpu%")
    try:
        with open(cpu_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(cpu_title)
            if self.cpu_queue:
                # Control message: tells the consumer where samples are saved.
                cpu_file_dic = {'cpu_file': cpu_file}
                self.cpu_queue.put(cpu_file_dic)
    except RuntimeError as e:
        logger.error(e)
    while not self.stop_device_event.is_set() and time.time() < end_time:
        try:
            before = time.time()
            logger.debug("into _collect_cpu_thread loop thread is : " +
                         str(threading.current_thread().name))
            cpu_info = self._get_cpu_usage()
            logger.debug(" get cpu info: " + str(cpu_info))
            cpu_pck_info = self._get_pck_cpu_usage()
            cpu_pck_info = self._trim_pakcage_info(cpu_pck_info, cpu_info)
            collection_time = time.time()
            logger.debug(" collection time in cpu is : " +
                         TimeUtils.getCurrentTime())
            if cpu_pck_info.pid == -1:
                # Target process not running; skip this sample.
                logger.debug("cpu_pck pid is -1")
                continue
            gather_list = [
                collection_time, cpu_info.cpu_rate, cpu_info.user_rate,
                cpu_info.system_rate
            ]
            # Per-package columns (package, pid, pid_cpu%).
            # NOTE(review): only one cpu_pck_info snapshot is collected per
            # loop, so with multiple packages the same snapshot is repeated —
            # TODO confirm _get_pck_cpu_usage covers all self.packages.
            for i in range(0, len(self.packages)):
                gather_list.extend([
                    cpu_pck_info.pckagename, cpu_pck_info.pid,
                    cpu_pck_info.p_cpu_rate
                ])
            # Total process cpu usage column ("total_pid_cpu%").
            gather_list.append(cpu_pck_info.p_cpu_rate)
            if self.cpu_queue:
                self.cpu_queue.put(gather_list)
            if not self.cpu_queue:  # standalone run of this single script
                gather_list[0] = TimeUtils.formatTimeStamp(gather_list[0])
                try:
                    with open(cpu_file, 'a+', encoding="utf-8") as f:
                        csv.writer(
                            f, lineterminator='\n').writerow(gather_list)
                        logger.debug("write to file:" + cpu_file)
                        logger.debug(gather_list)
                except RuntimeError as e:
                    logger.error(e)
            # Sleep only for the remainder of the interval so the sampling
            # cadence is not skewed by collection cost.
            time_consume = time.time() - before
            logger.debug(
                " _collect_cpu_thread time consume for device cpu usage: " +
                str(format(time_consume, '0.2f')))
            delta_inter = self._interval - time_consume
            if delta_inter > 0:
                time.sleep(delta_inter)
        except Exception as e:
            logger.error(
                "an exception hanpend in cpu thread , reason unkown!")
            s = traceback.format_exc()
            logger.debug(s)  # dump the stack trace to the log
    if self.cpu_queue:
        self.cpu_queue.task_done()
    logger.debug("stop event is set or timeout")
def _collect_traffic_thread(self, start_time):
    """Worker loop that samples per-uid network traffic for self.packagename.

    Writes a header (and, when a consumer queue exists, a control message
    with the csv path) to traffics_uid.csv, then samples every
    self._interval seconds until self._stop_event is set or the timeout
    elapses. The first successful snapshot becomes the baseline
    (self.traffic_init_dic) and later snapshots are reported relative to it.
    """
    end_time = time.time() + self._timeout
    uid = TrafficUtils.getUID(self.device, self.packagename)
    traffic_list_title = ("datetime", "packagename", "uid", "uid_total(KB)",
                          "uid_total_packets", "rx(KB)", "rx_packets",
                          "tx(KB)", "tx_packets", "fg(KB)", "bg(KB)", "lo(KB)")
    traffic_file = os.path.join(RuntimeData.package_save_path,
                                'traffics_uid.csv')
    try:
        with open(traffic_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(traffic_list_title)
            if self.traffic_queue:
                # Control message: tells the consumer where samples are saved.
                traffic_file_dic = {'traffic_file': traffic_file}
                self.traffic_queue.put(traffic_file_dic)
    except RuntimeError as e:
        logger.error(e)
    while not self._stop_event.is_set() and time.time() < end_time:
        try:
            before = time.time()
            logger.debug(
                "----------------- into _collect_traffic_thread loop thread is : "
                + str(threading.current_thread().name) +
                ", current uid is : " + str(uid))
            traffic_snapshot = self._cat_traffic_data(
                self.packagename, uid)
            if traffic_snapshot.source == '' or traffic_snapshot.source == None:
                continue  # nothing readable this round — skip the rest of the iteration
            # retry_count = retry_count - 1
            # if retry_count <= 0:
            #     logger.debug("traffic, can't get traffic info, try six times, break...")
            #     break
            if self.traffic_init:
                # First good snapshot: record the baseline for deltas.
                self.traffic_init_dic = self.get_traffic_init_data(
                    traffic_snapshot)
                self.traffic_init = False
            # Convert absolute counters into deltas since thread start.
            traffic_snapshot = self.get_data_from_threadstart(
                traffic_snapshot)
            collection_time = time.time()
            logger.debug(" collection time in traffic is : " +
                         str(collection_time))
            traffic_list_temp = [
                collection_time, traffic_snapshot.packagename,
                traffic_snapshot.uid,
                TrafficUtils.byte2kb(traffic_snapshot.total_uid_bytes),
                traffic_snapshot.total_uid_packets,
                TrafficUtils.byte2kb(traffic_snapshot.rx_uid_bytes),
                traffic_snapshot.rx_uid_packets,
                TrafficUtils.byte2kb(traffic_snapshot.tx_uid_bytes),
                traffic_snapshot.tx_uid_packets,
                TrafficUtils.byte2kb(traffic_snapshot.fg_bytes),
                TrafficUtils.byte2kb(traffic_snapshot.bg_bytes),
                TrafficUtils.byte2kb(traffic_snapshot.lo_uid_bytes)
            ]
            logger.debug(traffic_list_temp)
            if self.traffic_queue:
                self.traffic_queue.put(traffic_list_temp)
            if not self.traffic_queue:  # standalone run of this single file
                traffic_list_temp[0] = TimeUtils.formatTimeStamp(
                    traffic_list_temp[0])
                try:
                    with open(traffic_file, 'a+') as f:
                        writer = csv.writer(f, lineterminator='\n')
                        writer.writerow(traffic_list_temp)
                except RuntimeError as e:
                    logger.error(e)
            after = time.time()
            time_consume = after - before
            logger.debug(" -----------traffic timeconsumed: " +
                         str(time_consume))
            # Calibrate the sleep: running the shell commands costs time,
            # so subtract that cost from the interval.
            delta_inter = self._interval - time_consume
            if delta_inter > 0:
                time.sleep(delta_inter)
        except RuntimeError as e:
            logger.error(" trafficstats RuntimeError ")
            logger.error(e)
        except Exception as e:
            logger.error(
                "an exception hanpend in traffic thread , reason unkown! e: "
            )
            s = traceback.format_exc()
            logger.debug(s)
    if self.traffic_queue:
        self.traffic_queue.task_done()
def _collect_memory_thread(self, start_time):
    """Worker loop that samples memory usage for self.packages.

    Produces several csv files under RuntimeData.package_save_path:
      * meminfo.csv — device totals plus per-package pss columns;
      * pid_change.csv — a row whenever a tracked package's pid changes;
      * pss_<pkg>.csv — per-package pss/heap detail every interval.
    Additionally dumps the heap of every package roughly every 5 minutes,
    and runs the (expensive) full ``dumpsys meminfo`` only every 10th
    iteration to keep system_server load down.
    """
    end_time = time.time() + self._timeout
    mem_list_titile = ["datatime", "total_ram(MB)", "free_ram(MB)"]
    pid_list_titile = ["datatime"]
    pss_detail_titile = [
        "datatime", "package", "pid", "pss", "java_heap", "native_heap",
        "system"
    ]
    for i in range(0, len(self.packages)):
        mem_list_titile.extend(["package", "pid", "pid_pss(MB)"])
        pid_list_titile.extend(["package", "pid"])
    if len(self.packages) > 1:
        mem_list_titile.append("total_pss(MB)")
    mem_file = os.path.join(RuntimeData.package_save_path, 'meminfo.csv')
    pid_file = os.path.join(RuntimeData.package_save_path, 'pid_change.csv')
    # One detail file per package, named after the last dotted component.
    for package in self.packages:
        pss_detail_file = os.path.join(
            RuntimeData.package_save_path,
            'pss_%s.csv' % package.split(".")[-1].replace(":", "_"))
        with open(pss_detail_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(pss_detail_titile)
    try:
        with open(mem_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(mem_list_titile)
            if self.mem_queue:
                # Control message: tells the consumer where samples are saved.
                mem_file_dic = {'mem_file': mem_file}
                self.mem_queue.put(mem_file_dic)
        with open(pid_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(pid_list_titile)
    except RuntimeError as e:
        logger.error(e)
    starttime_stamp = TimeUtils.getTimeStamp(start_time,
                                             "%Y_%m_%d_%H_%M_%S")
    old_package_pid_pss_list = []
    dumpsys_mem_times = 0
    # On "D" system builds dumping under /sdcard fails with
    # "System server has no access to file context", so use /data/local/tmp.
    # hprof_path = "/sdcard/hprof"
    hprof_path = "/data/local/tmp"
    self.device.adb.run_shell_cmd("mkdir " + hprof_path)
    # Dumping under the sdcard directory requires selinux permissive mode.
    self.device.adb.run_shell_cmd("setenforce 0")
    first_dump = True
    while not self._stop_event.is_set() and time.time() < end_time:
        try:
            before = time.time()
            logger.debug(
                "-----------into _collect_mem_thread loop, thread is : " +
                str(threading.current_thread().name))
            collection_time = time.time()
            # Detailed per-process meminfo for each tracked package.
            for package in self.packages:
                mem_pck_snapshot = self._dumpsys_process_meminfo(package)
                if 0 == mem_pck_snapshot.totalPSS:
                    logger.error("package total pss is 0:%s" % package)
                    continue
                pss_detail_file = os.path.join(
                    RuntimeData.package_save_path,
                    'pss_%s.csv' % package.split(".")[-1].replace(":", "_"))
                pss_detail_list = [
                    TimeUtils.formatTimeStamp(collection_time), package,
                    mem_pck_snapshot.pid, mem_pck_snapshot.totalPSS,
                    mem_pck_snapshot.javaHeap, mem_pck_snapshot.nativeHeap,
                    mem_pck_snapshot.system
                ]
                with open(pss_detail_file, 'a+') as pss_writer:
                    writer_p = csv.writer(pss_writer, lineterminator='\n')
                    # Append the sample to the pss_detail csv.
                    writer_p.writerow(pss_detail_list)
            # Dump the heap roughly every 5 minutes (and once at start-up).
            if (before - starttime_stamp) > 300 or first_dump:
                # Clear previous hprof files for tracked packages first.
                filelist = self.device.adb.list_dir(hprof_path)
                if filelist:
                    for file in filelist:
                        for package in self.packages:
                            if package in file:
                                self.device.adb.delete_file(hprof_path +
                                                            "/" + file)
                # if (before - starttime_stamp) % 60 < self._interval and "D" in self.device.adb.get_system_version():
                for package in self.packages:
                    self.device.adb.dumpheap(package,
                                             RuntimeData.package_save_path)
                starttime_stamp = before
                # self.device.adb.run_shell_cmd("kill -10 %s"%str(mem_pck_snapshot.pid))
            # Full dumpsys meminfo is slow and can spike system_server cpu,
            # so only run it every 10th interval (and once at start-up).
            dumpsys_mem_times = dumpsys_mem_times + 1
            if dumpsys_mem_times % 10 == 0 or first_dump:
                mem_device_snapshot = self._dumpsys_meminfo()
                # totalmem can never legitimately be 0, so 0 means the
                # dumpsys output could not be parsed.
                if mem_device_snapshot == None or not mem_device_snapshot.package_pid_pss_list or mem_device_snapshot.totalmem == 0:
                    logger.error("mem_device_snapshot is none")
                    # Keep the counter where it was so we retry next loop.
                    dumpsys_mem_times = dumpsys_mem_times - 1
                    continue
                first_dump = False
                logger.debug("current time: " + TimeUtils.getCurrentTime() +
                             ", processname: " + ",total pss:" +
                             str(mem_device_snapshot.total_pss))
                logger.debug("collection time in meminfo is : " +
                             TimeUtils.getCurrentTime())
                gather_list = [
                    TimeUtils.formatTimeStamp(collection_time),
                    mem_device_snapshot.totalmem, mem_device_snapshot.freemem
                ]
                pid_list = [TimeUtils.formatTimeStamp(collection_time)]
                pid_change = False
                # Per-package pss columns, only when the snapshot covers
                # every tracked package.
                for i in range(0, len(self.packages)):
                    if len(mem_device_snapshot.package_pid_pss_list
                           ) == len(self.packages):
                        gather_list.extend([
                            mem_device_snapshot.package_pid_pss_list[i]
                            ["package"],
                            mem_device_snapshot.package_pid_pss_list[i]
                            ["pid"],
                            mem_device_snapshot.package_pid_pss_list[i]
                            ["pss"]
                        ])
                if not old_package_pid_pss_list:
                    # First snapshot — record baseline pids as a "change".
                    old_package_pid_pss_list = mem_device_snapshot.package_pid_pss_list
                    pid_change = True
                else:
                    for i in range(0, len(self.packages)):
                        package = mem_device_snapshot.package_pid_pss_list[
                            i]["package"]
                        if mem_device_snapshot.package_pid_pss_list[i]["pid"] and \
                                old_package_pid_pss_list[i]["pid"] != mem_device_snapshot.package_pid_pss_list[i]["pid"]:
                            pid_change = True
                            # Only act when the previous pid was also known.
                            if old_package_pid_pss_list[i]["pid"]:
                                if package and package in RuntimeData.config_dic[
                                        "pid_change_focus_package"]:
                                    # Pull tombstones so the restart can be
                                    # investigated (only filed if present).
                                    self.device.adb.pull_file(
                                        "/data/vendor/tombstones",
                                        RuntimeData.package_save_path)
                if pid_change:
                    old_package_pid_pss_list = mem_device_snapshot.package_pid_pss_list
                    for i in range(0, len(self.packages)):
                        if len(old_package_pid_pss_list) == len(
                                self.packages):
                            pid_list.extend([
                                old_package_pid_pss_list[i]["package"],
                                old_package_pid_pss_list[i]["pid"]
                            ])
                    try:
                        with open(pid_file, 'a+') as pid_writer:
                            writer_p = csv.writer(pid_writer,
                                                  lineterminator='\n')
                            writer_p.writerow(pid_list)
                            logger.debug("write to file:" + pid_file)
                            logger.debug(pid_list)
                    except RuntimeError as e:
                        logger.error(e)
                if len(self.packages) > 1:
                    gather_list.append(mem_device_snapshot.total_pss)
                if self.mem_queue:
                    # Queue consumers expect the raw epoch time in column 0.
                    gather_list[0] = collection_time
                    self.mem_queue.put(gather_list)
                if not self.mem_queue:  # standalone run of this single file
                    try:
                        with open(mem_file, 'a+') as mem_writer:
                            writer_p = csv.writer(mem_writer,
                                                  lineterminator='\n')
                            writer_p.writerow(gather_list)
                            logger.debug("write to file:" + mem_file)
                            logger.debug(gather_list)
                    except RuntimeError as e:
                        logger.error(e)
            after = time.time()
            time_consume = after - before
            delta_inter = self._interval - time_consume
            logger.info("time consume for meminfos: " + str(time_consume))
            if delta_inter > 0:
                time.sleep(delta_inter)
        except:
            logger.error(
                "an exception hanpend in meminfo thread, reason unkown!")
            s = traceback.format_exc()
            logger.debug(s)
    if self.mem_queue:
        self.mem_queue.task_done()
    logger.debug("stop event is set or timeout")
def get_traffic_with_dev(self):
    """Worker loop that samples device-wide and per-pid traffic via /proc net dev.

    Writes a header to traffic.csv, then every self._interval seconds reads
    a device-level snapshot plus one snapshot per tracked package pid.
    The first iteration records baselines (self.device_init_net /
    self.pck_init_net_list); each row reports growth since that baseline.
    Rows go onto self.traffic_queue, or — without a queue — straight to csv.
    """
    end_time = time.time() + self._timeout
    traffic_title = [
        "datetime", "device_total(KB)", "device_receive(KB)",
        "device_transport(KB)"
    ]
    traffic_file = os.path.join(RuntimeData.package_save_path, 'traffic.csv')
    for i in range(0, len(self.packages)):
        traffic_title.extend([
            "package", "pid", "pid_rx(KB)", "pid_tx(KB)", "pid_total(KB)"
        ])
    if len(self.packages) > 1:
        traffic_title.append("total_proc_traffic(kB)")
    try:
        with open(traffic_file, 'a+') as df:
            csv.writer(df, lineterminator='\n').writerow(traffic_title)
    except RuntimeError as e:
        logger.error(e)
    self.device_init_net = None
    self.pck_init_net_list = []
    while not self._stop_event.is_set() and time.time() < end_time:
        try:
            before = time.time()
            logger.debug(
                "--------- into _collect_traffic_thread loop thread is : " +
                str(threading.current_thread().name))
            device_cur_net = self._cat_traffic_device_dev()
            if device_cur_net.source == '' or device_cur_net.source == None:
                continue  # nothing readable this round — skip the iteration
            if self.traffic_init:
                # First iteration: record the device-level baseline. The
                # flag is cleared later, only after the per-package
                # baselines are also captured.
                self.device_init_net = device_cur_net
                # self.traffic_init = False
            device_grow = self.get_net_from_begin(self.device_init_net,
                                                  device_cur_net)
            collection_time = time.time()
            logger.debug(" collection time in traffic is : " +
                         str(collection_time))
            net_row = [
                collection_time,
                TrafficUtils.byte2kb(device_grow.total),
                TrafficUtils.byte2kb(device_grow.rx),
                TrafficUtils.byte2kb(device_grow.tx)
            ]
            self.total_pck_net = 0
            for i in range(0, len(self.packages)):
                pid = self.device.adb.get_pid_from_pck(self.packages[i])
                pck_net_info = self._cat_traffic_pid_dev(pid)
                if not pck_net_info.source:
                    logger.error("package net dev failed %s:" %
                                 self.packages[i])
                    continue
                if self.traffic_init:
                    # Record this package's baseline; clear the init flag
                    # once the last package's baseline is stored.
                    self.pck_init_net_list.append(pck_net_info)
                    if i == len(self.packages) - 1:
                        self.traffic_init = False
                pck_grow = self.get_net_from_begin(
                    self.pck_init_net_list[i], pck_net_info)
                self.total_pck_net = self.total_pck_net + pck_grow.wifi_total
                net_row.extend([
                    self.packages[i], pid,
                    TrafficUtils.byte2kb(pck_grow.rx),
                    TrafficUtils.byte2kb(pck_grow.tx),
                    TrafficUtils.byte2kb(pck_grow.total)
                ])
            if len(self.packages) > 1:
                net_row.append(TrafficUtils.byte2kb(self.total_pck_net))
            if self.traffic_queue:
                self.traffic_queue.put(net_row)
            if not self.traffic_queue:  # standalone run of this single file
                net_row[0] = TimeUtils.formatTimeStamp(net_row[0])
                try:
                    with open(traffic_file, 'a+', encoding="utf-8") as f:
                        writer = csv.writer(f, lineterminator='\n')
                        writer.writerow(net_row)
                except RuntimeError as e:
                    logger.error(e)
            logger.debug(net_row)
            after = time.time()
            time_consume = after - before
            logger.debug(" -----------traffic timeconsumed: " +
                         str(time_consume))
            # Calibrate the sleep: subtract the cost of the shell commands
            # from the interval so the sampling cadence stays stable.
            delta_inter = self._interval - time_consume
            if delta_inter > 0:
                time.sleep(delta_inter)
        except RuntimeError as e:
            logger.error(" trafficstats RuntimeError ")
            logger.error(e)
        except Exception as e:
            logger.error(
                "an exception hanpend in traffic thread , reason unkown! e: "
            )
            s = traceback.format_exc()
            logger.debug(s)
    if self.traffic_queue:
        self.traffic_queue.task_done()