def __init__(self, mysql_w, model="", batch_size=1):
    """Open the multithread timing file and prepare the qps MySQL table.

    Args:
        mysql_w: "on"/"off" switch controlling whether results are written
            to MySQL (and whether the table is truncated up front).
        model: model name used to locate the timing file in the CI workspace.
        batch_size: batch size forwarded to the LoadCommon helper.
    """
    try:
        #TODO: path is hard-coded to the CI workspace
        self.multithread_pk_file = "/home/qa_work/CI/workspace/sys_anakin_compare_output/%s/multithread_time/Multi_thread_time.txt" % model
        if os.path.exists(self.multithread_pk_file):
            self.file = open(self.multithread_pk_file)
        else:
            print("[error]: the %s does not exist" % self.multithread_pk_file)
            sys.exit(1)
        # per-line result lists parsed out of the timing file
        self.batch_size = []
        self.thread_num = []
        self.qps = []
        # init mysql
        self.mysql = LoadCommon(model, batch_size)
        # the qps table uses the collection time as its primary key
        time_now = int(time.time())
        time_local = time.localtime(time_now)
        self.time_sql = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
        self.mysql.create_table_sql_multithread_qps()
        self.db_onoff = mysql_w
        if self.db_onoff == "on":
            self.mysql.truncate_table_sql("multithread_qps")
    except Exception as exception:
        print(exception)
        return
def __init__(self, time_sql, mysql_w, model="", batch_size=1, gpu_card="p4"):
    """Read the model config section and create the qps result tables.

    Args:
        time_sql: run timestamp string shared by all qps tables as key.
        mysql_w: "on"/"off" switch for writing results to MySQL.
        model: model name selecting the `conf_<model>` config section.
        batch_size: batch size forwarded to LoadCommon.
        gpu_card: GPU card tag (e.g. "p4") used in file/table names.
    """
    try:
        cf = ConfigParser.ConfigParser()
        cf.read("../conf/load_config.conf")
        conf_name = "conf_%s" % model
        try:
            self.gpu_card = gpu_card
            self.time_pk_path = cf.get(conf_name, "time_pk_path")
        except Exception as e:
            # missing section/option means the model name was wrong
            print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
            sys.exit(1)
        # init mysql
        self.mysql = LoadCommon(model, batch_size, self.gpu_card)
        # the qps tables share the run timestamp as their primary key
        self.time_sql = time_sql
        self.mysql.create_table_sql_anakin2_yolo_qps()
        self.mysql.create_table_sql_tensorrt_qps()
        self.db_onoff = mysql_w
    except Exception as exception:
        print(exception)
        return
def __init__(self, db_name, model, batch_size):
    """Remember the report parameters and attach a MySQL helper.

    Args:
        db_name: database name handed to LoadCommon.
        model: network/model name the report is about.
        batch_size: batch size the report is about.
    """
    # plain value holders used later when the report is built
    self.batch_size = batch_size
    self.model = model
    # MySQL access helper bound to the given database
    self.mysql = LoadCommon(db_name)
def __init__(self, db_name, model, thread_size):
    """Remember the report parameters and attach a MySQL helper.

    Args:
        db_name: database name handed to LoadCommon.
        model: network/model name the report is about.
        thread_size: thread count the report is about.
    """
    # plain value holders used later when the report is built
    self.thread_size = thread_size
    self.model = model
    # MySQL access helper bound to the given database
    self.mysql = LoadCommon(db_name)
def __init__(self, mysql_w, time_sql, model="", thread_size=1, cpu_card="5117"):
    """Open the lego top-capture file, find the benchmark pid, init MySQL.

    Args:
        mysql_w: "on"/"off" switch for writing results to MySQL.
        time_sql: run timestamp string used as the table primary key.
        model: model name selecting the `conf_<model>` config section.
        thread_size: thread count forwarded to LoadCommon.
        cpu_card: CPU tag (e.g. "5117") used in the capture file name.
    """
    try:
        cf = ConfigParser.ConfigParser()
        cf.read("../conf/load_config.conf")
        #TODO
        conf_name = "conf_%s" % model
        try:
            # file name is hard-coded; no longer read from the config
            self.cpu_card = cpu_card
            self.filename_top = "lego_top_result_filename_%s.txt" % self.cpu_card
        except Exception as e:
            print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
            sys.exit(1)
        # get pid in top's file; exactly one qa_work process is expected
        pid_set = set()
        temp_f1 = open(self.filename_top)
        for line in temp_f1.readlines():
            if "qa_work" in line:
                pid_set.add(line.split()[0])
        temp_f1.close()
        if len(pid_set) != 1:
            sys.exit(1)
        self.pid = pid_set.pop()
        self.file_top = open(self.filename_top)
        # per-second sample lists filled by analysis_host_sec()
        self.cpu_list_1sec = []
        self.phy_mem_list_1sec = []
        self.virt_mem_list_1sec = []
        self.top_pertime = []
        # init mysql
        self.mysql = LoadCommon(model, thread_size, self.cpu_card)
        # primary-key timestamp shared with the other tables of this run
        self.time_sql = time_sql
        self.mysql.create_database()
        self.mysql.create_table_sql_top_avg_model_lego()
        self.db_onoff = mysql_w
    except Exception as exception:
        print(exception)
        return
def __init__(self, mysql_w, time_sql, model="", batch_size=1, gpu_card="p4"):
    """Open the tensorRT top/nvidia-smi capture files and init MySQL.

    Args:
        mysql_w: "on"/"off" switch for writing results to MySQL
            (and for truncating the result tables up front).
        time_sql: run timestamp string used as the table primary key.
        model: model name selecting the `conf_<model>` config section.
        batch_size: batch size forwarded to LoadCommon.
        gpu_card: GPU tag (e.g. "p4") used in file and table names.
    """
    try:
        cf = ConfigParser.ConfigParser()
        cf.read("../conf/load_config.conf")
        #TODO
        conf_name = "conf_%s" % model
        try:
            # file names are hard-coded; no longer read from the config
            self.gpu_card = gpu_card
            self.filename_top = "tensorrt_top_result_filename_%s.txt" % self.gpu_card
            self.filename_gpu = "tensorrt_gpu_result_filename_%s.txt" % self.gpu_card
        except Exception as e:
            print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
            sys.exit(1)
        # get pid in top's file; exactly one qa_work process is expected
        pid_set = set()
        temp_f1 = open(self.filename_top)
        for line in temp_f1.readlines():
            if "qa_work" in line:
                pid_set.add(line.split()[0])
        temp_f1.close()
        if len(pid_set) != 1:
            sys.exit(1)
        self.pid = pid_set.pop()
        self.file_top = open(self.filename_top)
        self.file_gpu = open(self.filename_gpu)
        # per-second host sample lists
        self.cpu_list_1sec = []
        self.phy_mem_list_1sec = []
        self.virt_mem_list_1sec = []
        self.top_pertime = []
        # per-second GPU sample lists (up to four cards, plus aggregates)
        self.gpu_pertime = []
        self.gpu_usage_percent_1 = []
        self.gpu_usage_percent_2 = []
        self.gpu_usage_percent_3 = []
        self.gpu_usage_percent_4 = []
        self.gpu_usage_percent_all = []
        self.gpu_mem_1 = []
        self.gpu_mem_2 = []
        self.gpu_mem_3 = []
        self.gpu_mem_4 = []
        self.gpu_mem_all = []
        self.gpu_temper_1 = []
        self.gpu_temper_2 = []
        self.gpu_temper_3 = []
        self.gpu_temper_4 = []
        self.gpu_temper_max = []
        # init mysql
        self.mysql = LoadCommon(model, batch_size, self.gpu_card)
        # primary-key timestamp shared with the other tables of this run
        self.time_sql = time_sql
        self.mysql.create_database()
        self.mysql.create_table_sql_top_avg_model_tensorRT()
        self.mysql.create_table_sql_nvidia_version_model_tensorRT()
        self.db_onoff = mysql_w
        if self.db_onoff == "on":
            self.mysql.truncate_table_sql("top_list_1sec_avg_tensorRT_%s" % self.gpu_card)
            self.mysql.truncate_table_sql("nvidia_list_1sec_version_tensorRT_%s" % self.gpu_card)
    except Exception as exception:
        print(exception)
        return
class LoadPerformance(object):
    """Parse tensorRT benchmark `top` / `nvidia-smi` captures into MySQL.

    Reads the per-second capture files produced during a tensorRT run and
    writes the aggregated host (cpu/mem) and GPU (usage/mem/temperature)
    figures into the result tables keyed by the run timestamp.
    """

    def __init__(self, mysql_w, time_sql, model="", batch_size=1, gpu_card="p4"):
        """Open the capture files, locate the benchmark pid and init MySQL.

        Args:
            mysql_w: "on"/"off" switch for writing results to MySQL.
            time_sql: run timestamp string used as the table primary key.
            model: model name selecting the `conf_<model>` config section.
            batch_size: batch size forwarded to LoadCommon.
            gpu_card: GPU tag (e.g. "p4") used in file and table names.
        """
        try:
            cf = ConfigParser.ConfigParser()
            cf.read("../conf/load_config.conf")
            #TODO
            conf_name = "conf_%s" % model
            try:
                # file names are hard-coded; no longer read from the config
                self.gpu_card = gpu_card
                self.filename_top = "tensorrt_top_result_filename_%s.txt" % self.gpu_card
                self.filename_gpu = "tensorrt_gpu_result_filename_%s.txt" % self.gpu_card
            except Exception as e:
                print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
                sys.exit(1)
            # get pid in top's file; exactly one qa_work process is expected
            pid_set = set()
            temp_f1 = open(self.filename_top)
            for line in temp_f1.readlines():
                if "qa_work" in line:
                    pid_set.add(line.split()[0])
            temp_f1.close()
            if len(pid_set) != 1:
                sys.exit(1)
            self.pid = pid_set.pop()
            self.file_top = open(self.filename_top)
            self.file_gpu = open(self.filename_gpu)
            # per-second host sample lists
            self.cpu_list_1sec = []
            self.phy_mem_list_1sec = []
            self.virt_mem_list_1sec = []
            self.top_pertime = []
            # per-second GPU sample lists (up to four cards, plus aggregates)
            self.gpu_pertime = []
            self.gpu_usage_percent_1 = []
            self.gpu_usage_percent_2 = []
            self.gpu_usage_percent_3 = []
            self.gpu_usage_percent_4 = []
            self.gpu_usage_percent_all = []
            self.gpu_mem_1 = []
            self.gpu_mem_2 = []
            self.gpu_mem_3 = []
            self.gpu_mem_4 = []
            self.gpu_mem_all = []
            self.gpu_temper_1 = []
            self.gpu_temper_2 = []
            self.gpu_temper_3 = []
            self.gpu_temper_4 = []
            self.gpu_temper_max = []
            # init mysql
            self.mysql = LoadCommon(model, batch_size, self.gpu_card)
            # primary-key timestamp shared with the other tables of this run
            self.time_sql = time_sql
            self.mysql.create_database()
            self.mysql.create_table_sql_top_avg_model_tensorRT()
            self.mysql.create_table_sql_nvidia_version_model_tensorRT()
            self.db_onoff = mysql_w
            if self.db_onoff == "on":
                self.mysql.truncate_table_sql("top_list_1sec_avg_tensorRT_%s" % self.gpu_card)
                self.mysql.truncate_table_sql("nvidia_list_1sec_version_tensorRT_%s" % self.gpu_card)
        except Exception as exception:
            print(exception)
            return

    def __del__(self):
        """Close both capture files, ignoring (but reporting) errors."""
        try:
            self.file_top.close()
            self.file_gpu.close()
        except Exception as exception:
            print(exception)
            return

    def analysis_host_sec(self):
        """Aggregate per-second `top` samples for the benchmark pid.

        Collects, for every sample line belonging to self.pid:
        1. host(cpu) usage percent (column 9 of `top`)
        2. host(mem) physical memory size, normalised to GB
        3. host(mem) virtual memory size, normalised to GB
        and inserts the maxima into MySQL when db_onoff is "on".
        """
        # samples only carry a time of day; prepend today's date
        time_now = int(time.time())
        time_local = time.localtime(time_now)
        date = time.strftime("%Y-%m-%d", time_local)
        sum_cpu_ratio = 0
        sum_phy_mem_size = 0
        sum_virt_mem_size = 0
        key_re_word = "%s qa_work" % self.pid
        for line in self.file_top.readlines():
            if re.search(key_re_word, line):
                #analysis_cpu_rate()
                sum_cpu_ratio += float(line.split()[8])
                self.cpu_list_1sec.append(float(line.split()[8]))
                #analysis_host_phy_mem_size(), the standard unit is "g"
                if "m" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("m")) / 1000
                elif "g" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("g"))
                elif "k" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("k")) / 1000 / 1000
                else:
                    phy_mem_size = 0.0
                self.phy_mem_list_1sec.append(float(phy_mem_size))
                sum_phy_mem_size += phy_mem_size
                #analysis_host_virt_mem_size(), the standard unit is "g"
                if "m" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("m")) / 1000
                elif "g" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("g"))
                elif "k" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("k")) / 1000 / 1000
                else:
                    vir_mem_size = 0
                self.virt_mem_list_1sec.append(float(vir_mem_size))
                sum_virt_mem_size += vir_mem_size
            elif re.search("top -", line):
                # "top - HH:MM:SS up ..." header marks a new sample
                final_time = date + " " + line.split()[2]
                self.top_pertime.append(final_time)
        top_num = min(len(self.top_pertime), len(self.cpu_list_1sec),
                      len(self.phy_mem_list_1sec), len(self.virt_mem_list_1sec))
        #cal the average data
        average_cpu_ratio = round(sum_cpu_ratio / len(self.cpu_list_1sec), 2)
        average_phy_mem_size = round(sum_phy_mem_size / len(self.phy_mem_list_1sec), 2)
        average_virt_mem_size = round(sum_virt_mem_size / len(self.virt_mem_list_1sec), 2)
        #cal the max data
        max_cpu_ratio = max(self.cpu_list_1sec)
        max_phy_mem_size = max(self.phy_mem_list_1sec)
        max_virt_mem_size = max(self.virt_mem_list_1sec)
        #insert into mysql-top_list_1sec_avg
        print("average_cpu_ratio: %s" % average_cpu_ratio)
        print("average_phy_mem_size: %s" % average_phy_mem_size)
        print("average_virt_mem_size: %s" % average_virt_mem_size)
        print("max_cpu_ratio: %s" % max_cpu_ratio)
        print("max_phy_mem_size: %s" % max_phy_mem_size)
        print("max_virt_mem_size: %s" % max_virt_mem_size)
        if self.db_onoff == "on":
            self.mysql.insert_table_sql_top_avg(self.time_sql, max_cpu_ratio,
                                                max_phy_mem_size, max_virt_mem_size)

    def analysis_dev_sec(self):
        """Aggregate per-second `nvidia-smi` samples.

        Collects, per sample block (terminated by a "====ending====" line):
        1. device(gpu) physical memory size (per card, MiB)
        2. device(gpu) usage percent (per card)
        3. device(gpu) temperature (per card)
        then sums/maxes across cards and inserts the run maxima into MySQL
        when db_onoff is "on".
        """
        # samples only carry a time of day; prepend today's date
        time_now = int(time.time())
        time_local = time.localtime(time_now)
        date = time.strftime("%Y-%m-%d", time_local)
        # key: time key
        key_re_time = "[0-9]+ [0-9]+:[0-9]+:[0-9]+ 20[12][][0-9]"
        # key: temperature key
        key_re_temper = "[0-9]+C"
        # key: gpu percent key
        key_re_percent = "[0-9]+%"
        # key: gpu mem key
        key_re_mem = "%s" % self.pid
        key_re_mem_null = "No running processes found"
        # key: line ending key
        key_ending = "====ending===="
        # index of the card within the current sample block (0..3)
        new_gpu_data_count = 0
        for line in self.file_gpu.readlines():
            if re.search(key_re_time, line):
                # 1. collect the gpu time info
                final_time = date + " " + line.split()[3]
                self.gpu_pertime.append(final_time)
            elif re.search(key_re_temper, line) and re.search(key_re_percent, line):
                # 2. collect the gpu temperature info
                # 3. collect the gpu usage percentage info
                temper = float(line.split()[2].rstrip("C"))
                gpu_usage = float(line.split()[12].rstrip("%"))
                if new_gpu_data_count == 0:
                    self.gpu_temper_1.append(temper)
                    self.gpu_usage_percent_1.append(gpu_usage)
                elif new_gpu_data_count == 1:
                    self.gpu_temper_2.append(temper)
                    self.gpu_usage_percent_2.append(gpu_usage)
                elif new_gpu_data_count == 2:
                    self.gpu_temper_3.append(temper)
                    self.gpu_usage_percent_3.append(gpu_usage)
                elif new_gpu_data_count == 3:
                    self.gpu_temper_4.append(temper)
                    self.gpu_usage_percent_4.append(gpu_usage)
                new_gpu_data_count += 1
            elif re.search(key_re_mem, line) or re.search(key_re_mem_null, line):
                # 4. collect the gpu mem info
                this_gpu_num = line.split()[1]
                if "MiB" in line.split()[5]:
                    this_gpu_mem = float(line.split()[5].strip("MiB"))
                else:
                    # fix: previously left unbound when the unit was not MiB
                    # TODO_this: if there have other unit
                    this_gpu_mem = 0.0
                if this_gpu_num == "0":
                    self.gpu_mem_1.append(this_gpu_mem)
                elif this_gpu_num == "1":
                    self.gpu_mem_2.append(this_gpu_mem)
                elif this_gpu_num == "2":
                    self.gpu_mem_3.append(this_gpu_mem)
                elif this_gpu_num == "3":
                    self.gpu_mem_4.append(this_gpu_mem)
                elif this_gpu_num == "No":
                    # "No running processes found": zero usage on every card
                    self.gpu_mem_1.append(0)
                    self.gpu_mem_2.append(0)
                    self.gpu_mem_3.append(0)
                    self.gpu_mem_4.append(0)
            elif re.search(key_ending, line):
                # end of one sample block: pad the shorter mem lists so the
                # four per-card lists stay aligned
                max_len_gpu_mem = max(len(self.gpu_mem_4), len(self.gpu_mem_3),
                                      len(self.gpu_mem_2), len(self.gpu_mem_1))
                min_len_gpu_mem = min(len(self.gpu_mem_4), len(self.gpu_mem_3),
                                      len(self.gpu_mem_2), len(self.gpu_mem_1))
                if max_len_gpu_mem != min_len_gpu_mem:
                    if len(self.gpu_mem_1) != max_len_gpu_mem:
                        self.gpu_mem_1.append(0)
                    if len(self.gpu_mem_2) != max_len_gpu_mem:
                        self.gpu_mem_2.append(0)
                    if len(self.gpu_mem_3) != max_len_gpu_mem:
                        self.gpu_mem_3.append(0)
                    if len(self.gpu_mem_4) != max_len_gpu_mem:
                        self.gpu_mem_4.append(0)
                new_gpu_data_count = 0
        # all four per-card lists are equal length here
        for i in range(len(self.gpu_mem_1)):
            self.gpu_usage_percent_all.append(self.gpu_usage_percent_1[i] +
                                              self.gpu_usage_percent_2[i] +
                                              self.gpu_usage_percent_3[i] +
                                              self.gpu_usage_percent_4[i])
            self.gpu_mem_all.append(self.gpu_mem_1[i] + self.gpu_mem_2[i] +
                                    self.gpu_mem_3[i] + self.gpu_mem_4[i])
            self.gpu_temper_max.append(
                max(self.gpu_temper_1[i], self.gpu_temper_2[i],
                    self.gpu_temper_3[i], self.gpu_temper_4[i]))
        version_gpu_usage_percent_all = max(self.gpu_usage_percent_all)
        version_gpu_mem_all = max(self.gpu_mem_all)
        version_gpu_temper_max = max(self.gpu_temper_max)
        print("version_gpu_usage_percent_all: %s" % version_gpu_usage_percent_all)
        print("version_gpu_mem_all: %s" % version_gpu_mem_all)
        print("version_gpu_temper_max: %s" % version_gpu_temper_max)
        # insert into database: nvidia_list_1sec
        if self.db_onoff == "on":
            # insert into database: nvidia_list_1sec_avg
            self.mysql.insert_table_sql_nvidia_version(
                self.time_sql, version_gpu_usage_percent_all,
                version_gpu_mem_all, version_gpu_temper_max)
class LoadPerformance(object):
    """Build the anakin2-vs-tensorRT comparison rows for the excel report."""

    def __init__(self, db_name, model, batch_size):
        """Store report parameters and attach a MySQL helper.

        Args:
            db_name: database name handed to LoadCommon.
            model: network/model name the report row is about.
            batch_size: batch size the report row is about.
        """
        # init mysql
        self.mysql = LoadCommon(db_name)
        self.batch_size = batch_size
        self.model = model

    def make_excel_result(self):
        """Collect one comparison row per card (p4, k1200).

        Columns:
        1. Net_name                 2. Batch_size
        3. Library: RT              4. tensorRT Latency (ms)
        5. RT Memory (MB)           6. Library: Anakin2
        7. anakin2 Latency (ms)     8. anakin2 Memory (MB)
        9. anakin/tensorrt latency  10. anakin/tensorrt memory

        Returns:
            (line_data_p4, line_data_k1200): two dicts of column values;
            ratio entries are percentage strings, or None when either
            side of the ratio is missing/zero.
        """
        # 1.Net_name — fix: was the undefined name `model` (NameError)
        net_name = self.model
        # 2. Batch_size
        batch_size = self.batch_size
        # 3. Library: RT
        Library_RT = "RT"
        # 6. Library: Anakin2
        Library_anakin2 = "Anakin2"
        # 4. tensorRT Latency (ms)
        tensorRT_latency_p4 = self.mysql.select_tensorRT_latency("p4")
        tensorRT_latency_k1200 = self.mysql.select_tensorRT_latency("k1200")
        print("tensorRT_latency_p4: %s" % tensorRT_latency_p4)
        print("tensorRT_latency_k1200: %s" % tensorRT_latency_k1200)
        # 5. RT Memory (MB)
        tensorRT_memory_p4 = self.mysql.select_tensorRT_memory("p4")
        tensorRT_memory_k1200 = self.mysql.select_tensorRT_memory("k1200")
        print("tensorRT_memory_p4: %s" % tensorRT_memory_p4)
        print("tensorRT_memory_k1200: %s" % tensorRT_memory_k1200)
        # 7. anakin2 Latency (ms)
        anakin2_latency_p4 = self.mysql.select_anakin2_latency("p4")
        anakin2_latency_k1200 = self.mysql.select_anakin2_latency("k1200")
        print("anakin2_latency_p4: %s" % anakin2_latency_p4)
        print("anakin2_latency_k1200: %s" % anakin2_latency_k1200)
        # 8. anakin2 Memory (MB)
        anakin2_memory_p4 = self.mysql.select_anakin2_memory("p4")
        anakin2_memory_k1200 = self.mysql.select_anakin2_memory("k1200")
        print("anakin2_memory_p4: %s" % anakin2_memory_p4)
        print("anakin2_memory_k1200: %s" % anakin2_memory_k1200)
        # 9. anakin/tensorrt latency (integer percent string, or None)
        if tensorRT_latency_p4 and anakin2_latency_p4:
            ratio_latency_p4 = str(
                int((float(anakin2_latency_p4) /
                     float(tensorRT_latency_p4)) * 100)) + "%"
        else:
            ratio_latency_p4 = None
        if tensorRT_latency_k1200 and anakin2_latency_k1200:
            ratio_latency_k1200 = str(
                int((float(anakin2_latency_k1200) /
                     float(tensorRT_latency_k1200)) * 100)) + "%"
        else:
            ratio_latency_k1200 = None
        print("ratio_latency_p4: %s" % ratio_latency_p4)
        print("ratio_latency_k1200: %s" % ratio_latency_k1200)
        # 10. anakin/tensorrt memory (integer percent string, or None)
        if tensorRT_memory_p4 and anakin2_memory_p4:
            ratio_memory_p4 = str(
                int((float(anakin2_memory_p4) /
                     float(tensorRT_memory_p4)) * 100)) + "%"
        else:
            ratio_memory_p4 = None
        if tensorRT_memory_k1200 and anakin2_memory_k1200:
            ratio_memory_k1200 = str(
                int((float(anakin2_memory_k1200) /
                     float(tensorRT_memory_k1200)) * 100)) + "%"
        else:
            ratio_memory_k1200 = None
        print("ratio_memory_p4: %s" % ratio_memory_p4)
        print("ratio_memory_k1200: %s" % ratio_memory_k1200)
        line_data_p4 = {}
        line_data_p4["net_name"] = net_name
        line_data_p4["batch_size"] = batch_size
        line_data_p4["Library_RT"] = Library_RT
        line_data_p4["tensorRT_latency_p4"] = tensorRT_latency_p4
        line_data_p4["tensorRT_memory_p4"] = tensorRT_memory_p4
        line_data_p4["Library_anakin2"] = Library_anakin2
        line_data_p4["anakin2_latency_p4"] = anakin2_latency_p4
        line_data_p4["anakin2_memory_p4"] = anakin2_memory_p4
        line_data_p4["ratio_latency_p4"] = ratio_latency_p4
        line_data_p4["ratio_memory_p4"] = ratio_memory_p4
        line_data_k1200 = {}
        line_data_k1200["net_name"] = net_name
        line_data_k1200["batch_size"] = batch_size
        line_data_k1200["Library_RT"] = Library_RT
        line_data_k1200["tensorRT_latency_k1200"] = tensorRT_latency_k1200
        line_data_k1200["tensorRT_memory_k1200"] = tensorRT_memory_k1200
        line_data_k1200["Library_anakin2"] = Library_anakin2
        line_data_k1200["anakin2_latency_k1200"] = anakin2_latency_k1200
        line_data_k1200["anakin2_memory_k1200"] = anakin2_memory_k1200
        line_data_k1200["ratio_latency_k1200"] = ratio_latency_k1200
        line_data_k1200["ratio_memory_k1200"] = ratio_memory_k1200
        return line_data_p4, line_data_k1200
class LoadPerformance(object):
    """Parse lego benchmark `top` captures into MySQL.

    Reads the per-second `top` capture file produced during a lego run and
    writes the aggregated host (cpu/mem) figures into the result table
    keyed by the run timestamp.
    """

    def __init__(self, mysql_w, time_sql, model="", thread_size=1, cpu_card="5117"):
        """Open the top-capture file, locate the benchmark pid, init MySQL.

        Args:
            mysql_w: "on"/"off" switch for writing results to MySQL.
            time_sql: run timestamp string used as the table primary key.
            model: model name selecting the `conf_<model>` config section.
            thread_size: thread count forwarded to LoadCommon.
            cpu_card: CPU tag (e.g. "5117") used in the capture file name.
        """
        try:
            cf = ConfigParser.ConfigParser()
            cf.read("../conf/load_config.conf")
            #TODO
            conf_name = "conf_%s" % model
            try:
                # file name is hard-coded; no longer read from the config
                self.cpu_card = cpu_card
                self.filename_top = "lego_top_result_filename_%s.txt" % self.cpu_card
            except Exception as e:
                print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
                sys.exit(1)
            # get pid in top's file; exactly one qa_work process is expected
            pid_set = set()
            temp_f1 = open(self.filename_top)
            for line in temp_f1.readlines():
                if "qa_work" in line:
                    pid_set.add(line.split()[0])
            temp_f1.close()
            if len(pid_set) != 1:
                sys.exit(1)
            self.pid = pid_set.pop()
            self.file_top = open(self.filename_top)
            # per-second host sample lists
            self.cpu_list_1sec = []
            self.phy_mem_list_1sec = []
            self.virt_mem_list_1sec = []
            self.top_pertime = []
            # init mysql
            self.mysql = LoadCommon(model, thread_size, self.cpu_card)
            # primary-key timestamp shared with the other tables of this run
            self.time_sql = time_sql
            self.mysql.create_database()
            self.mysql.create_table_sql_top_avg_model_lego()
            self.db_onoff = mysql_w
        except Exception as exception:
            print(exception)
            return

    def __del__(self):
        """Close the capture file, ignoring (but reporting) errors."""
        try:
            self.file_top.close()
        except Exception as exception:
            print(exception)
            return

    def analysis_host_sec(self):
        """Aggregate per-second `top` samples for the benchmark pid.

        Collects, for every sample line belonging to self.pid:
        1. host(cpu) usage percent (column 9 of `top`)
        2. host(mem) physical memory size, normalised to GB
        3. host(mem) virtual memory size, normalised to GB
        and inserts the maxima into MySQL when db_onoff is "on".
        """
        # samples only carry a time of day; prepend today's date
        time_now = int(time.time())
        time_local = time.localtime(time_now)
        date = time.strftime("%Y-%m-%d", time_local)
        sum_cpu_ratio = 0
        sum_phy_mem_size = 0
        sum_virt_mem_size = 0
        key_re_word = "%s qa_work" % self.pid
        for line in self.file_top.readlines():
            if re.search(key_re_word, line):
                #analysis_cpu_rate()
                sum_cpu_ratio += float(line.split()[8])
                self.cpu_list_1sec.append(float(line.split()[8]))
                #analysis_host_phy_mem_size(), the standard unit is "g"
                if "m" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("m")) / 1000
                elif "g" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("g"))
                elif "k" in line.split()[5]:
                    phy_mem_size = float(line.split()[5].strip("k")) / 1000 / 1000
                else:
                    phy_mem_size = 0.0
                self.phy_mem_list_1sec.append(float(phy_mem_size))
                sum_phy_mem_size += phy_mem_size
                #analysis_host_virt_mem_size(), the standard unit is "g"
                if "m" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("m")) / 1000
                elif "g" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("g"))
                elif "k" in line.split()[4]:
                    vir_mem_size = float(line.split()[4].strip("k")) / 1000 / 1000
                else:
                    vir_mem_size = 0
                self.virt_mem_list_1sec.append(float(vir_mem_size))
                sum_virt_mem_size += vir_mem_size
            elif re.search("top -", line):
                # "top - HH:MM:SS up ..." header marks a new sample
                final_time = date + " " + line.split()[2]
                self.top_pertime.append(final_time)
        top_num = min(len(self.top_pertime), len(self.cpu_list_1sec),
                      len(self.phy_mem_list_1sec), len(self.virt_mem_list_1sec))
        #cal the average data
        average_cpu_ratio = round(sum_cpu_ratio / len(self.cpu_list_1sec), 2)
        average_phy_mem_size = round(sum_phy_mem_size / len(self.phy_mem_list_1sec), 2)
        average_virt_mem_size = round(sum_virt_mem_size / len(self.virt_mem_list_1sec), 2)
        #cal the max data
        max_cpu_ratio = max(self.cpu_list_1sec)
        max_phy_mem_size = max(self.phy_mem_list_1sec)
        max_virt_mem_size = max(self.virt_mem_list_1sec)
        #insert into mysql-top_list_1sec_avg
        print("average_cpu_ratio: %s" % average_cpu_ratio)
        print("average_phy_mem_size: %s" % average_phy_mem_size)
        print("average_virt_mem_size: %s" % average_virt_mem_size)
        print("max_cpu_ratio: %s" % max_cpu_ratio)
        print("max_phy_mem_size: %s" % max_phy_mem_size)
        print("max_virt_mem_size: %s" % max_virt_mem_size)
        if self.db_onoff == "on":
            self.mysql.insert_table_sql_top_avg(self.time_sql, max_cpu_ratio,
                                                max_phy_mem_size, max_virt_mem_size)
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Compare TensorRT Main File!

Authors: sysqa([email protected])
Date: 2018/04/04
"""
import os
import re
import sys
import time
import json
import logging
import random

import mylogging
from load_common import LoadCommon

if __name__ == '__main__':
    # set up logging first so later helpers can log
    logger = mylogging.init_log(logging.DEBUG)
    # MySQL helper for the "language" model, thread size 1, cpu card 5177
    mysql = LoadCommon("language", 1, "5177")
    #mysql.create_table_sql_top_avg_model_tensorRT()
    #mysql.create_table_sql_nvidia_version_model_tensorRT()
    mysql.create_database()
# Primary-key timestamp shared by all tables written during this run.
time_now = int(time.time())
time_local = time.localtime(time_now)
time_sql = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
# CLI: logdata_collect.py MODEL [THREAD_SIZE] [CPU_CARD]
# Missing arguments fall back to thread_size=1 and cpu_card="5117".
# This replaces four duplicated branches; it also fixes argc > 5, which
# previously fell through and left `mysql` undefined.
if len(sys.argv) == 1:
    print("\033[0;36;mno right input model: python logdata_collect.py \"model\"\033[0m")
    sys.exit(1)
model = sys.argv[1]
thread_size = sys.argv[2] if len(sys.argv) > 2 else 1
cpu_card = sys.argv[3] if len(sys.argv) > 3 else "5117"
mysql = LoadCommon(model, thread_size, cpu_card)
class LoadPerformance(object):
    """Parse benchmark timing files and persist qps figures to MySQL."""

    def __init__(self, time_sql, mysql_w, model="", batch_size=1, gpu_card="p4"):
        """Read the model config section and create the qps result tables.

        Args:
            time_sql: run timestamp string shared by all qps tables as key.
            mysql_w: "on"/"off" switch for writing results to MySQL.
            model: model name selecting the `conf_<model>` config section.
            batch_size: batch size forwarded to LoadCommon.
            gpu_card: GPU tag (e.g. "p4") used in file/table names.
        """
        try:
            cf = ConfigParser.ConfigParser()
            cf.read("../conf/load_config.conf")
            conf_name = "conf_%s" % model
            try:
                self.gpu_card = gpu_card
                self.time_pk_path = cf.get(conf_name, "time_pk_path")
            except Exception as e:
                # missing section/option means the model name was wrong
                print("\033[0;31;m[error]: Pls Check The Modle input wrong!\033[0m")
                sys.exit(1)
            # init mysql
            self.mysql = LoadCommon(model, batch_size, self.gpu_card)
            # the qps tables share the run timestamp as their primary key
            self.time_sql = time_sql
            self.mysql.create_table_sql_anakin2_yolo_qps()
            self.mysql.create_table_sql_tensorrt_qps()
            self.db_onoff = mysql_w
        except Exception as exception:
            print(exception)
            return

    def analysis_tensorRT_qps(self):
        """Parse TensorRT_time_<card>.txt and record image_num/total/avg time.

        Expects "key : value" lines for image_num, total_time (ms) and
        average_time (ms); total_time is converted to seconds.  Inserts
        into MySQL when db_onoff is "on"; exits 0 if the file is missing,
        exits 1 on a parse failure.
        """
        anakin1_time_file = self.time_pk_path + "/" + "TensorRT_time_%s.txt" % self.gpu_card
        if not os.path.exists(anakin1_time_file):
            print("[alarm]: the TensorRT_time.txt file do not exist")
            sys.exit(0)
        try:
            f = open(anakin1_time_file)
            for line in f.readlines():
                if "image_num" in line.split(":")[0]:
                    image_num = int(line.split(":")[1].strip())
                elif "total_time" in line.split(":")[0]:
                    total_time_ms = float(line.split(":")[1].strip().strip("ms"))
                    total_time = total_time_ms / 1000
                elif "average_time" in line.split(":")[0]:
                    average_time = float(line.split(":")[1].strip().strip("ms"))
            if self.db_onoff == "on":
                self.mysql.insert_table_sql_tensorrt_qps(
                    self.time_sql, image_num, total_time, average_time)
        except Exception as e:
            print("\033[0;31;m[error]: analysis TensorRT_time.txt file error\033[0m")
            sys.exit(1)
        finally:
            # NOTE(review): the original chunk was truncated right after
            # `finally:`; closing the timing file is the evident intent.
            f.close()
class LoadPerformance(object):
    """Parse the multithread timing file and persist qps figures to MySQL."""

    def __init__(self, mysql_w, model="", batch_size=1):
        """Open the multithread timing file and prepare the qps MySQL table.

        Args:
            mysql_w: "on"/"off" switch controlling whether results are
                written to MySQL (and whether the table is truncated).
            model: model name used to locate the timing file.
            batch_size: batch size forwarded to LoadCommon.
        """
        try:
            #TODO: path is hard-coded to the CI workspace
            self.multithread_pk_file = "/home/qa_work/CI/workspace/sys_anakin_compare_output/%s/multithread_time/Multi_thread_time.txt" % model
            if os.path.exists(self.multithread_pk_file):
                self.file = open(self.multithread_pk_file)
            else:
                print("[error]: the %s does not exist" % self.multithread_pk_file)
                sys.exit(1)
            # per-line result lists parsed out of the timing file
            self.batch_size = []
            self.thread_num = []
            self.qps = []
            # init mysql
            self.mysql = LoadCommon(model, batch_size)
            # the qps table uses the collection time as its primary key
            time_now = int(time.time())
            time_local = time.localtime(time_now)
            self.time_sql = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
            self.mysql.create_table_sql_multithread_qps()
            self.db_onoff = mysql_w
            if self.db_onoff == "on":
                self.mysql.truncate_table_sql("multithread_qps")
        except Exception as exception:
            print(exception)
            return

    def __del__(self):
        """Close the timing file, ignoring (but reporting) errors."""
        try:
            self.file.close()
        except Exception as exception:
            print(exception)
            return

    def analysis_multithread_qps(self):
        """Parse batch_size / thread_num / qps triples and insert them.

        Each line is space-split; fields 3, 7 and 11 carry the values.
        Inserts one row per line into MySQL when db_onoff is "on".
        """
        for line in self.file.readlines():
            batch_size = int(line.split(" ")[3])
            thread_num = int(line.split(" ")[7])
            qps = float(line.split(" ")[11])
            print("batch_size: %s, thread_num: %s, qps: %s" % (batch_size,
                                                               thread_num, qps))
            self.batch_size.append(batch_size)
            self.thread_num.append(thread_num)
            self.qps.append(qps)
        print(self.batch_size)
        print(self.thread_num)
        print(self.qps)
        if self.db_onoff == "on":
            for i in range(len(self.batch_size)):
                self.mysql.insert_table_sql_multithread_qps(
                    self.time_sql, self.batch_size[i], self.thread_num[i],
                    self.qps[i])
"""
Compare TensorRT Main File!

Authors: sysqa([email protected])
Date: 2018/04/04
"""
# NOTE(review): this chunk began mid-docstring; the opening triple-quote is
# restored here, and the duplicated commented-out table calls are collapsed.
import os
import re
import sys
import time
import json
import logging
import random

import mylogging
from load_common import LoadCommon

if __name__ == '__main__':
    # set up logging first so later helpers can log
    logger = mylogging.init_log(logging.DEBUG)
    mysql = LoadCommon("cnn_seg_8")
    #mysql.create_table_sql_top_avg_model_tensorRT()
    #mysql.create_table_sql_nvidia_version_model_tensorRT()
    # mysql.truncate_table_sql("top_list_1sec_avg_tensorRT")
    # mysql.truncate_table_sql("nvidia_list_1sec_version_tensorRT")
    mysql.create_database()
class LoadPerformance(object):
    """Assembles per-device excel rows comparing Anakin2, Paddle and Lego.

    Each row covers one device ("5117", "v3" or "v4") and contains the net
    name, thread size, each library's latency (ms), QPS, ratio (%) and
    memory, plus the cross-library percent ratios: anakin/paddle latency,
    paddle/anakin qps, anakin/lego latency and lego/anakin qps.
    """

    # (selector prefix used on self.mysql, row-key prefix, display label)
    _LIBRARIES = (
        ("anakin2", "anakin", "Anakin2"),
        ("paddle", "paddle", "Paddle"),
        ("lego", "lego", "Lego"),
    )
    _DEVICES = ("5117", "v3", "v4")
    _METRICS = ("latency", "qps", "ratio", "memory")
    # (row-key template, numerator key template, denominator key template)
    _CROSS_RATIOS = (
        ("ratio_latency_%s", "anakin_latency_%s", "paddle_latency_%s"),
        ("ratio_qps_%s", "paddle_qps_%s", "anakin_qps_%s"),
        ("ratio_latency_%s_2", "anakin_latency_%s", "lego_latency_%s"),
        ("ratio_qps_%s_2", "lego_qps_%s", "anakin_qps_%s"),
    )

    def __init__(self, db_name, model, thread_size):
        """init

        Args:
            db_name: database name handed to LoadCommon.
            model: net name reported in the excel rows.
            thread_size: thread count reported in the excel rows.
        """
        # init mysql
        self.mysql = LoadCommon(db_name)
        self.thread_size = thread_size
        self.model = model

    @staticmethod
    def _pct(numerator, denominator):
        """Return numerator/denominator as an integer percent string (e.g.
        "85%"), or None when either value is missing/zero."""
        if numerator and denominator:
            return str(int((float(numerator) / float(denominator)) * 100)) + "%"
        return None

    def _collect_metrics(self):
        """Query every (library, metric, device) value from mysql.

        Returns a flat dict keyed like "anakin_latency_5117"; prints each
        value in the same order as the original implementation.
        """
        values = {}
        for selector, key_prefix, _ in self._LIBRARIES:
            for metric in self._METRICS:
                getter = getattr(self.mysql, "select_%s_%s" % (selector, metric))
                for device in self._DEVICES:
                    key = "%s_%s_%s" % (key_prefix, metric, device)
                    values[key] = getter(device)
                    print("%s: %s" % (key, values[key]))
        return values

    def _cross_ratios(self, values):
        """Compute the cross-library percent ratios for every device."""
        ratios = {}
        for key_fmt, num_fmt, den_fmt in self._CROSS_RATIOS:
            for device in self._DEVICES:
                key = key_fmt % device
                ratios[key] = self._pct(values[num_fmt % device], values[den_fmt % device])
                print("%s: %s" % (key, ratios[key]))
        return ratios

    def _build_row(self, device, values, ratios):
        """Assemble one excel row dict for *device*, keys in original order."""
        row = {"net_name": self.model, "thread_size": self.thread_size}
        for _, key_prefix, label in self._LIBRARIES:
            row["Library_%s" % key_prefix] = "%s_%s" % (label, device)
            for metric in self._METRICS:
                key = "%s_%s_%s" % (key_prefix, metric, device)
                row[key] = values[key]
        for key_fmt, _, _ in self._CROSS_RATIOS:
            key = key_fmt % device
            row[key] = ratios[key]
        return row

    def make_excel_result(self):
        """Build the three excel comparison rows, one per device.

        Returns:
            tuple: (line_data_5117, line_data_v3, line_data_v4) dicts.
        """
        # NOTE: the accuracy-rate column (15) was commented out in the
        # original implementation and is intentionally omitted here.
        values = self._collect_metrics()
        ratios = self._cross_ratios(values)
        rows = [self._build_row(device, values, ratios) for device in self._DEVICES]
        return rows[0], rows[1], rows[2]