def sys_kpi_collect(self):
    """Start the system-KPI collection script and block until it is stopped.

    Launches ./collect.sh in a shell subprocess and hands it to
    to_stop_subprocess with flag '1', which waits for the TCP link count
    to drop before terminating the collector.
    """
    proc = subprocess.Popen('./collect.sh', stdout=subprocess.PIPE, shell=True)
    writeLog('INFO', '>>>>> 性能指标采集进程执行中.....')
    self.to_stop_subprocess('1', proc)
def to_tar():
    """Copy the parsed KPI files into result/, tar them up, and return the archive name.

    Returns:
        str: archive name of the form ``SYS_KPI_<ip>_<timestamp>.tar``.
        Exits the process if the tar command raises.
    """
    ip = get_local_ip()
    times = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
    # Per-server result files are named "<server>\:<port>".  BUG FIX: the
    # original used "\:" in a non-raw string, which is an invalid escape
    # sequence (SyntaxWarning on modern CPython); a raw string keeps the
    # same backslash-colon bytes explicitly.
    server_files = ["res/%s" % (v + r"\:" + k) for k, v in SERVERS_D.items()]
    subprocess.call(
        "cp res/linking_number res/timeConsum "
        + " ".join(server_files)
        + " result/",
        shell=True)
    files = ["result/" + filename for filename in os.listdir("result/")]
    tar_file_name = 'SYS_KPI_' + ip + "_" + times + '.tar'
    cmd = 'tar -cf ' + tar_file_name + ' ' + ' '.join(files)
    try:
        subprocess.call(cmd, shell=True)
    except Exception as err:
        writeLog("ERROR", r">>>>> 文件压缩出现错误 %s" % str(err))
        exit()
    writeLog("INFO", r">>>>> 指标文件打包完成")
    return tar_file_name
def scp_tar(tar_file):
    """Upload the KPI tar archive to the report server over SCP.

    Args:
        tar_file: local path of the archive produced by to_tar().

    Logs CRITICAL and returns without uploading when any of the report
    server settings (ip/username/password/location) is missing.
    """
    ip = REPORT_SERVER.get("ip")
    username = REPORT_SERVER.get("username")
    password = REPORT_SERVER.get("password")
    location = REPORT_SERVER.get("location")
    if not (ip and username and password and location):
        writeLog("CRITICAL", r">>>>> 信息不全,不能上传压缩包")
        return
    remote = os.path.join(location, tar_file)
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, 22, username, password)
        # The with-statement closes the SCP channel; the original also called
        # s.close() inside the block, which was redundant.
        with scp.SCPClient(ssh.get_transport()) as s:
            s.put(tar_file, remote_path=remote)
    finally:
        # BUG FIX: the SSH session was never closed (connection leak).
        ssh.close()
    writeLog("INFO", r">>>>> 上传压缩包完成")
def main_start():
    """Entry point: purge old results, parse the raw KPI files, tar and upload them."""
    # Delete old KPI files from a previous run
    del_old_file.Del_Old_File("result/").del_old_file()
    # Locate the configuration file
    confpath = getConfPath()
    # Build the commands used to parse the KPI files
    cmds = getCmds.Get_Cmds(confpath).getcmds()
    # Extract the useful data from the raw KPI files
    AbstractKPI(cmds).abstract_kpi()
    # Tar the parsed KPI files that now sit under result/
    tar_file = to_tar()
    writeLog("INFO", r">>>>> 指标数据提取并打包完成")
    scp_tar(tar_file)
def to_stop_subprocess(self, flag, popen):
    """Record TCP link counts and terminate the collection subprocess.

    Args:
        flag: '1' to monitor the link count and stop collection once it has
            stayed at or below the initial count for 15 consecutive seconds;
            any other value just waits for the subprocess to exit on its own.
        popen: the subprocess.Popen handle returned by sys_kpi_collect.
    """
    curr_tcpnum = self.getLinkNum.getLinkingNumber(SERVERS_D)
    self.tcpRecord.recordData([
        "srs&nginx Linking",
        "%s %s %s" % tuple(SERVERS_D.values()),
        "Time(s) Numbers"
    ])
    self.tcpRecord.recordData(self.getStr(self.TCP_COUNT))
    # BUG FIX: the original tested "flag is '1'" -- identity comparison with
    # a string literal relies on CPython interning and is implementation-
    # dependent; equality is the correct test.
    if flag == '1':
        loops = 0
        while True:
            if sum(curr_tcpnum) <= sum(self.TCP_COUNT):
                if loops == 15:
                    # Link count stayed at/below the baseline for 15s: stop.
                    # First kill any sar/iostat helpers still in the system.
                    names = ['sar', 'iostat']
                    cmd = "killall -9 %s %s" % tuple(names)
                    subprocess.call(cmd, shell=True)
                    # Terminate the collection subprocess itself.
                    popen.kill()
                    if popen.poll() is not None:
                        break
                    else:
                        writeLog("INFO", r">>>>> 等待子进程终止")
                else:
                    loops += 5
                    time.sleep(5)
            else:
                loops = 0
                time.sleep(INTERVAL_TIME)  # wait INTERVAL_TIME between samples
            curr_tcpnum = self.getLinkNum.getLinkingNumber(SERVERS_D)
            self.tcpRecord.recordData(self.getStr(curr_tcpnum))
        writeLog("INFO", r">>>>> 性能指标采集完成")
    else:
        while True:
            if popen.poll() is not None:
                break
            else:
                writeLog("INFO", r">>>>> 等待子进程终止")
        writeLog("INFO", r">>>>> 性能指标采集完成")
def serverMemoryCollect(servers, intervaltime, tcpNum, getLinkObj):
    """Sample the resident memory of a service's processes until load drops.

    Args:
        servers: [port, process_name] pair for one monitored service.
        intervaltime: seconds to sleep between samples while load is high.
        tcpNum: initial total link count; sampling stops once the current
            count has stayed at or below this for 15 consecutive seconds.
        getLinkObj: object exposing getLinkingNumber(port).

    Returns:
        -1 when no process matching the service name is running, else 0.
    """
    getLinkNum = getLinkObj
    memRecord = Record_Data("res/%s" % (servers[1] + ":" + servers[0]))
    cmd = "ps -ef | grep %s | grep -v grep | awk '{print $2}'" % servers[1]
    (status_code, lines) = subprocess.getstatusoutput(cmd)
    writeLog("INFO", ">>>>> %s 指标采集进程执行中....." % servers[1])
    if not lines:
        return -1
    pids = lines.split("\n")
    header = [servers[1], 'used', 'Linking_Number Memory_Capacity(MB)']
    try:
        memRecord.recordData(header)
        curr_tcpN = sum(getLinkNum.getLinkingNumber(servers[0]))
        loops = 0
        while True:
            vrss = []
            for pid in pids:
                cmd2 = "cat /proc/%s/status | grep VmRSS | awk '{print $2}'" % pid
                (status_code, result) = subprocess.getstatusoutput(cmd2)
                # BUG FIX: a pid from the ps snapshot may have exited before
                # its /proc entry is read, yielding empty/garbage output; the
                # original int(result) then raised ValueError. Skip such pids.
                if result.strip().isdigit():
                    vrss.append(int(result))
            memRecord.recordData([str(sum(vrss) / 1024)])  # KB -> MB
            if curr_tcpN <= tcpNum:
                if loops == 15:
                    # Load stayed at/below the baseline for 15s: stop sampling.
                    break
                else:
                    loops += 5
                    time.sleep(5)
            else:
                loops = 0
                time.sleep(intervaltime)
            curr_tcpN = sum(getLinkNum.getLinkingNumber(servers[0]))
        writeLog("INFO", r">>>>> %s 进程内存采集完成" % servers[1])
    except IOError as err:
        writeLog("INFO", "File error: " + str(err))
    return 0
def main_start(self):
    """Wait until the link count rises above the baseline, then run every collector.

    Spawns one process per service for memory sampling, runs the system KPI
    collection in this process, and records the total elapsed time.
    """
    start_times = 0.0
    timeRecord = Record_Data("res/timeConsum")
    for server, num in zip(SERVERS_D.values(), self.TCP_COUNT):
        writeLog("INFO", r">>>>> 初始 %s 服务连接数 %d" % (server, num))
    curr_tcpN = self.getLinkNum.getLinkingNumber(SERVERS_D)
    time.sleep(INTERVAL_TIME)
    while True:
        # Trigger only once the current total exceeds the baseline count.
        if not sum(curr_tcpN) <= sum(self.TCP_COUNT):
            start_times = time.time()
            global g_starttime
            g_starttime = start_times
            for server, num in zip(SERVERS_D.values(), curr_tcpN):
                writeLog("INFO", r">>>>> 指标采集任务开始,当前 %s 连接数 %d" % (server, num))
            # Delete old KPI files from a previous run
            del_old_file.Del_Old_File("res/").del_old_file()
            # Collect process-memory KPIs of the other services (srs, nginx, ...)
            # each in its own process
            for port, server in SERVERS_D.items():
                multiprocessing.Process(target=serverMemoryCollect,
                                        args=([port, server], INTERVAL_TIME,
                                              sum(self.TCP_COUNT),
                                              self.getLinkNum)).start()
            # Collect the server's system KPIs (blocks until collection stops)
            self.sys_kpi_collect()
            writeLog("INFO", r">>>>> 性能数据采集结束!")
            time_consum = time.time() - start_times
            timeRecord.recordData([str(time_consum)])
            break
        else:
            time.sleep(1)
            curr_tcpN = self.getLinkNum.getLinkingNumber(SERVERS_D)
pyb.repl_uart(uart) #sw = pyb.Switch() rtc = pyb.RTC() def usr_pressed_check(): global usr_pressed usr_pressed = True #sw.callback(usr_pressed_check) while True: current_temp = hdc_temp() current_hum = hdc_hum() print_to_display(current_temp, current_hum) while True: new_temp = hdc_temp() new_hum = hdc_hum() if new_temp > current_temp + 1 or new_temp < current_temp - 1: current_temp = new_temp current_hum = new_hum print_to_display(current_temp, current_hum) writeLog(rtc, new_temp, new_hum) utime.sleep_ms(1000) #if usr_pressed: #read_sensors() #usr_pressed = False #if lcd.is_touched(): #read_sensors()