Exemple #1
0
    def fetch_mem(self, lines):
        """
        Extract the key MEM data: physical-memory usage (%) and usage
        including virtual memory (0 when the line format carries no
        virtual-memory columns).
        :param lines: lines containing the MEM key data
        """
        # Number of lines that failed to parse
        error_num = 0
        mem_sum = float(0)
        mem_virtual_sum = float(0)
        for line in lines:
            mems = line.split(",")
            if len(mems) == 17:
                # (Memtotal - Memfree - cached - buffers)/Memtotal  * 100
                mem_sum += ((float(mems[2]) - float(mems[6]) -
                             float(mems[11]) - float(mems[14])) /
                            float(mems[2]) * 100)
            elif len(mems) == 8:
                # (Real total - Real free)/Real total * 100
                mem_sum += ((float(mems[6]) - float(mems[4])) /
                            float(mems[6]) * 100)
                # (Real total - Real free + Virtual total - Virtual free) /(Real total + Virtual total) * 100
                mem_virtual_sum += ((float(mems[6]) - float(mems[4]) +
                                     float(mems[7]) - float(mems[5])) /
                                    (float(mems[6]) + float(mems[7])) * 100)
            else:
                logger.error("解析服务器ip为 %s 的 %s 监控文件的 MEM 数据出现异常,出现异常行数据为:%s" %
                             (self.ip, self.name, line))
                error_num += 1
                continue

        # Guard: when every line failed to parse (or lines is empty) the
        # original raised ZeroDivisionError; report zeros instead.
        valid_num = len(lines) - error_num
        if valid_num <= 0:
            self.mem = (0.0, 0.0)
        else:
            self.mem = (round(mem_sum / valid_num, 2),
                        round(mem_virtual_sum / valid_num, 2))
        logger.debug("mem: 不含虚拟内存的使用率 %.2f%%, 包含虚拟内存的使用率 %.2f%%" %
                     (self.mem[0], self.mem[1]))
Exemple #2
0
def check_dir(path):
    """Ensure the download directory *path* exists, creating it if missing."""
    logger.debug("检查下载路径是否存在")
    path_missing = not os.path.exists(path)
    if path_missing:
        logger.info("下载路径不存在,创建下载路径")
        os.makedirs(path)
    logger.debug("下载路径检查完成")
Exemple #3
0
    def start_nmon_control(self, config, filename):
        """
        Start background nmon monitoring on the remote server.

        Ensures the remote result directory (named after the server) exists,
        then launches nmon in file-capture mode.
        :param config: config object (supplies sampling interval and count)
        :param filename: nmon result file name (without the .nmon suffix)
        :raises CustomError: when not connected or a remote command fails
        :return:
        """
        if not hasattr(self, "ssh"):
            raise CustomError("未与服务端进行连接")

        # Probe for the remote result directory; a non-zero exit status
        # means it does not exist yet, so create it.
        stdin, stdout, stderr = self.ssh.exec_command("ls -dl " +
                                                      self.server_name)
        if stdout.channel.recv_exit_status():
            stdin, stdout, stderr = self.ssh.exec_command("mkdir " +
                                                          self.server_name)

            if stdout.channel.recv_exit_status():
                raise CustomError(stderr.read().decode('utf-8'))

        nmon_filename = filename + ".nmon"
        # nmon flags: -F output file; -s sampling interval (from
        # config.nmon_acquisition_interval); -c number of samples (from
        # config.nmon_all_time); -t presumably top-process stats — confirm
        # against the nmon manual.
        nmon_cmd = (self.path + "/nmon -F ./" + self.server_name + "/" +
                    nmon_filename + " -t -s " +
                    config.nmon_acquisition_interval + " -c " +
                    config.nmon_all_time)

        logger.debug("正在开启" + self.server_name + "监控,监控结果文件名为:" +
                     nmon_filename)
        logger.debug("监控命令 %s" % nmon_cmd)
        stdin, stdout, stderr = self.ssh.exec_command(nmon_cmd)

        if stdout.channel.recv_exit_status():
            err_msg = stderr.read().decode("utf-8")
            raise CustomError(err_msg)
Exemple #4
0
    def download_nmon_files(self, config):
        """
        Download this server's nmon monitoring files for the current task
        via SFTP into the local download directory, recording every local
        file path in self.file_list (sorted for stable report order).
        :param config: config object providing download_local_path
        :raises CustomError: when no SSH connection was established
        """
        if not hasattr(self, "ssh"):
            raise CustomError("未与服务端进行连接")

        download_local_path = config.download_local_path + os.path.sep + self.server_name + os.path.sep + self.taskid
        if not os.path.exists(download_local_path):
            logger.info("正在创建文件夹" + self.server_name)
            # makedirs (not mkdir): the path holds several components
            # (server_name/taskid) whose parents may not exist yet.
            os.makedirs(download_local_path)

        trans = self.ssh.get_transport()
        sftp = paramiko.SFTPClient.from_transport(trans)
        # Remote side is a POSIX path, so "/" is correct there.
        remote_dir = self.path + "/" + self.server_name + "/" + self.taskid
        files = sftp.listdir_attr(remote_dir)

        logger.info("开始下载" + self.server_name + "监控文件")
        for file in files:
            logger.debug("正在下载:" + file.filename)
            # Use os.path.sep consistently for the local path (the original
            # mixed it with a hard-coded backslash, breaking non-Windows).
            local_file = download_local_path + os.path.sep + file.filename
            sftp.get(remote_dir + "/" + file.filename, local_file)
            self.file_list.append(local_file)
        trans.close()
        # --add 20200515: report order could come out scrambled, so sort
        # after all file paths have been collected.
        self.file_list.sort()
        logger.info("%s 监控文件下载完成, 文件保存在 %s" %
                    (self.server_name, download_local_path))
Exemple #5
0
 def close(self):
     """
     Close the SSH connection to the backend server.
     :raises CustomError: when no connection was ever established
     """
     ssh_client = getattr(self, "ssh", None)
     if ssh_client is None:
         raise CustomError("未与服务端进行连接")
     logger.debug("正在关闭" + self.server_name + "的连接")
     ssh_client.close()
Exemple #6
0
def exe_command(command):
    """
    Run a shell command, logging its stdout; raise CustomError on failure.
    :param command: command line to execute (built locally by callers)
    :raises CustomError: on any failure except the known-benign log4cxx
        warning emitted by LoadRunner
    """
    logger.debug("正在执行命令:"+command)
    # NOTE(review): shell=True is only safe while `command` comes from
    # local config/scripts, never from untrusted input.
    result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if not result.returncode:
        logger.info(result.stdout.decode("gbk"))
    else:
        # LoadRunner throws a log4cxx exception even when the run finishes
        # and results are saved, so that particular error is ignored.
        err_msg = result.stderr.decode('gbk')
        # Clearer than the original `not err_msg.find(...) >= 0`.
        if "log4cxx" not in err_msg:
            raise CustomError(err_msg)
 def fetch_cpu(self, lines):
     """
     Average total CPU utilisation over the given lines.
     :param lines: lines containing the CPU key data
     """
     # sys% is column 2, user% is column 3; total = sys + user
     total = sum(float(cols[2]) + float(cols[3])
                 for cols in (line.split(",") for line in lines))
     self.cpu = round(total / len(lines), 2)
     logger.debug("cpu: %.2f%%" % self.cpu)
Exemple #8
0
 def connect(self, user, passwd):
     """
     Open an SSH session to the backend server and keep it on self.ssh.
     :param user:   login user name
     :param passwd: login password
     """
     logger.debug("正在与" + self.server_name + "建立连接")
     client = paramiko.SSHClient()
     # Accept unknown host keys automatically (lab environment).
     client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     client.connect(hostname=self.server_name,
                    port=self.SSH_PORT,
                    username=user,
                    password=passwd)
     self.ssh = client
Exemple #9
0
def get_all_script(script_path, file_extension):
    """
    Collect the base names of all files directly under *script_path* that
    carry the given extension.
    :param script_path:    directory path
    :param file_extension: file extension, e.g. ".jmx"
    :return: list of script file names without extension
    :raises CustomError: when the path is missing or holds no matching file
    """
    script_files = []
    if not os.path.exists(script_path):
        raise CustomError("路径错误,文件夹不存在")

    files = os.listdir(script_path)
    logger.debug("当前路径" + script_path + "下所有文件与文件夹")
    logger.debug(files)
    for file in files:
        # os.path.join is portable; the original hard-coded "\\".
        if not os.path.isfile(os.path.join(script_path, file)):
            continue
        name, extension = os.path.splitext(file)
        if extension == file_extension:
            script_files.append(name)

    if not script_files:
        raise CustomError("路径下无后缀为%s的脚本文件" % file_extension)

    logger.debug("所有脚本文件")
    logger.debug(script_files)
    return script_files
Exemple #10
0
def analyse_lr_cmd(files_path):
    """
    Build the LoadRunner analysis commands, one per result path.
    :param files_path: list of result paths (without the .lrr extension)
    :return: list of AnalysisUI command strings
    """
    cmd_anaylise_list = []
    # Removed the unused `cmd = r'wlrun -TestPath '` local — this function
    # only builds analysis commands, not run commands.
    # cmd_analyse = r'C:\"Program Files (x86)"\HP\LoadRunner\bin\AnalysisUI -RESULTPATH '
    cmd_analyse = r'AnalysisUI -RESULTPATH '
    for file_path in files_path:
        command_analyse = cmd_analyse + file_path + ".lrr -TEMPLATENAME html"
        cmd_anaylise_list.append(command_analyse)

    logger.debug("生成的 lr 解析命令")
    logger.debug(cmd_anaylise_list)
    return cmd_anaylise_list
Exemple #11
0
    def fetch_resp_time(self, file_path, resp_avg_list, resp_min_list,
                        resp_max_list):
        """
        Extract the response-time values from the response time html report,
        appending them (in ms) to the three caller-supplied lists.
        :param file_path: response time html absolute path
        :param resp_avg_list: receives the response time average values
        :param resp_min_list: receives the response time min values
        :param resp_max_list: receives the response time max values
        :raises CustomError: when no response-time rows are matched
        """
        logger.debug("%s 开始提取 response time 数据" % self.name)
        with open(file_path, "r", encoding='utf8') as response_time_html_file:
            response_time_str = response_time_html_file.read()
            # One match per row of the report's legend table.
            response_time_table_list = re.findall(
                r'<tr class="legendRow">([\s\S]*?)</tr>', response_time_str)

            if not response_time_table_list:
                raise CustomError("%s 未匹配到 response time 数据" % self.name)

            logger.debug("%s 共匹配到 %d 条 response time 记录" %
                         (self.name, len(response_time_table_list)))
            for index in range(0, len(response_time_table_list)):
                response_time_table_str = response_time_table_list[
                    index].replace("\n", "")
                # Split row into at most 7 chunks on "<td>"; cells used are
                # 2 (name), 3 (min), 4 (average), 5 (max), each still ending
                # with "</td>" — stripped below via [:-5].
                response_time_data_list = response_time_table_str.split(
                    "<td>", 6)

                trasaction_name = response_time_data_list[2][:-5]
                # Convert to ms (report values presumably in seconds)
                response_time_average = round(
                    float(response_time_data_list[4][:-5]) * 1000, 2)
                logger.debug(
                    "%s 交易 transcation %s response time average: %.2fms" %
                    (self.name, trasaction_name, response_time_average))
                resp_avg_list.append(response_time_average)

                response_time_min = round(
                    float(response_time_data_list[3][:-5]) * 1000, 2)
                logger.debug("%s 交易 transcation %s response time min: %.2fms" %
                             (self.name, trasaction_name, response_time_min))
                resp_min_list.append(response_time_min)

                response_time_max = round(
                    float(response_time_data_list[5][:-5]) * 1000, 2)
                logger.debug("%s 交易 transcation %s response time max: %.2fms" %
                             (self.name, trasaction_name, response_time_max))
                resp_max_list.append(response_time_max)
Exemple #12
0
def get_all_script_path(file_path, file_extension):
    """
    Collect the full paths (without extension) of every file under
    *file_path* that carries the given extension, walking recursively.
    :param file_path:       root directory to walk
    :param file_extension:  file extension, e.g. ".jmx"
    :return: list of matching file paths stripped of their extension
    """
    files_path = []
    for root, dirs, files in os.walk(file_path):
        for file in files:
            logger.debug("文件名:"+file)
            if os.path.splitext(file)[1] == file_extension:
                # Use a fresh name: the original rebound `file_path` here,
                # shadowing the function parameter.
                full_path = os.path.join(root, file)
                logger.info("含有"+file_extension+"后缀的文件:"+file+",全路径为:"+full_path)
                files_path.append(os.path.splitext(full_path)[0])

    return files_path
Exemple #13
0
 def fetch_cpu(self, lines):
     """
     Average total CPU utilisation (sys% + user%) over the given lines,
     skipping lines that fail to parse.
     :param lines: lines containing the CPU key data
     """
     # Number of lines that failed to parse
     error_num = 0
     cpu_sum = float(0)
     for line in lines:
         cpus = line.split(",")
         # sys% is column 2, user% is column 3; total = sys + user
         try:
             cpu_sum += (float(cpus[3]) + float(cpus[2]))
         except Exception:
             logger.error("解析服务器ip为 %s 的 %s 监控文件的 cpu 数据出现异常,出现异常行数据为:%s" %
                          (self.ip, self.name, line))
             error_num += 1
     # Guard: when every line failed (or lines is empty) the original
     # raised ZeroDivisionError; report 0.0 instead.
     valid_num = len(lines) - error_num
     self.cpu = round(cpu_sum / valid_num, 2) if valid_num else 0.0
     logger.debug("cpu: %.2f%%" % self.cpu)
Exemple #14
0
def check_exe():
    '''
    Check whether jmeter is already running; raise if it is so the scripted
    run does not clash with a live instance.
    :return:
    :raises CustomError: when jmeter is running or a subcommand fails
    '''

    command = "tasklist | findstr java.exe"
    result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # returncode 1: findstr matched nothing (no java.exe) — only an error
    # if something was written to stderr.
    if result.returncode == 1:
        if result.stderr:
            raise CustomError(result.stderr.decode('gbk'))
    elif result.returncode == 0:
        command_result_str = result.stdout.decode('gbk')
        logger.debug("命令 %s 执行结果 %a" % (command, command_result_str))
        command_result_list = command_result_str.split(os.linesep)
        logger.debug(command_result_list)
        for command_result in command_result_list:
            if command_result != '':
                # tasklist row: image name first, PID in the second column.
                pid = command_result.split()[1]
                # jstack dumps the JVM threads; the dump mentions "jmeter"
                # when that java process is a JMeter instance.
                find_jemeter = "jstack %s" % pid
                result_jm = subprocess.run(find_jemeter, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                if result_jm.returncode == 0:
                    if "jmeter" in result_jm.stdout.decode('gbk'):
                        raise CustomError("jmeter 程序正在运行, 请关闭 jmeter 再开启脚本")
                    else:
                        logger.debug("jmeter不在运行运行")
                else:
                    if result_jm.stderr:
                        raise CustomError(result_jm.stderr.decode('gbk'))
Exemple #15
0
    def fetch_tps(self, file_path, tps_list):
        """
        Extract the tps values from the tps html report.
        :param file_path: tps html absolute path
        :param tps_list: list receiving the tps values (as strings)
        :raises CustomError: when no tps rows are matched
        """
        logger.debug("%s 开始提取 tps 数据" % self.name)
        with open(file_path, "r", encoding='utf8') as tps_html_file:
            tps_str = tps_html_file.read()
            # One match per row of the report's legend table.
            tps_table_list = re.findall(
                r'<tr class="legendRow">([\s\S]*?)</tr>', tps_str)

            if not tps_table_list:
                raise CustomError("%s 未匹配到 tps 数据" % self.name)

            logger.debug("%s 共匹配到 %d 条tps记录" %
                         (self.name, len(tps_table_list)))
            for index in range(0, len(tps_table_list)):
                tps_table_str = tps_table_list[index].replace("\n", "")
                # Split on "<td>": cell 2 is "<name>:<status></td>", cell 4
                # is the tps value; [:-5] strips the trailing "</td>".
                tps_data_list = tps_table_str.split("<td>", 5)

                # Only Pass records contribute; skip failed transactions.
                if tps_data_list[2][:-5].split(":")[1] != "Pass":
                    continue

                logger.debug("%s 交易 transaction %s tps %s" %
                             (self.name, tps_data_list[2][:-5].split(":")[0],
                              tps_data_list[4][:-5]))

                tps_list.append(tps_data_list[4][:-5])
Exemple #16
0
    def file_analyse(self, file):
        """
        Parse a jmeter report directory and fill self.result_dict.
        :param file: directory holding the jmeter report
        :raises CustomError: when the dashboard statistics cannot be found
        """
        try:
            logger.info("开始解析%s jmeter结果文件" % os.path.basename(file))
            super().file_analyse(file)
            # The statistics table is embedded as a JSON literal inside the
            # report's dashboard.js.
            file_all_path = file + r"\content\js\dashboard.js"
            with open(file_all_path, "r", encoding="utf8") as jmeterfile:
                text = jmeterfile.read()
                # Capture the JSON argument of the statisticsTable call.
                static_data_match_result = re.match(
                    r'[\s\S]*statisticsTable"\),(.*?), function', text)

                if static_data_match_result is not None:
                    static_json_data = static_data_match_result.group(
                        1).strip()
                    logger.debug("取到 %s 的压测结果数据为: %s" %
                                 (os.path.basename(file), static_json_data))
                    static_data = json.loads(static_json_data)
                    logger.debug("转化成json格式:%s" % static_data)

                    if "items" not in static_data.keys():
                        raise CustomError("%s获取压测结果失败,提取到的数据中未找到item标签" %
                                          os.path.basename(file))

                    static_items_data = static_data["items"]
                    logger.debug("提取到的数据为: %s" % static_items_data)
                    for static_item_data in static_items_data:
                        tmp_data = static_item_data['data']
                        # list: [Transaction, TPS, Error%, Response Time(average), Response Time(min), Response Time(max)]
                        tmp_list = [
                            tmp_data[1],
                            round(float(tmp_data[10]), 2), tmp_data[3],
                            round(float(tmp_data[4]), 2),
                            round(float(tmp_data[5]), 2),
                            round(float(tmp_data[6]), 2)
                        ]
                        # dict: {name:list}
                        self.result_dict[tmp_data[0]] = tmp_list

                    logger.debug("%s 提取结果 %s" %
                                 (os.path.basename(file), self.result_dict))

                else:
                    raise CustomError("%s获取压测结果失败,未找到匹配数据" %
                                      os.path.basename(file))
        finally:
            logger.info("%s jmeter 结果文件解析结束" % os.path.basename(file))
Exemple #17
0
    def fetch_mem(self, lines):
        """
        Extract the key MEM data: physical-memory usage (%) and usage
        including virtual memory (0 when the format has no virtual columns).
        :param lines: lines containing the MEM key data
        :raises CustomError: on an unrecognised line format
        """
        mem_sum = 0.0
        mem_virtual_sum = 0.0
        for line in lines:
            mems = line.split(",")
            column_count = len(mems)
            if column_count == 17:
                # (Memtotal - Memfree - cached - buffers) / Memtotal * 100
                total = float(mems[2])
                used = total - float(mems[6]) - float(mems[11]) - float(mems[14])
                mem_sum += used / total * 100
            elif column_count == 8:
                real_total = float(mems[6])
                real_free = float(mems[4])
                virt_total = float(mems[7])
                virt_free = float(mems[5])
                # (Real total - Real free) / Real total * 100
                mem_sum += (real_total - real_free) / real_total * 100
                # (Real total - Real free + Virtual total - Virtual free)
                # / (Real total + Virtual total) * 100
                mem_virtual_sum += ((real_total - real_free +
                                     virt_total - virt_free) /
                                    (real_total + virt_total) * 100)
            else:
                raise CustomError("暂不支持此内存页面数据读取")

        line_count = len(lines)
        self.mem = (round(mem_sum / line_count, 2),
                    round(mem_virtual_sum / line_count, 2))
        logger.debug("mem: 不含虚拟内存的使用率 %.2f%%, 包含虚拟内存的使用率 %.2f%%" % (self.mem[0], self.mem[1]))
Exemple #18
0
def jmeter_cmd(script_file, path):
    """
    Build the jmeter run commands and result directories per script.
    :param script_file: list of script base names (without extension)
    :param path: directory holding the .jmx scripts
    :return: (command list, result directory list)
    """
    cmd_list = []
    result_file_list = []
    # cmd = r"D:\JMeter\apache-jmeter-5.1.1\bin\jmeter -n -t "
    cmd = r"jmeter -n -t "
    for file in script_file:
        result_path = path + os.path.sep + file
        command = (cmd + result_path + ".jmx" + " -l " + result_path +
                   ".jtl -e -o " + result_path)
        cmd_list.append(command)
        result_file_list.append(result_path)

    logger.debug("生成的 jmeter 命令")
    logger.debug(cmd_list)
    logger.debug("jmeter结果文件保存路径")
    logger.debug(result_file_list)
    return cmd_list, result_file_list
Exemple #19
0
def get_all_script(script_path, file_extension):
    """
    Collect script base names carrying the given extension.

    When *script_path* is a single file, validate its extension and return
    that one name plus its directory; otherwise scan the directory.
    :param script_path:    directory path or a single script file path
    :param file_extension: file extension, e.g. ".jmx"
    :return: (list of script names without extension, script directory)
    :raises CustomError: on wrong extension, missing path, or no match
    """
    # Single-file case: check the suffix and return immediately.
    if os.path.isfile(script_path):
        directory, file = os.path.split(script_path)
        name, extension = os.path.splitext(file)
        if extension == file_extension:
            return [name], directory
        raise CustomError("检查到文件后缀与脚本类型不符, 预期脚本类型为: %s" % file_extension)

    script_files = []
    if not os.path.exists(script_path):
        raise CustomError("路径错误,文件夹或者文件不存在: %s" % script_path)

    files = os.listdir(script_path)
    logger.debug("当前路径" + script_path + "下所有文件与文件夹")
    logger.debug(files)
    for file in files:
        # os.path.join is portable; the original hard-coded "\\".
        if not os.path.isfile(os.path.join(script_path, file)):
            continue
        name, extension = os.path.splitext(file)
        if extension == file_extension:
            script_files.append(name)

    if not script_files:
        raise CustomError("路径下无后缀为%s的脚本文件" % file_extension)

    logger.debug("所有脚本文件")
    logger.debug(script_files)
    return script_files, script_path
Exemple #20
0
    def _change_to_load_table(self, result_list):
        """
        Convert the load-test results into an html table string.
        :param result_list: results to convert; each element carries a
            name and a result_dict mapping transaction name -> data list
        :return: str table str
        :raises CustomError: when a script produced no samplers at all
        """
        logger.info("开始将压测报告数据转化成 table")
        html_str = """
        <h1>summary</h1>
        <table border="1">
         <tr>
            <th>script name</th>
            <th>trasaction name</th>
            <th>trasaction number</th>
            <th>tps</th>
            <th>error%</th>
            <th>response time(average) ms</th>
            <th>response time(min) ms</th>
            <th>response time(max) ms</th>
        </tr>
        """
        for result in result_list:
            keys_dict = result.result_dict.keys()
            keys = list(keys_dict)
            if len(keys) == 0:
                raise CustomError("%s 脚本提取数据异常,无法获取到取样器" % result.name)

            logger.debug('%s 含有 transaction %s' % (result.name, keys))
            # First transaction row also carries the script-name cell,
            # spanning all of this script's rows via rowspan.
            result_value_one = result.result_dict[keys[0]]
            summary_html_one = """
                  <tr>
                    <td rowspan= '%d'>%s</td>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s%%</td>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s</td>
                </tr>
                """ % (len(keys), result.name, keys[0], result_value_one[0],
                       result_value_one[1], result_value_one[2],
                       result_value_one[3], result_value_one[4],
                       result_value_one[5])

            if len(keys) == 1:
                html_str += summary_html_one
                continue

            # Remaining transactions: rows without the script-name cell.
            for key_index in range(1, len(keys)):
                result_value = result.result_dict[keys[key_index]]
                summary_html = """
                <tr>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s%%</td>
                    <td>%s</td>
                    <td>%s</td>
                    <td>%s</td>              
                </tr>
                """ % (keys[key_index], result_value[0], result_value[1],
                       result_value[2], result_value[3], result_value[4],
                       result_value[5])
                summary_html_one += summary_html

            html_str += summary_html_one

        return html_str + "</table>"
Exemple #21
0
    def fetch_disk(self, lines):
        """
        Extract the key DISK data: disk-read(KB/S), disk-write(KB/S),
        io(io/s) and disk-busy(%).

        Read/write/io are averaged over their sample lines; disk-busy is a
        per-column running mean, of which the largest column mean is kept.
        :param lines: lines containing the DISK key data
        """
        # Running totals per metric
        diskread_sum = float(0)
        diskwrite_sum = float(0)
        diskio_sum = float(0)

        # Running mean of every DISKBUSY column
        diskbusy_avg = []

        # Largest DISKBUSY column mean
        diskbusy_max = float(0)

        # Sample-line counters per metric
        diskread_num = 0
        diskwrite_num = 0
        diskio_num = 0
        diskbusy_num = 0
        for line in lines:
            disks = line.split(",")
            if "DISKREAD,T" in line:
                # diskread
                disk_read_line_sum = float(0)
                # Sum this sample line across all disk columns
                for diskread_index in range(2, len(disks)):
                    disk_read_line_sum += float(disks[diskread_index])
                # Accumulate
                diskread_sum += disk_read_line_sum
                # Count sample lines
                diskread_num += 1
            elif "DISKWRITE,T" in line:
                # diskwrite
                disk_write_line_sum = float(0)
                # Sum this sample line across all disk columns
                for diskwrite_index in range(2, len(disks)):
                    disk_write_line_sum += float(disks[diskwrite_index])
                # Accumulate
                diskwrite_sum += disk_write_line_sum
                # Count sample lines
                diskwrite_num += 1
            elif "DISKXFER,T" in line:
                # IO operations per second
                disk_io_line_sum = float(0)
                # Sum this sample line across all disk columns
                for diskio_index in range(2, len(disks)):
                    disk_io_line_sum += float(disks[diskio_index])
                # Accumulate
                diskio_sum += disk_io_line_sum
                # Count sample lines
                diskio_num += 1
            elif "DISKBUSY,T" in line:
                # First DISKBUSY line seeds the per-column values
                if len(diskbusy_avg) == 0:
                    for disk_busy_line_index in range(2, len(disks)):
                        diskbusy_avg.append(float(disks[disk_busy_line_index]))
                else:
                    diskbusy_num += 1
                    # Fold into the running mean of each busy column:
                    # mean_n = (mean_{n-1} * n + x) / (n + 1)
                    for disk_busy_line_index in range(2, len(disks)):
                        diskbusy_avg[disk_busy_line_index - 2] = (
                            float(diskbusy_avg[disk_busy_line_index - 2]) *
                            diskbusy_num + float(disks[disk_busy_line_index])
                        ) / (diskbusy_num + 1)

        # Pick the busiest column's mean
        for disk_busy in diskbusy_avg:
            if disk_busy > diskbusy_max:
                diskbusy_max = disk_busy

        # NOTE(review): raises ZeroDivisionError when a metric has no
        # sample lines at all — confirm inputs always carry each metric.
        self.disk = (round(diskread_sum / diskread_num,
                           2), round(diskwrite_sum / diskwrite_num,
                                     2), round(diskio_sum / diskio_num,
                                               2), round(diskbusy_max, 2))
        logger.debug(
            "disk: diskread %.2f, diskwrite %.2f, diskio %.2f, diskbusy %.2f%%"
            % (self.disk[0], self.disk[1], self.disk[2], self.disk[3]))
Exemple #22
0
    def fetch_net(self, lines):
        """
        Compute the mean net read(KB/s) and write(KB/s) of each interface
        column and keep the largest of each as self.net.
        :param lines: lines containing the NET key data
        :return:
        """
        # Column indexes of the "read" columns (from the header line)
        net_read_index = []
        # Column indexes of the "write" columns (from the header line)
        net_write_index = []
        # Running mean of every read column
        net_read = []
        # Running mean of every write column
        net_write = []
        # Largest read-column mean
        net_read_max = float(0)
        # Largest write-column mean
        net_write_max = float(0)

        for line in lines:
            disks = line.split(",")
            if not "NET,T" in line:
                # Header line: find which columns hold read/write values.
                for net_name_index in range(2, len(disks)):
                    net_name = disks[net_name_index]
                    # Record read columns (also resets the sample counter)
                    if "read" in net_name:
                        avg_read = 0
                        net_read_index.append(net_name_index)
                    # Record write columns
                    elif "write" in net_name:
                        avg_write = 0
                        net_write_index.append(net_name_index)
            else:
                # Data line: fold values into the per-column running means.
                if not len(net_read_index) == 0:
                    avg_read += 1
                    net_read_len_index = 0
                    for net_read_num_index in net_read_index:
                        if avg_read == 1:
                            net_read.append(float(disks[net_read_num_index]))
                        else:
                            # mean_n = (mean_{n-1} * (n-1) + x_n) / n
                            net_read[net_read_len_index] = (
                                float(net_read[net_read_len_index]) *
                                (avg_read - 1) +
                                float(disks[net_read_num_index])) / avg_read
                            net_read_len_index += 1
                # Same running-mean update for the write columns
                if not len(net_write_index) == 0:
                    avg_write += 1
                    net_write_len_index = 0
                    for net_write_num_index in net_write_index:
                        if avg_write == 1:
                            net_write.append(float(disks[net_write_num_index]))
                        else:
                            net_write[net_write_len_index] = (
                                float(net_write[net_write_len_index]) *
                                (avg_write - 1) +
                                float(disks[net_write_num_index])) / avg_write
                            net_write_len_index += 1

        # Keep the largest per-column mean of each kind.
        for net_read_avg in net_read:
            if net_read_avg > net_read_max:
                net_read_max = net_read_avg

        for net_write_avg in net_write:
            if net_write_avg > net_write_max:
                net_write_max = net_write_avg

        self.net = (round(net_read_max, 2), round(net_write_max, 2))
        logger.debug("net: 网络读取最大值 %.2f, 网络写入最大值 %.2f" %
                     (self.net[0], self.net[1]))
Exemple #23
0
def lr_cmd(script_file, path):
    """
    Build the LoadRunner run commands, analysis commands and result paths.
    :param script_file: list of scenario base names (without extension)
    :param path: directory holding the .lrs scenarios
    :return: (run command list, analysis command list, result path list)
    """
    cmd_list = []
    cmd_anaylise_list = []
    result_file_list = []
    # cmd = r'C:\"Program Files (x86)"\HP\LoadRunner\bin\wlrun -TestPath  '
    cmd = r'wlrun -TestPath '
    # cmd_analyse = r'C:\"Program Files (x86)"\HP\LoadRunner\bin\AnalysisUI -RESULTPATH '
    cmd_analyse = r'AnalysisUI -RESULTPATH '
    for file in script_file:
        result_path = path + os.path.sep + file
        cmd_list.append(cmd + result_path + ".lrs" +
                        " -Run -ResultName " + result_path)
        cmd_anaylise_list.append(cmd_analyse + result_path + os.path.sep +
                                 file + ".lrr -TEMPLATENAME html")
        result_file_list.append(result_path)

    logger.debug("生成的 lr 命令")
    logger.debug(cmd_list)
    logger.debug("生成的 lr 解析命令")
    logger.debug(cmd_anaylise_list)
    logger.debug("loadrunner 结果文件保存路径")
    logger.debug(result_file_list)
    return cmd_list, cmd_anaylise_list, result_file_list
Exemple #24
0
    def file_analyse(self, file):
        """
        Parse a LoadRunner report directory and fill self.result_dict.
        :param file: directory holding the loadrunner report
        :raises CustomError: when expected report data cannot be matched
        """
        try:
            logger.info("开始解析 %s loadrunner 报告" % os.path.basename(file))

            super().file_analyse(file)

            tps_list = []
            resp_avg_list = []
            resp_min_list = []
            resp_max_list = []

            summary_html_path = file + r'\An_Report1\summary.html'
            content_html_path = file + r'\An_Report1\contents.html'

            # Transaction names and pass/fail counts come from summary.html.
            with open(summary_html_path, "r",
                      encoding='utf8') as summary_html_file:
                summary_str = summary_html_file.read()
                transaction_name_list = re.findall(
                    r'headers="LraTransaction Name".*?8">(.*?)</td>',
                    summary_str)
                logger.debug(
                    "trasaction_name_list is None: %s" %
                    str(False if
                        (transaction_name_list is not None) else True))
                pass_list = re.findall(r'headers="LraPass".*?8">(.*?)</td>',
                                       summary_str)
                logger.debug("pass_list is None: %s" %
                             str(False if (pass_list is not None) else True))
                fail_list = re.findall(r'headers="LraFail".*?8">(.*?)</td>',
                                       summary_str)
                logger.debug("fail_list is None: %s" %
                             str(False if (fail_list is not None) else True))

            if not pass_list or not fail_list or not transaction_name_list:
                raise CustomError("%s 有未匹配到的数据" % self.name)

            # TPS comes from its own TPS html page whose file name is looked
            # up in contents.html; likewise for the Response Time html page.
            with open(content_html_path, "r",
                      encoding='utf8') as content_html_file:
                content_str = content_html_file.read()
                tps_html_name_match = re.match(
                    r'[\s\S]*href="(.*?)" Target.*?>Transactions per Second',
                    content_str)
                response_time_html_name_match = re.match(
                    r'[\s\S]*href="(.*?)" Target.*?>Average Transaction Response Time',
                    content_str)

                if tps_html_name_match is None:
                    raise CustomError("%s 未找到 tps html 报告" % self.name)
                elif response_time_html_name_match is None:
                    raise CustomError("%s 未找到 Respnse Time html 报告" %
                                      self.name)

                tps_html_name = tps_html_name_match.group(1)
                logger.debug("%s tps html name %s " %
                             (os.path.basename(file), tps_html_name))
                tps_html_path = file + r'\An_Report1' + os.path.sep + tps_html_name
                logger.debug("%s tps html path %s " %
                             (os.path.basename(file), tps_html_path))
                response_time_html_name = response_time_html_name_match.group(
                    1)
                logger.debug("%s response time html name %s" %
                             (os.path.basename(file), response_time_html_name))
                response_time_html_path = file + r'\An_Report1' + os.path.sep + response_time_html_name
                logger.debug("%s response time html path %s" %
                             (os.path.basename(file), response_time_html_path))

            self.fetch_tps(tps_html_path, tps_list)
            self.fetch_resp_time(response_time_html_path, resp_avg_list,
                                 resp_min_list, resp_max_list)

            # Long numbers carry thousands separators (1024 appears as
            # 1,024), so commas are stripped before int conversion.
            for index in range(0, len(transaction_name_list)):
                transaction_name = transaction_name_list[index]
                logger.debug("transaction name %s" % transaction_name)
                tps = tps_list[index]
                logger.debug("tps %s" % tps)
                pass_tsc = pass_list[index].replace(",", "")
                logger.debug("pass transaction: %s" % pass_tsc)
                fail_tsc = fail_list[index].replace(",", "")
                logger.debug("fail transaction: %s" % fail_tsc)
                # Times were already converted to ms by fetch_resp_time
                resp_avg = resp_avg_list[index]
                logger.debug("resp average time : %sms" % resp_avg)
                resp_max = resp_max_list[index]
                logger.debug("resp max time: %sms" % resp_max)
                resp_min = resp_min_list[index]
                logger.debug("resp min time: %sms" % resp_min)

                all_tsc = str(int(fail_tsc) + int(pass_tsc))
                error = round(int(fail_tsc) / int(all_tsc) * 100, 2)
                # list: [Transaction, TPS, Error%, Response Time(average), Response Time(min), Response Time(max)]
                data_list = [all_tsc, tps, error, resp_avg, resp_min, resp_max]
                # dict:{transaction name:list}
                self.result_dict[transaction_name] = data_list
        finally:
            logger.info("%s loadrunner 报告解析结束" % os.path.basename(file))