Example #1
    def run_client_command(self, cmd_sec_name, tmp_logfile, bench_name):
        fp = open(tmp_logfile, "a+")
        start_log = "%%%%%%         %s test start       %%%%%% \n" % cmd_sec_name
        fp.write(start_log)
        fp.write("<<<BEGIN TEST>>>\n")
        tags = "[test: " + cmd_sec_name + "]\n"
        fp.write(tags)
        logs = "log: " + cmd_sec_name + "\n"
        fp.write(logs)
        fp.close()
        start = time.time()
        flag = 0
        logging.debug("the client running command is %s" % cmd_sec_name)

        try:
            logging.debug(
                "begining to execute the command of %s on remote host" %
                cmd_sec_name)
            fp = open(tmp_logfile, "a+")
            logging.debug("client command in localhost is: %s" % cmd_sec_name)
            # FIXME: update code for this condition
            [out, returncode] = self.run_commands(bench_name, cmd_sec_name)
            fp.close()
            server_utils.file_copy(tmp_logfile,
                                   '/tmp/%s_output.log' % bench_name, 'a+')
        except error.ServRunError as e:
            fp = open(tmp_logfile, "a+")
            fp.write("[status]: FAIL\n")
            sys.stdout.write(str(e))
            flag = -1
            fp.close()
        # the caller (run_all_cases in Example #6) consumes this status flag
        return flag
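The markers written above are what the parsing examples further down search for. A minimal standalone sketch (the marker text is an assumption modelled on start_log, not copied from the source) of how a section's output is later recovered with the re.findall() pattern used in the parse_all_cases() examples:

import re

# A log fragment shaped like what run_client_command() writes around a test.
log_text = (
    "%%%%%%         dhrystone test start       %%%%%%\n"
    "Dhrystones per Second: 12345\n"
    "%%%%%%         test_end       %%%%%%\n"
)

# Same extraction pattern as the parsers below: capture everything between
# the %-fenced "test start" and "test_end" markers.
sections = re.findall(r"test start\s+%+(.*?)%+\s+test_end", log_text, re.DOTALL)
print(sections[0].strip())  # -> Dhrystones per Second: 12345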
Example #2
    def run_commands(self, bench_name, cmd_sec_name):
        returncode = -1
        output = ''
        pwd = os.getcwd()
        try:
            # the command may span multiple lines and is enclosed in quotes
            try:
                logging.debug("the actual command running locally is: %s" %
                              cmd_sec_name)
                test_case_dir = os.path.join(caliper_path.BENCHS_DIR,
                                             bench_name, 'tests')
                cmd_sec_name_tmp_file = os.path.join(Folder.workspace,
                                                     Folder.name,
                                                     cmd_sec_name + '_tmp.log')
                subprocess.call("echo '$$ %s RUN START' >> %s" %
                                (cmd_sec_name, cmd_sec_name_tmp_file),
                                shell=True)
                os.chdir(test_case_dir)
                result = subprocess.call(
                    'ansible-playbook -i %s %s.yml --extra-vars "hosts=%s" -u %s >> %s 2>&1'
                    % (Folder.project_config, cmd_sec_name, self.host,
                       getpass.getuser(), cmd_sec_name_tmp_file),
                    stdout=subprocess.PIPE,
                    shell=True)
                subprocess.call("echo '$$ %s RUN STOP' >> %s" %
                                (cmd_sec_name, cmd_sec_name_tmp_file),
                                shell=True)
                subprocess.call(
                    "echo '==================================' >> %s" %
                    (cmd_sec_name_tmp_file),
                    shell=True)
                server_utils.file_copy(Folder.caliper_run_log_file,
                                       cmd_sec_name_tmp_file, 'a+')
                if os.path.exists(cmd_sec_name_tmp_file):
                    os.remove(cmd_sec_name_tmp_file)
            except error.CmdError as e:
                raise error.ServRunError(e.args[0], e.args[1])
        except Exception as e:
            logging.debug(e)
        finally:
            os.chdir(pwd)   # restore the working directory saved above
        # implied by the caller in Example #1, which unpacks [out, returncode]
        return [output, returncode]
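The playbook run above goes through a single shell string with >> redirection. For comparison, a hedged sketch that passes the arguments as a list and lets Python handle the log redirection; the inventory path, playbook name, target host and log path are placeholders, not values from the source:

import getpass
import subprocess

log_path = "/tmp/case_tmp.log"                     # placeholder log file
with open(log_path, "a") as log:
    returncode = subprocess.call(
        ["ansible-playbook", "-i", "inventory",    # placeholder inventory
         "case.yml",                               # placeholder playbook
         "--extra-vars", "hosts=192.168.0.10",     # placeholder target host
         "-u", getpass.getuser()],
        stdout=log, stderr=subprocess.STDOUT)      # both streams into the log

Passing a list avoids the quoting pitfalls of shell=True when section names contain spaces.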
Example #3
def parse_all_cases(kind_bench, bench_name, parser_file, dic, run_case_list):
    """
    function: parse one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(kind_bench, 'main.yml')
        # get the config sections for the benchmark
        with open(bench_conf_file, 'r') as pf:
            values = yaml.safe_load(pf.read())
        sections_run = values[bench_name].keys()
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    parser_result_file = log_bench + "_parser.log"
    tmp_parser_file = log_bench + "_parser_tmp.log"
    if os.path.exists(parser_result_file):
        os.remove(parser_result_file)
    # for each command in run config file, read the config for the benchmark
    i = 0
    for section in sections_run:
        if section in run_case_list:
            dic[bench_name][section] = {}
            flag = 0
            try:
                parser = values[bench_name][section]['parser']
            except Exception:
                logging.debug("no value for the %s" % section)
                continue
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            # parse the result in tmp_log_file, which holds the output of
            # running the command
            try:
                logging.debug("Parsing the result of command: %s" % section)
                outfp = open(logfile, 'r')
                infp = open(tmp_log_file, 'w')
                # infp.write(re.findall("test start\s+%+(.*?)%+\s+test_end", outfp.read(), re.DOTALL)[sections_run.index(section) - i])
                infp.write(re.findall(section + r"\s+test start\s+%+(.*?)%+\s+test_end",
                                      outfp.read(), re.DOTALL)[-1])
                infp.close()
                outfp.close()
                parser_result = parser_case(bench_name, parser_file,
                                            parser, tmp_log_file,
                                            tmp_parser_file, 'parser')
                dic[bench_name][section]["type"] = type(parser_result)
                dic[bench_name][section]["value"] = parser_result
            except Exception as e:
                logging.info("Error while parsing the result of \"%s\""
                             % section)
                logging.info(e)
                if os.path.exists(tmp_parser_file):
                    os.remove(tmp_parser_file)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
            else:
                server_utils.file_copy(parser_result_file, tmp_parser_file, "a+")
                if os.path.exists(tmp_parser_file):
                    os.remove(tmp_parser_file)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                if parser_result <= 0:
                    continue
        else:
            i += 1
            continue
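For reference, the dictionary this function fills holds, per parsed section, the parser's return value and its type. A hypothetical illustration (the benchmark and section names are made up):

# dic as parse_all_cases() would leave it for one section
parser_result = 12345.0                    # whatever parser_case() returned
dic = {"unixbench": {}}
dic["unixbench"]["dhrystone"] = {
    "type": type(parser_result),           # recorded type of the value
    "value": parser_result,
}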
Example #4
def parse_all_cases(target_exec_dir, target, kind_bench, bench_name, run_file,
                    parser_file, dic):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    parser_result_file = log_bench + "_parser.log"
    tmp_parser_file = log_bench + "_parser_tmp.log"
    if os.path.exists(parser_result_file):
        os.remove(parser_result_file)
    #output_logs_names = glob.glob(Folder.exec_dir+"/*output.log")
    bench_test = "ltp"

    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        dic[bench_name][sections_run[i]] = {}

        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            scores_way = configRun.get(sections_run[i], 'scores_way')
            parser = configRun.get(sections_run[i], 'parser')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_parser_file):
            os.remove(tmp_parser_file)
        # parse the result in the tmp_log_file; the result is the output of
        # running the command

        try:
            logging.debug("Parsering the result of command: %s" % command)
            if bench_name == bench_test:
                outfp = open(tmp_parser_file, "w")
                outfp.write("%s" % (subsection))
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser,
                                            subsection_file, tmp_parser_file)
            else:
                outfp = open(logfile, 'r')
                infp = open(tmp_log_file, 'w')
                infp.write(
                    re.findall(r"test start\s+%+(.*?)%+\s+test_end",
                               outfp.read(), re.DOTALL)[i])
                infp.close()
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser, tmp_log_file,
                                            tmp_parser_file)
            dic[bench_name][sections_run[i]]["type"] = type(parser_result)
            dic[bench_name][sections_run[i]]["value"] = parser_result
        except Exception as e:
            logging.info("Error while parsing the result of \"%s\"" %
                         sections_run[i])
            logging.info(e)
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
        else:
            server_utils.file_copy(parser_result_file, tmp_parser_file, "a+")
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            if parser_result <= 0:
                continue
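The ltp branch above derives a per-category log file from the section name. A small sketch of that naming scheme; the section name format and directory are assumptions:

log_bench = "/tmp/caliper/exec/ltp"        # assumed Folder.exec_dir + bench name
section = "ltp sched"                      # assumed "ltp <category>" section name
subsection = section.split(" ")[1]         # -> "sched"
subsection_file = log_bench + "_" + subsection + "_output.log"
print(subsection_file)                     # /tmp/caliper/exec/ltp_sched_output.log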
Example #5
def run_all_cases(target_exec_dir, target, kind_bench, bench_name, run_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
    if os.path.exists(Folder.caliper_log_file):
        sections = bench_name + " EXECUTION"
        fp = open(Folder.caliper_log_file, "r")
        f = fp.readlines()
        fp.close()
        op = open(Folder.caliper_log_file, "w")
        for line in f:
            if not (sections in line):
                op.write(line)
        op.close()
    result = subprocess.call(
        "echo '$$ %s EXECUTION START: %s' >> %s" %
        (bench_name, str(starttime)[:19], Folder.caliper_log_file),
        shell=True)
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run(
            "if [[ ! -e /mnt/caliper_nfs ]]; then mkdir -p /mnt/caliper_nfs; fi"
        )
        # FIXME: now that we create the folder, why not mount it directly here?
        try:
            tar_mask = ".".join(tar_ip.split(".")[0:3])
            p1 = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["grep", tar_mask],
                                  stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p1.stdout.close()
            output, err = p2.communicate()
            output = output.strip()
            host_ip = output.split("inet addr:")[1].split(" ")[0]
        except Exception:
            logging.debug("Unable to get the host_ip")
        try:
            mount_cmd = target.run(
                "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                (host_ip))
        except Exception:
            try:
                umount_cmd = target.run("umount /mnt/caliper_nfs/")
                mount_cmd = target.run(
                    "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                    (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file),
            shell=True)
    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench, target,
                                      command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("An error occurred while running the command \"%s\"" %
                             command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
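The NFS setup above tries to mount, and on failure unmounts and retries once. The same pattern, as a local sketch rather than the project's target.run() helper (assumes root privileges and a reachable NFS export):

import subprocess

def mount_nfs_with_retry(host_ip, export="/opt/caliper_nfs",
                         mountpoint="/mnt/caliper_nfs"):
    # Sketch of the mount / umount / retry dance used above.
    cmd = ["mount", "-t", "nfs", "%s:%s" % (host_ip, export), mountpoint]
    if subprocess.call(cmd) == 0:
        return True
    subprocess.call(["umount", mountpoint])  # a stale mount may be the cause
    return subprocess.call(cmd) == 0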
Example #6
    def run_all_cases(self, kind_bench, bench_name, run_case_list):
        """
        function: run one benchmark which was selected in the configuration files
        """
        try:
            # get the abspath, which is the filename of the run config for the benchmark
            bench_conf_file = os.path.join(kind_bench, 'main.yml')
            # get the config sections for the benchmark
            with open(bench_conf_file, 'r') as pf:
                values = yaml.safe_load(pf.read())
            sections_run = values[bench_name].keys()
        except AttributeError:
            raise    # re-raise the original exception instead of a bare class
        except Exception:
            raise
        logging.debug("the sections to run are: %s" % sections_run)
        if not os.path.exists(Folder.exec_dir):
            os.mkdir(Folder.exec_dir)
        log_bench = os.path.join(Folder.exec_dir, bench_name)
        logfile = log_bench + "_output.log"
        tmp_log_file = log_bench + "_output_tmp.log"
        if os.path.exists(logfile):
            os.remove(logfile)

        starttime = datetime.datetime.now()
        if os.path.exists(Folder.caliper_log_file):
            sections = bench_name + " EXECUTION"
            fp = open(Folder.caliper_log_file, "r")
            f = fp.readlines()
            fp.close()
            op = open(Folder.caliper_log_file, "w")
            for line in f:
                if not (sections in line):
                    op.write(line)
            op.close()
        result = subprocess.call(
            "echo '$$ %s EXECUTION START: %s' >> %s" %
            (bench_name, str(starttime)[:19], Folder.caliper_log_file),
            shell=True)
        # for each command in run config file, read the config for the benchmark
        for section in sections_run:
            if section in run_case_list:
                if self.num == 0:
                    config_files = os.path.join(
                        caliper_path.config_files.config_dir,
                        'cases_config.json')
                    with open(config_files, 'r') as fp:
                        case_list = yaml.safe_load(fp.read())
                    for dimension in case_list:
                        for i in range(len(case_list[dimension])):
                            for tool in case_list[dimension][i]:
                                for case in case_list[dimension][i][tool]:
                                    if case == section:
                                        self.num = case_list[dimension][i][
                                            tool][case][-1]
                flag = 0

                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                # run the command of the benchmarks

                try:
                    for j in range(int(self.num)):
                        flag = self.run_client_command(section, tmp_log_file,
                                                       bench_name)
                except Exception as e:
                    logging.info(e)
                    crash_handle.main()
                    server_utils.file_copy(logfile, tmp_log_file, 'a+')
                    if os.path.exists(tmp_log_file):
                        os.remove(tmp_log_file)

                    run_flag = server_utils.get_fault_tolerance_config(
                        'fault_tolerance', 'run_error_continue')
                    if run_flag == 1:
                        continue
                    else:
                        return result
                else:
                    server_utils.file_copy(logfile, tmp_log_file, 'a+')
                    if flag != 1:
                        logging.info(
                            "An error occurred while running the command \"%s\"" %
                            section)

                        if os.path.exists(tmp_log_file):
                            os.remove(tmp_log_file)
                        crash_handle.main()

                        run_flag = server_utils.get_fault_tolerance_config(
                            'fault_tolerance', 'run_error_continue')
                        if run_flag == 1:
                            return result
                    if os.path.exists(tmp_log_file):
                        os.remove(tmp_log_file)
            else:
                continue

            endtime = datetime.datetime.now()
            subprocess.call(
                "echo '$$ %s EXECUTION STOP: %s' >> %s" %
                (section, str(endtime)[:19], Folder.caliper_log_file),
                shell=True)
            subprocess.call(
                "echo '$$ %s EXECUTION DURATION %s Seconds'>>%s" %
                (section,
                 (endtime - starttime).seconds, Folder.caliper_log_file),
                shell=True)
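The START/STOP/DURATION markers above are appended by shelling out to echo. An equivalent sketch that writes the same lines from Python directly; the log path and section name are placeholders:

import datetime

caliper_log_file = "/tmp/caliper_exe.log"  # placeholder for Folder.caliper_log_file
section = "dhrystone"                      # placeholder section name

starttime = datetime.datetime.now()
# ... the section's command would run here ...
endtime = datetime.datetime.now()

with open(caliper_log_file, "a") as fp:
    fp.write("$$ %s EXECUTION STOP: %s\n" % (section, str(endtime)[:19]))
    fp.write("$$ %s EXECUTION DURATION %s Seconds\n"
             % (section, (endtime - starttime).seconds))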
Example #7
def run_all_cases(target_exec_dir, target, kind_bench, bench_name, run_file,
                  parser_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    parser_result_file = log_bench + "_parser.log"
    tmp_parser_file = log_bench + "_parser_tmp.log"
    if os.path.exists(parser_result_file):
        os.remove(parser_result_file)
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
    result = subprocess.call(
        "echo '$$ %s EXECUTION START: %s' >> %s" %
        (bench_name, str(starttime)[:19], Folder.caliper_log_file),
        shell=True)
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run("if [[ ! -e /mnt/ltp ]]; then mkdir -p /mnt/ltp; fi")
        # FIXME: now that we create the folder, why not mount it directly here?
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # FIXME: getting the host IP should be optimised
            s.connect(("8.8.8.8", 80))
        except Exception:
            logging.debug(
                "Socket connection failed during ltp pre-requisite check")
        host_ip = s.getsockname()[0]
        try:
            xyz = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/ltp" %
                             (host_ip))
        except Exception:
            try:
                xyz = target.run("umount /mnt/ltp/")
                xyz = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/ltp" %
                                 (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file),
            shell=True)

    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            scores_way = configRun.get(sections_run[i], 'scores_way')
            parser = configRun.get(sections_run[i], 'parser')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_parser_file):
            os.remove(tmp_parser_file)
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench, target,
                                      command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                xyz = subprocess.call("mv /opt/caliper_nfs/ltp_log/* %s " %
                                      (Folder.exec_dir),
                                      shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                xyz = subprocess.call("mv /opt/caliper_nfs/ltp_log/* %s " %
                                      (Folder.exec_dir),
                                      shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("An error occurred while running the command \"%s\"" %
                             command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
        # parse the result in the tmp_log_file; the result is the output of
        # running the command
        try:
            logging.debug("Parsering the result of command: %s" % command)
            if bench_name == bench_test:
                outfp = open(tmp_parser_file, "w")
                outfp.write("%s" % (subsection))
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser,
                                            subsection_file, tmp_parser_file)
            else:
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser, tmp_log_file,
                                            tmp_parser_file)
        except Exception as e:
            logging.info(
                "An error occurred while parsing the result of \"%s\"" %
                sections_run[i])
            logging.info(e)
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
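This example finds the host address with the UDP connect trick: connecting a datagram socket sends no packet, but it does make the kernel pick the outbound interface, which getsockname() then reveals. A standalone sketch of the same idea:

import socket

def local_outbound_ip():
    # SOCK_DGRAM connect() only selects a route; no traffic is sent.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()

print(local_outbound_ip())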
Example #8
                                            subsection_file, tmp_parser_file)
            else:
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser, tmp_log_file,
                                            tmp_parser_file)
        except Exception as e:
            logging.info(
                "An error occurred while parsing the result of \"%s\"" %
                sections_run[i])
            logging.info(e)
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
        else:
            server_utils.file_copy(parser_result_file, tmp_parser_file, "a+")
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            if parser_result <= 0:
                continue

        # compute the score according to the method given in the config file
        try:
            logging.debug("Computing the score of the result of command: %s" %
                          command)
            flag_compute = compute_case_score(parser_result, category,
                                              scores_way, target)
        except Exception as e:
            logging.info(e)
Example #9
def parse_all_cases(target_exec_dir, target, kind_bench, bench_name,
                    run_file, parser_file, dic):
    """
    function: parse the results of one benchmark selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(
            caliper_path.config_files.tests_cfg_dir, kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    bench_test = "ltp"
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    if bench_name != bench_test:
        if not os.path.exists(logfile):
            return -1
    tmp_log_file = log_bench + "_output_tmp.log"
    parser_result_file = log_bench + "_parser.log"
    tmp_parser_file = log_bench + "_parser_tmp.log"
    if os.path.exists(parser_result_file):
        os.remove(parser_result_file)
    #output_logs_names = glob.glob(Folder.exec_dir+"/*output.log")

    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        dic[bench_name][sections_run[i]] = {}

        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            scores_way = configRun.get(sections_run[i], 'scores_way')
            parser = configRun.get(sections_run[i], 'parser')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
            if not os.path.exists(subsection_file):
                continue
        if os.path.exists(tmp_parser_file):
            os.remove(tmp_parser_file)
        # parse the result in the tmp_log_file; the result is the output of
        # running the command

        try:
            logging.debug("Parsing the result of command: %s" % command)
            if bench_name == bench_test:
                outfp = open(tmp_parser_file, "w")
                outfp.write("%s" % (subsection))
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name, parser_file,
                                            parser, subsection_file,
                                            tmp_parser_file)
            else:
                outfp = open(logfile, 'r')
                infp = open(tmp_log_file, 'w')
                infp.write(re.findall(r"test start\s+%+(.*?)%+\s+test_end",
                                      outfp.read(), re.DOTALL)[i])
                infp.close()
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name, parser_file,
                                            parser, tmp_log_file,
                                            tmp_parser_file)
            dic[bench_name][sections_run[i]]["type"] = type(parser_result)
            dic[bench_name][sections_run[i]]["value"] = parser_result
        except Exception as e:
            logging.info("Error while parsing the result of \"%s\""
                         % sections_run[i])
            logging.info(e)
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
        else:
            server_utils.file_copy(parser_result_file, tmp_parser_file, "a+")
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            if parser_result <= 0:
                continue
Example #10
def run_all_cases(target_exec_dir, target, kind_bench, bench_name,
                  run_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, which is the filename of the run config for the benchmark
        bench_conf_file = os.path.join(
            caliper_path.config_files.tests_cfg_dir, kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise    # re-raise the original exception instead of a bare class
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
    if os.path.exists(Folder.caliper_log_file):
        sections = bench_name + " EXECUTION"
        fp = open(Folder.caliper_log_file, "r")
        f = fp.readlines()
        fp.close()
        op = open(Folder.caliper_log_file, "w")
        for line in f:
            if sections not in line:
                op.write(line)
        op.close()
    result = subprocess.call("echo '$$ %s EXECUTION START: %s' >> %s"
                             % (bench_name,
                                str(starttime)[:19],
                                Folder.caliper_log_file),
                             shell=True)
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run("if [[ ! -e /mnt/caliper_nfs ]]; then mkdir -p /mnt/caliper_nfs; fi")
        # FIXME: now that we create the folder, why not mount it directly here?
        try:
            tar_mask = ".".join(tar_ip.split(".")[0:3])
            p1 = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["grep", tar_mask], stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p1.stdout.close()
            output, err = p2.communicate()
            output = output.strip()
            host_ip = output.split("inet addr:")[1].split(" ")[0]
        except Exception:
            logging.debug("Unable to get the host_ip")
        try:
            mount_cmd = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" % (host_ip))
        except Exception:
            try:
                umount_cmd = target.run("umount /mnt/caliper_nfs/")
                mount_cmd = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" % (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file), shell=True)
    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench,
                                      target, command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                move_logs = subprocess.call("cp /opt/caliper_nfs/ltp_log/* %s "
                                            % (Folder.exec_dir), shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                move_logs = subprocess.call("cp /opt/caliper_nfs/ltp_log/* %s "
                                            % (Folder.exec_dir), shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("An error occurred while running the command \"%s\""
                             % command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
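The host-address lookup in this example matches the client's /24 prefix against the local interface list. A standalone sketch of that pipeline; the client address is a placeholder, and note that parsing "inet addr:" assumes legacy net-tools ifconfig output (modern "ip -4 addr" prints "inet <addr>/<prefix>" instead):

import subprocess

tar_ip = "192.168.0.42"                      # placeholder client address
tar_mask = ".".join(tar_ip.split(".")[0:3])  # -> "192.168.0"

p1 = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", tar_mask], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()                            # let ifconfig see SIGPIPE if grep exits
output, _ = p2.communicate()

# Assumes grep matched at least one interface line.
host_ip = output.decode().strip().split("inet addr:")[1].split(" ")[0]
print(host_ip)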