Example #1
    def run(self):
        # get the file that defines the test cases
        config_files = os.path.join(caliper_path.config_files.config_dir,
                                    'cases_config.json')
        # read the config once up front: re-reading an exhausted file object
        # would return an empty string for the second section
        with open(config_files, 'r') as fp:
            cases_config = fp.read()
        for section in self.sections:
            common.print_format()
            logging.info("Running %s" % section)
            bench = os.path.join(caliper_path.BENCHS_DIR, section, 'defaults')
            if not os.path.exists(bench):
                download_section(section)
            run_case_list = []
            if self.run_case_list == '':
                case_list = yaml.safe_load(cases_config)
                for dimension in case_list:
                    for i in range(len(case_list[dimension])):
                        try:
                            for case in case_list[dimension][i][section]:
                                run_case_list.append(case)
                        except KeyError:
                            # this entry holds no cases for the current section
                            pass
            else:
                run_case_list = self.run_case_list
            try:
                # On some platforms the swapoff and swapon commands cannot be
                # executed, so that call has been commented out.
                result = self.run_all_cases(bench, section, run_case_list)
            except Exception as e:
                logging.info(e)
                logging.info("Running %s Exception" % section)
                crash_handle.main()
                common.print_format()
            else:
                logging.info("Running %s Finished" % section)
Example #2
def parsing_run(target_exec_dir, target):
    # get the files that define the test cases
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration files are %s" % config_files)
    dic = {}
    for idx in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        # config_file = os.path.join(caliper_path.CALIPER_PRE, config_files[idx])
        config_file = os.path.join(config_files[idx])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)

        # determine whether it is 'common', 'arm' or 'android'
        classify = config_files[idx].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)

        for i in range(0, len(sections)):
            dic[sections[i]] = {}
            # try to read the run and parser options of this section
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("There is no option value for 'run' or 'parser'")

            print_format()
            logging.info("Parsing %s" % sections[i])
            bench = os.path.join(classify, sections[i])

            try:
                result = parse_all_cases(target_exec_dir, target, bench,
                                         sections[i], run_file, parser, dic)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    return result
            else:
                logging.info("Parsing %s Finished" % sections[i])
                print_format()
    outfp = open(
        os.path.join(
            caliper_path.folder_ope.workspace,
            caliper_path.folder_ope.name.strip(),
            "final_parsing_logs.yaml"),
        'w')
    outfp.write(yaml.dump(dic, default_flow_style=False))
    outfp.close()
    return 0
Example #3
def parsing_run(sections, run_case_list):
    dic = {}
    for i in range(0, len(sections)):
        dic[sections[i]] = {}
        # derive the run config and parser script names for this section
        try:
            run_file = sections[i] + '_run.cfg'
            parser = sections[i] + '_parser.py'
        except Exception:
            raise AttributeError("There is no option value for 'parser'")

        logging.info("=" * 55)
        logging.info("Parsing %s" % sections[i])
        bench = os.path.join(caliper_path.BENCHS_DIR, sections[i], 'defaults')

        try:
            result = parse_all_cases(bench, sections[i], parser, dic, run_case_list)
        except Exception:
            logging.info("Parsing %s Exception" % sections[i])
            crash_handle.main()
            logging.info("=" * 55)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            logging.info("Parsing %s Finished" % sections[i])
            logging.info("=" * 55)
        try:
            logging.info("Parsing json %s" % sections[i])
            log_bench = os.path.join(Folder.exec_dir, sections[i])
            logfile = log_bench + "_output.log"
            outfile_name = sections[i] + '.json'
            outfile = os.path.join(Folder.json_dir, outfile_name)
            if not os.path.exists(Folder.json_dir):
                os.mkdir(Folder.json_dir)
            parser_case(sections[i], parser, sections[i], logfile, outfile, 'json')
            # parser_json(sections[i],  parser, logfile)
        except Exception as e:
            logging.info(e)
        else:
            logging.info("Parsing json %s Finished" % sections[i])

    outfp = open(os.path.join(caliper_path.folder_ope.workspace,
                              caliper_path.folder_ope.name.strip(),
                              "final_parsing_logs.yaml"), 'w')
    outfp.write(yaml.dump(dic, default_flow_style=False))
    outfp.close()
    return 0
Example #4
def parsing_run(target_exec_dir, target):
    # get the files that define the test cases
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration files are %s" % config_files)
    dic = {}
    for idx in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        # config_file = os.path.join(caliper_path.CALIPER_PRE, config_files[idx])
        config_file = os.path.join(config_files[idx])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)

        # determine whether it is 'common', 'arm' or 'android'
        classify = config_files[idx].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)

        for i in range(0, len(sections)):
            dic[sections[i]] = {}
            # try to read the run and parser options of this section
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("There is no option value for 'run' or 'parser'")

            print_format()
            logging.info("Parsing %s" % sections[i])
            bench = os.path.join(classify, sections[i])

            try:
                result = parse_all_cases(target_exec_dir, target, bench,
                                         sections[i], run_file, parser, dic)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    return result
            else:
                logging.info("Parsing %s Finished" % sections[i])
                print_format()
    outfp = open(os.path.join(caliper_path.folder_ope.workspace,
                              caliper_path.folder_ope.name.strip(),
                              "final_parsing_logs.yaml"), 'w')
    outfp.write(yaml.dump(dic, default_flow_style=False))
    outfp.close()
    return 0
Example #5
def caliper_run(target_exec_dir, target):
    # get the files that define the test cases
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration files are %s" % config_files)

    for idx in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        # config_file = os.path.join(caliper_path.CALIPER_PRE, config_files[idx])
        config_file = os.path.join(config_files[idx])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)

        # determine whether it is 'common', 'arm' or 'android'
        classify = config_files[idx].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)

        for i in range(0, len(sections)):
            # run for each benchmark
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i] + '_' + target_arch + '.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            build_host_name = sections[i] + '_' + \
                    server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(os.path.join(Folder.build_dir, build_host_name)):
                continue

            # try to read the run and parser options of this section
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("There is no option value for 'run' or 'parser'")

            print_format()
            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/; fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs ;fi"
                        )
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs/ ;fi"
                        )
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                print_format()

    return 0
Example #6
def run_all_cases(target_exec_dir, target, kind_bench, bench_name, run_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, i.e. the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
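    # drop any stale '<bench_name> EXECUTION' marker lines from the caliper
    # log so this run's START marker is recorded fresh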
    if os.path.exists(Folder.caliper_log_file):
        sections = bench_name + " EXECUTION"
        fp = open(Folder.caliper_log_file, "r")
        f = fp.readlines()
        fp.close()
        op = open(Folder.caliper_log_file, "w")
        for line in f:
            if sections not in line:
                op.write(line)
        op.close()
    result = subprocess.call(
        "echo '$$ %s EXECUTION START: %s' >> %s" %
        (bench_name, str(starttime)[:19], Folder.caliper_log_file),
        shell=True)
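    # ltp exchanges logs with the host over an NFS share, set up below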
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run(
            "if [[ ! -e /mnt/caliper_nfs ]]; then mkdir -p /mnt/caliper_nfs; fi"
        )
        # FIXME: now that we create the folder, why not mount it directly here?
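        # derive the host-side IP by grepping the ifconfig output for the
        # target's /24 prefix (relies on the old 'inet addr:' output format)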
        try:
            tar_mask = ".".join(tar_ip.split(".")[0:3])
            p1 = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["grep", tar_mask],
                                  stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p1.stdout.close()
            output, err = p2.communicate()
            output = output.strip()
            host_ip = output.split("inet addr:")[1].split(" ")[0]
        except Exception:
            logging.debug("Unable to get the host_ip")
        try:
            mount_cmd = target.run(
                "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                (host_ip))
        except Exception:
            try:
                umount_cmd = target.run("umount /mnt/caliper_nfs/")
                mount_cmd = target.run(
                    "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                    (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file),
            shell=True)
    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench, target,
                                      command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("There is wrong when running the command \"%s\"" %
                             command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
Example #7
    def run_all_cases(self, kind_bench, bench_name, run_case_list):
        """
        function: run one benchmark which was selected in the configuration files
        """
        try:
            # get the abspath, i.e. the filename of the run config for the benchmark
            bench_conf_file = os.path.join(kind_bench, 'main.yml')
            # get the config sections for the benchmark
            with open(bench_conf_file, 'r') as pf:
                values = yaml.safe_load(pf.read())
            sections_run = values[bench_name].keys()
        except AttributeError:
            raise
        except Exception:
            raise
        logging.debug("the sections to run are: %s" % sections_run)
        if not os.path.exists(Folder.exec_dir):
            os.mkdir(Folder.exec_dir)
        log_bench = os.path.join(Folder.exec_dir, bench_name)
        logfile = log_bench + "_output.log"
        tmp_log_file = log_bench + "_output_tmp.log"
        if os.path.exists(logfile):
            os.remove(logfile)

        starttime = datetime.datetime.now()
        if os.path.exists(Folder.caliper_log_file):
            sections = bench_name + " EXECUTION"
            fp = open(Folder.caliper_log_file, "r")
            f = fp.readlines()
            fp.close()
            op = open(Folder.caliper_log_file, "w")
            for line in f:
                if sections not in line:
                    op.write(line)
            op.close()
        result = subprocess.call(
            "echo '$$ %s EXECUTION START: %s' >> %s" %
            (bench_name, str(starttime)[:19], Folder.caliper_log_file),
            shell=True)
        # for each command in run config file, read the config for the benchmark
        for section in sections_run:
            if section in run_case_list:
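                # resolve the repeat count for this case: it is stored as the
                # last element of the case entry in cases_config.json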
                if self.num == 0:
                    config_files = os.path.join(
                        caliper_path.config_files.config_dir,
                        'cases_config.json')
                    with open(config_files, 'r') as fp:
                        case_list = yaml.safe_load(fp.read())
                    for dimension in case_list:
                        for i in range(len(case_list[dimension])):
                            for tool in case_list[dimension][i]:
                                for case in case_list[dimension][i][tool]:
                                    if case == section:
                                        self.num = case_list[dimension][i][
                                            tool][case][-1]
                flag = 0

                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                # run the command of the benchmarks

                try:
                    for j in range(int(self.num)):
                        flag = self.run_client_command(section, tmp_log_file,
                                                       bench_name)
                except Exception as e:
                    logging.info(e)
                    crash_handle.main()
                    server_utils.file_copy(logfile, tmp_log_file, 'a+')
                    if os.path.exists(tmp_log_file):
                        os.remove(tmp_log_file)

                    run_flag = server_utils.get_fault_tolerance_config(
                        'fault_tolerance', 'run_error_continue')
                    if run_flag == 1:
                        continue
                    else:
                        return result
                else:
                    server_utils.file_copy(logfile, tmp_log_file, 'a+')
                    if flag != 1:
                        logging.info(
                            "Something went wrong when running the command \"%s\"" %
                            section)

                        if os.path.exists(tmp_log_file):
                            os.remove(tmp_log_file)
                        crash_handle.main()

                        run_flag = server_utils.get_fault_tolerance_config(
                            'fault_tolerance', 'run_error_continue')
                        if run_flag == 1:
                            return result
                    if os.path.exists(tmp_log_file):
                        os.remove(tmp_log_file)
            else:
                continue

            endtime = datetime.datetime.now()
            subprocess.call(
                "echo '$$ %s EXECUTION STOP: %s' >> %s" %
                (section, str(endtime)[:19], Folder.caliper_log_file),
                shell=True)
            subprocess.call(
                "echo '$$ %s EXECUTION DURATION %s Seconds'>>%s" %
                (section,
                 (endtime - starttime).seconds, Folder.caliper_log_file),
                shell=True)
Example #8
def caliper_run(target_exec_dir, server, target):
    # get the files that define the test cases
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration files are %s" % config_files)

    for idx in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        # config_file = os.path.join(caliper_path.CALIPER_PRE, config_files[idx])
        config_file = os.path.join(config_files[idx])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)

        # determine whether it is 'common', 'arm' or 'android'
        classify = config_files[idx].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)

        if classify == "server" and server:
            try:
                server_ip = settings.get_value("SERVER", "ip", type=str)
                server_port = settings.get_value("SERVER", "port", type=int)
                server_user = settings.get_value("SERVER", "user", type=str)
                logging.info(
                    "Please wait while caliper triggers the server.py script on the server"
                )
                server_pwd = server.run("pwd").stdout
                server_pwd = server_pwd.split("\n")[0]
                server_caliper_dir = os.path.join(server_pwd, "caliper_server")
                server_caliper_dir = os.path.join(server_caliper_dir,
                                                  "server.py")
                server_user = server_user + '@' + server_ip
                script = server_caliper_dir + ' ' + str(server_port)
                subprocess.Popen(
                    ['ssh', '%s' % server_user,
                     'python %s' % script])

            except Exception as e:
                logging.info(e)
                raise AttributeError(
                    "Error in establishing connection with the server")

        for i in range(0, len(sections)):
            # run for each benchmark
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i] + '_' + target_arch + '.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            build_host_name = sections[i] + '_' + \
                    server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(os.path.join(Folder.build_dir, build_host_name)):
                continue

            # try to read the run and parser options of this section
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("There is no option value for 'run' or 'parser'")

            print_format()

            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                system_initialise(target)
                if classify == "server":
                    logging.info("Waiting for server to grant access")
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.connect((server_ip, server_port))
                    logging.info("%s" % str(sock.recv(1024)))

                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
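                # notify the server that this benchmark finished, then release the socket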
                if classify == "server":
                    sock.send("1")
                    sock.close()
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/; fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs ;fi"
                        )
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                if sections[i] == "ltp":
                    try:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                    except Exception:
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs/ ;fi"
                        )
                        unmount = target.run(
                            "if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi"
                        )
                print_format()

    return 0
Example #9
def run_all_cases(target_exec_dir, target, kind_bench, bench_name, run_file,
                  parser_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, i.e. the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    parser_result_file = log_bench + "_parser.log"
    tmp_parser_file = log_bench + "_parser_tmp.log"
    if os.path.exists(parser_result_file):
        os.remove(parser_result_file)
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
    result = subprocess.call(
        "echo '$$ %s EXECUTION START: %s' >> %s" %
        (bench_name, str(starttime)[:19], Folder.caliper_log_file),
        shell=True)
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run("if [[ ! -e /mnt/ltp ]]; then mkdir -p /mnt/ltp; fi")
        # FIXME: now that we create the folder, why not mount it directly here?
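        # discover the host's outbound IP by connecting a UDP socket to a
        # public address; for UDP, connect() sends no packets, it only
        # selects the local interface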
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # FIXME: getting the host IP should be optimised
            s.connect(("8.8.8.8", 80))
        except Exception:
            logging.debug(
                "Socket connection failed during ltp pre-requisite check")
        host_ip = s.getsockname()[0]
        try:
            xyz = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/ltp" %
                             (host_ip))
        except Exception:
            try:
                xyz = target.run("umount /mnt/ltp/")
                xyz = target.run("mount -t nfs %s:/opt/caliper_nfs /mnt/ltp" %
                                 (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file),
            shell=True)

    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            scores_way = configRun.get(sections_run[i], 'scores_way')
            parser = configRun.get(sections_run[i], 'parser')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_parser_file):
            os.remove(tmp_parser_file)
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench, target,
                                      command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                xyz = subprocess.call("mv /opt/caliper_nfs/ltp_log/* %s " %
                                      (Folder.exec_dir),
                                      shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                xyz = subprocess.call("mv /opt/caliper_nfs/ltp_log/* %s " %
                                      (Folder.exec_dir),
                                      shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("There is wrong when running the command \"%s\"" %
                             command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
        # parse the result in tmp_log_file, which holds the output of
        # running the command
        try:
            logging.debug("Parsering the result of command: %s" % command)
            if bench_name == bench_test:
                outfp = open(tmp_parser_file, "w")
                outfp.write("%s" % (subsection))
                outfp.close()
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser,
                                            subsection_file, tmp_parser_file)
            else:
                parser_result = parser_case(kind_bench, bench_name,
                                            parser_file, parser, tmp_log_file,
                                            tmp_parser_file)
        except Exception as e:
            logging.info(
                "Something went wrong when parsing the result of \"%s\"" %
                sections_run[i])
            logging.info(e)
            if os.path.exists(tmp_parser_file):
                os.remove(tmp_parser_file)
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
Example #10
def caliper_run(target_exec_dir, target):
    # get the files that define the test cases
    config_files = server_utils.get_cases_def_files(target_exec_dir)
    logging.debug("the selected configuration files are %s" % config_files)

    for idx in range(0, len(config_files)):
        # run benchmarks selected in each configuration file
        # config_file = os.path.join(caliper_path.CALIPER_PRE, config_files[idx])
        config_file = os.path.join(config_files[idx])
        config, sections = server_utils.read_config_file(config_file)
        logging.debug(sections)

        # determine whether it is 'common', 'arm' or 'android'
        classify = config_files[idx].split("/")[-1].strip().split("_")[0]
        logging.debug(classify)

        for i in range(0, len(sections)):
            # run for each benchmark
            target_arch = server_utils.get_host_arch(target)
            build_name = sections[i] + '_' + target_arch + '.suc'
            build_suc = os.path.join(Folder.build_dir, build_name)
            if not os.path.exists(build_suc):
                continue
            build_host_name = sections[i] + '_' + \
                    server_utils.get_local_machine_arch() + '.fail'
            if os.path.exists(os.path.join(Folder.build_dir, build_host_name)):
                continue


            # try to read the run and parser options of this section
            try:
                run_file = config.get(sections[i], 'run')
                parser = config.get(sections[i], 'parser')
            except Exception:
                raise AttributeError("There is no option value for 'run' or 'parser'")

            print_format()
            logging.info("Running %s" % sections[i])
            bench = os.path.join(classify, sections[i])
            try:
                system_initialise(target)
                result = run_all_cases(target_exec_dir, target, bench,
                                       sections[i], run_file)
            except Exception:
                logging.info("Running %s Exception" % sections[i])
                crash_handle.main()
                print_format()
                if sections[i]== "ltp":
                    try:
                        unmount = target.run("if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/; fi")
                    except Exception:
                        unmount = target.run("if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs ;fi")
                        unmount = target.run("if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi")
                run_flag = server_utils.get_fault_tolerance_config(
                                'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    continue
                else:
                    return result
            else:
                logging.info("Running %s Finished" % sections[i])
                if sections[i] == "ltp":
                    try:
                         unmount = target.run("if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi")
                    except Exception:
                         unmount = target.run("if  df -h |grep caliper_nfs  ; then fuser -km /mnt/caliper_nfs/ ;fi")
                         unmount = target.run("if  df -h |grep caliper_nfs  ; then umount /mnt/caliper_nfs/ ;fi")
                print_format()

    return 0
Example #11
def run_all_cases(target_exec_dir, target, kind_bench, bench_name,
                  run_file):
    """
    function: run one benchmark which was selected in the configuration files
    """
    try:
        # get the abspath, i.e. the filename of the run config for the benchmark
        bench_conf_file = os.path.join(caliper_path.config_files.tests_cfg_dir,
                                       kind_bench, run_file)
        # get the config sections for the benchmark
        configRun, sections_run = server_utils.read_config_file(
            bench_conf_file)
    except AttributeError:
        raise
    except Exception:
        raise
    logging.debug("the sections to run are: %s" % sections_run)
    if not os.path.exists(Folder.exec_dir):
        os.mkdir(Folder.exec_dir)
    log_bench = os.path.join(Folder.exec_dir, bench_name)
    logfile = log_bench + "_output.log"
    tmp_log_file = log_bench + "_output_tmp.log"
    if os.path.exists(logfile):
        os.remove(logfile)

    starttime = datetime.datetime.now()
    if os.path.exists(Folder.caliper_log_file):
        sections = bench_name + " EXECUTION"
        fp = open(Folder.caliper_log_file, "r")
        f = fp.readlines()
        fp.close()
        op = open(Folder.caliper_log_file, "w")
        for line in f:
            if sections not in line:
                op.write(line)
        op.close()
    result = subprocess.call("echo '$$ %s EXECUTION START: %s' >> %s"
                            % (bench_name,
                                str(starttime)[:19],
                                Folder.caliper_log_file),
                            shell=True)
    bench_test = "ltp"
    if bench_name == bench_test:
        tar_ip = settings.get_value('CLIENT', 'ip', type=str)
        target.run(
            "if [[ ! -e /mnt/caliper_nfs ]]; then mkdir -p /mnt/caliper_nfs; fi"
        )
        # FIXME: now that we create the folder, why not mount it directly here?
        try:
            tar_mask = ".".join(tar_ip.split(".")[0:3])
            p1 = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE)
            p2 = subprocess.Popen(["grep", tar_mask],
                                  stdin=p1.stdout,
                                  stdout=subprocess.PIPE)
            p1.stdout.close()
            output, err = p2.communicate()
            output = output.strip()
            host_ip = output.split("inet addr:")[1].split(" ")[0]
        except Exception:
            logging.debug("Unable to get the host_ip")
        try:
            mount_cmd = target.run(
                "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                (host_ip))
        except Exception:
            try:
                umount_cmd = target.run("umount /mnt/caliper_nfs/")
                mount_cmd = target.run(
                    "mount -t nfs %s:/opt/caliper_nfs /mnt/caliper_nfs" %
                    (host_ip))
            except Exception:
                logging.debug("Unable to mount")
                return result
        readme_file = log_bench + "_README"
        resultltp = subprocess.call("touch %s" % (readme_file), shell=True)
        resultltp = subprocess.call(
            "echo 'The categorization of ltp in caliper is\nCATEGORY\t\t\t\t\t\tSCENARIOS OF LTP\n\n[command]\t\t\t\t\t\tcommands\n[cpu]\t\t\t\t\t\tsched,cpuhotplug\n[memory]\t\t\t\t\t\tmm.numa,hugetlb\n[dio]\t\t\t\t\t\tdio,io,dma_thread_diotest,timers\n[filesystem]\t\t\t\t\t\tfilecaps,fs,fs_bind,fs_ext4,fs_perms_simple,fs_readonly\n[kernel/\t\t\t\t\t\tsyscalls,controllers,pty,containers,admin_tools,modules,can\n[proc]\t\t\t\t\t\tipc,hyperthreading,nptl,cap_bounds,connectors,pipes\n\n\nltp_output.log contains the screenshot of complete ltp execution and ltp_parser.log contains the information regarding the number of tests executed and among them which all have passed failed or skipped.\n\nFor more information regarding a particular category please see ltp_<category>_output.log which contains the output screen and parser log for that particular category' >> %s"
            % (readme_file),
            shell=True)
    # for each command in run config file, read the config for the benchmark
    for i in range(0, len(sections_run)):
        flag = 0
        try:
            category = configRun.get(sections_run[i], 'category')
            command = configRun.get(sections_run[i], 'command')
        except Exception:
            logging.debug("no value for the %s" % sections_run[i])
            continue
        if bench_name == bench_test:
            subsection = sections_run[i].split(" ")[1]
            subsection_file = log_bench + "_" + subsection + "_output.log"
        if os.path.exists(tmp_log_file):
            os.remove(tmp_log_file)

        server_run_command = get_server_command(kind_bench, sections_run[i])
        logging.debug("Get the server command is: %s" % server_run_command)
        # run the command of the benchmarks
        try:
            flag = run_kinds_commands(sections_run[i], server_run_command,
                                      tmp_log_file, kind_bench,
                                      target, command)
        except Exception as e:
            logging.info(e)
            crash_handle.main()
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)
            run_flag = server_utils.get_fault_tolerance_config(
                'fault_tolerance', 'run_error_continue')
            if run_flag == 1:
                continue
            else:
                return result
        else:
            if bench_name == bench_test:
                move_logs = subprocess.call(
                    "cp /opt/caliper_nfs/ltp_log/* %s " % (Folder.exec_dir),
                    shell=True)
                if os.path.exists(subsection_file):
                    server_utils.file_copy(tmp_log_file, subsection_file, 'a+')
            server_utils.file_copy(logfile, tmp_log_file, 'a+')
            if flag != 1:
                logging.info("Something went wrong when running the command \"%s\""
                             % command)
                if os.path.exists(tmp_log_file):
                    os.remove(tmp_log_file)
                crash_handle.main()

                run_flag = server_utils.get_fault_tolerance_config(
                    'fault_tolerance', 'run_error_continue')
                if run_flag == 1:
                    if bench_name != bench_test:
                        continue
                else:
                    return result
            if os.path.exists(tmp_log_file):
                os.remove(tmp_log_file)