Example #1
    def parallel_run_task(self, process_num):
        multiprocessing.freeze_support()
        pool = multiprocessing.Pool(
            min(multiprocessing.cpu_count(), process_num))
        result_queue = []
        if configs.get_val("fail_verbose"):
            callback_func = self.output_failed
        else:
            callback_func = None
        for tasks_name in self.task_set:
            for index, task in enumerate(self.task_set[tasks_name]):
                if task.result[0] == PASS or task.result[0] == UNRESOLVED:
                    continue
                result_queue.append(
                    pool.apply_async(
                        run_commands,
                        args=(
                            (tasks_name, index),
                            task.result,
                            task.commands,
                        ),
                        kwds=task.running_config,
                        callback=callback_func,
                    ))
        progress(result_queue, configs.get_val("progress"))
        pool.close()
        pool.join()

        result_queue = [result.get() for result in result_queue]
        for position, result in result_queue:
            tasks_name, index = position
            task = self.task_set[tasks_name][index]
            self.task_set_result[tasks_name][task.result[0]] -= 1
            task.result = result
            self.task_set_result[tasks_name][result[0]] += 1
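The pattern used above, submitting work with Pool.apply_async, optionally attaching a callback, then collecting results with get() after close()/join(), can be reduced to a minimal standalone sketch. The simulate_task function, the on_done callback, and the (position, result) tuple shape are illustrative stand-ins, not the project's run_commands API.

import multiprocessing


def simulate_task(position, value):
    # Stand-in for the real worker: return the task's position plus a result.
    return position, value * 2


def on_done(result):
    # The callback runs in the parent process as each task finishes.
    position, value = result
    print("finished", position, "->", value)


if __name__ == "__main__":
    multiprocessing.freeze_support()
    pool = multiprocessing.Pool(min(multiprocessing.cpu_count(), 4))
    pending = [
        pool.apply_async(simulate_task, args=((name, i), i), callback=on_done)
        for name in ("suite_a", "suite_b")
        for i in range(3)
    ]
    pool.close()
    pool.join()
    # get() returns the worker's value and re-raises any exception it hit.
    results = [p.get() for p in pending]
    print(results)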
Example #2
def main():
    test_suite_config, running_config, log_config = configs.init_config()
    logger = configs.LOGGER

    test_paths = test_suite_config.get("test_paths")
    test_cfg = test_suite_config.get("test_cfg")
    cli_running_config = test_suite_config.get("cli_running_config")

    retry = configs.get_val("retry")
    result = ""
    failed = False
    for test in test_paths:
        test_failed = False
        if test.exists():
            if not test_cfg:
                test_cfg = test / "test.cfg"
            try:
                task = TestSuiteTask(test, test_cfg, running_config,
                                     cli_running_config)
            except TestError as e:
                logger.info(e)
                continue
            if not task.task_set:
                continue
            for run_time in range(1, retry + 2):
                logger.info("Run {} times".format(run_time))
                failed_num = task.run(configs.get_val("processes"))
                if failed_num > 0:
                    test_failed = True
                else:
                    test_failed = False
                result = task.gen_summary([])
            failed |= test_failed
        else:
            logger.info(
                "Test path: {} does not exist, please check".format(test))

    output = configs.get_val("output")
    if output:
        if output.exists() and output.is_file():
            name = "{}_{}{}".format(output.stem, int(time.time()),
                                    output.suffix)
            logger.info(
                "result file: {} exists, will move exists file to: {}".format(
                    output, name))
            shutil.move(str(output), str(output.parent / name))
        logger.info("Save test result at: {}".format(output))
        with output.open("w") as f:
            f.write(result)

    temp_dir = running_config.get("temp_dir")
    if configs.get_val("debug"):
        logger.debug("Keep temp file at %s", temp_dir)
    elif temp_dir.exists():
        logger.debug("remove temp_dir %s", temp_dir)
        shutil.rmtree(str(temp_dir))

    if configs.get_val("fail_exit") and failed:
        exit(1)
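The backup step before writing the result file can be shown on its own: if the target already exists, it is renamed with a timestamp suffix built from the path's stem and suffix, then the new content is written. The file name below is made up for the sketch.

import shutil
import time
from pathlib import Path


def write_result(output: Path, content: str) -> None:
    # Move an existing file aside, preserving its extension, before writing.
    if output.exists() and output.is_file():
        backup = "{}_{}{}".format(output.stem, int(time.time()), output.suffix)
        shutil.move(str(output), str(output.parent / backup))
    with output.open("w") as f:
        f.write(content)


write_result(Path("result.txt"), "summary goes here\n")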
Example #3
 def gen_summary(self, print_type=None):
     self.result = defaultdict(int)
     for name in self.task_set_result:
         for status, num in self.task_set_result[name].items():
             self.result[status] += num
     if print_type is None:
         print_type = configs.get_val("print_type")
     brief_summary = self.gen_brief_summary()
     summary = "-" * 120
     summary += "\nTestSuite Path: {}\n".format(self.path)
     for tasks_name in self.task_set:
         for task in sorted(self.task_set[tasks_name],
                            key=lambda task: task.name):
             result = task.result[0]
             if not print_type or result in print_type:
                 summary += "  {}, Case: {}, Result: {}\n".format(
                     tasks_name, task.case_path, result)
     summary += "\n" + brief_summary
     summary += "-" * 120
     return summary
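Summing the per-suite status counters into one overall tally relies on defaultdict(int), so statuses that have not been seen yet start at zero. The suite names and counts below are invented for illustration.

from collections import defaultdict

task_set_result = {
    "suite_a": {"PASS": 3, "FAIL": 1},
    "suite_b": {"PASS": 2, "UNRESOLVED": 1},
}

result = defaultdict(int)
for name in task_set_result:
    for status, num in task_set_result[name].items():
        result[status] += num

print(dict(result))  # {'PASS': 5, 'FAIL': 1, 'UNRESOLVED': 1}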
Example #4
def run_commands(position,
                 old_result,
                 commands,
                 work_dir,
                 timeout,
                 log_config,
                 env=None):
    name = "{}_{}".format(log_config[1], int(time.time()))
    name = log_config[1]
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
                                  datefmt="%Y-%m-%d %H:%M:%S")
    logger = construct_logger(log_config[0], log_config[1], file_fmt=formatter)
    if not commands:
        err = "Run task exit unexpected : {}, Log file at: {}.log".format(
            old_result[-1], log_config[0].get("dir") / name)
        logger.error(err)
        return position, old_result
    remain_time = timeout
    result = (PASS, None)
    logger.debug("Work directory: {}".format(work_dir))
    if get_val("dry_run"):
        with (work_dir / "test.sh").open("w") as f:
            f.write("#!/bin/bash\n")
            for command in commands[:-1]:
                f.write(command)
                f.write(" && \\\n")
            f.write(commands[-1])
        return position, (NOT_RUN, None)

    for command in commands:
        start = timeit.default_timer()

        return_code, com_out, com_err = run_command(command, work_dir,
                                                    remain_time, logger, env)
        if return_code != 0:
            result = (FAIL, (return_code, command, shorten(com_err, width=84)))
            err = "Failed, Log file at: {}.log".format(
                log_config[0].get("dir") / name)
            logger.error(err)
            break

        run_time = timeit.default_timer() - start
        remain_time = remain_time - run_time
        logger.debug("Run time: {:.2}, remain time: {:.2}".format(
            run_time, remain_time))
    if result[0] == PASS:
        logger.debug("Task executed successfully")
    handlers = logger.handlers[:]
    for handler in handlers:
        handler.close()
        logger.removeHandler(handler)
    return position, result
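construct_logger is project-internal; a rough equivalent is a named logger with its own file handler that is closed and detached when the task finishes, so handles do not leak between tasks. The logger name, file name, and format string below are assumptions for the sketch.

import logging


def make_task_logger(name, log_file):
    # One logger per task, writing to its own log file.
    formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s",
                                  datefmt="%Y-%m-%d %H:%M:%S")
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


logger = make_task_logger("demo_task", "demo_task.log")
logger.debug("Task executed successfully")

# Close and detach handlers once the task is done, as in the example above.
for handler in logger.handlers[:]:
    handler.close()
    logger.removeHandler(handler)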
Example #5
 def serial_run_task(self):
     for tasks_name in self.task_set:
         for index, task in enumerate(self.task_set[tasks_name]):
             if task.result[0] == PASS or task.result[0] == UNRESOLVED:
                 continue
             self.task_set_result[tasks_name][task.result[0]] -= 1
             _, task.result = run_commands((tasks_name, index), task.result,
                                           task.commands,
                                           **task.running_config)
             if configs.get_val("fail_verbose"):
                 self.output_failed((_, task.result))
             status, _ = task.result
             self.task_set_result[tasks_name][status] += 1
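Each task's running_config dictionary is passed to run_commands with ** unpacking, so its keys have to match the function's keyword parameters (work_dir, timeout, log_config, env in Example #4). A stripped-down illustration with invented values:

def run(work_dir, timeout, env=None):
    # Stand-in for run_commands' keyword parameters.
    return work_dir, timeout, env


running_config = {"work_dir": "/tmp/task_1", "timeout": 300}
print(run(**running_config))  # ('/tmp/task_1', 300, None)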
Example #6
 def run(self, process_num=1):
     logger = configs.LOGGER
     if process_num == 1:
         logger.debug(
             "The number of running processes is 1, which will run serial")
         self.serial_run_task()
     else:
         logger.debug(
             "The number of running processes is {}, and will run in parallel"
             .format(process_num))
         self.parallel_run_task(process_num)
     print_type = configs.get_val("print_type")
     for line in self.gen_summary(print_type).splitlines():
         logger.info(line)
     return self.result[FAIL]
Example #7
def main():
    test_suite_config, running_config, log_config = configs.init_config()
    logger = configs.LOGGER
    log_dir = log_config.get("dir")

    test_paths = test_suite_config.get("test_paths")
    cli_test_cfg = test_suite_config.get("test_cfg")
    cli_running_config = test_suite_config.get("cli_running_config")

    root = ElementTree.Element("testsuites")
    json_result = []

    retry = configs.get_val("retry")
    result = ""
    failed = False
    for test in test_paths:
        test_cfg = cli_test_cfg
        test_result = None
        test_failed = False
        if test.exists():
            if not test_cfg:
                test_cfg = test / "test.cfg"
            try:
                task = TestSuiteTask(test, test_cfg, running_config, cli_running_config)
            except TestError as e:
                logger.info(e)
                continue
            if not task.task_set:
                continue
            for run_time in range(1, retry + 2):
                logger.info("Run {} times".format(run_time))
                failed_num = task.run(configs.get_val("processes"))
                if failed_num > 0:
                    test_failed = True
                else:
                    test_failed = False
                test_result = task.gen_summary([])
            failed |= test_failed
            result += test_result
            task.gen_xml_result(root)
            json_result.append(task.gen_json_result())
        else:
            logger.info("Test path: {} does not exist, please check".format(test))

    xml_output = configs.get_val("xml_output")
    if xml_output:
        with xml_output.open("w") as f:
            f.write(ElementTree.tostring(root).decode("utf-8"))

    json_output = configs.get_val("json_output")
    if json_output:
        import json

        with json_output.open("w") as f:
            json.dump(json_result, f, indent=2)

    output = configs.get_val("output")
    if output:
        if output.exists() and output.is_file():
            name = "{}_{}{}".format(output.stem, int(time.time()), output.suffix)
            logger.info(
                "result file: {} exists, will move exists file to: {}".format(
                    output, name
                )
            )
            shutil.move(str(output), str(output.parent / name))
        logger.info("Save test result at: {}".format(output))
        with output.open("w", encoding="utf-8") as f:
            f.write(result)

    temp_dir = running_config.get("temp_dir")
    if configs.get_val("debug"):
        logger.debug("Keep temp file at %s", temp_dir)
    elif temp_dir.exists():
        logger.debug("remove temp_dir %s", temp_dir)
        shutil.rmtree(str(temp_dir))

    if configs.get_val("fail_exit") and failed:
        exit(1)
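Writing the aggregated XML and JSON results uses only the standard library. A minimal sketch, with invented suite and case names, of building a "testsuites" tree with ElementTree and dumping a parallel JSON structure:

import json
from xml.etree import ElementTree

root = ElementTree.Element("testsuites")
suite = ElementTree.SubElement(root, "testsuite", name="suite_a", tests="1")
ElementTree.SubElement(suite, "testcase", name="case_1", classname="suite_a")

# tostring() returns bytes by default, so decode before writing in text mode.
with open("result.xml", "w") as f:
    f.write(ElementTree.tostring(root).decode("utf-8"))

json_result = [{"name": "suite_a", "cases": [{"name": "case_1", "result": "PASS"}]}]
with open("result.json", "w") as f:
    json.dump(json_result, f, indent=2)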