Esempio n. 1
0
def print_init_help():
    """
    Print usage help for the ``init`` subcommand.

    Writes the option summary to stdout using the project's ``bcolors``
    styling helpers and the module-level ``print_option`` formatter.
    """
    print(bcolors.OKGREEN + "Tester init" + bcolors.ENDC)
    print(
        "Generates config file template in yaml format. For json format use " +
        bcolors.bold("init --json"))
    print_option("init -h, init --help", "Print help")
    print_option("init --json", "Generates micro-template in json format")
    print_option("init --all, init --json --all", "Generates template")
    print_option(
        "init --force", "Generates template even if file exists\n" +
        bcolors.note("NOTE:") + " If you want to use it with " +
        bcolors.header("--all") + " write " + bcolors.header("--all --force"))
    print(
        bcolors.note("Note:") +
        "\n\t You can use following options at the end of command to fill template"
    )
    print_option("-W, --no-warnings", "Silence all test warnings")
    print_option("-D, --no-diff", "Stop storing stdout diff")
    print_option("-e, --executable " + bcolors.bold("EXECUTABLE"),
                 "set executable")
    print_option("-f, --only-failed", "Print only failed tests")
    print_option("-d, --test-dir " + bcolors.bold("TESTS_DIRECTORY"),
                 "Tests will run from " + bcolors.bold("TESTS_DIRECTORY"))
    print_option("-R, --result-dir " + bcolors.bold("RESULT_DIRECTORY"),
                 "Output directory")
    print_option("-T, --timeout " + bcolors.bold("SECONDS"),
                 "Maximum time for test\n" + bcolors.note("NOTE:"))
    print_option(
        "-E, --exclude " + bcolors.bold("REGEX"), "Directories matching " +
        bcolors.bold("REGEX") + " will be excluded\n" + bcolors.note("NOTE:") +
        # Fixed typo in the user-facing text: "patter" -> "pattern".
        " When using command line arguments you can use only one regex pattern. If you want to use more, "
        "you have to use it in configuration file.")
Esempio n. 2
0
def print_testinit_help():
    """
    Print usage help for the ``testinit`` subcommand.

    Emits the subcommand summary followed by every supported option via
    the module-level ``print_option`` formatter.
    """
    print(bcolors.OKGREEN + "Tester init" + bcolors.ENDC)
    print(
        "Generates test config file template in yaml format. For json format use "
        + bcolors.bold("testinit --json"))
    force_description = ("Generates template even if file exists\n" +
                         bcolors.note("NOTE:") +
                         " If you want to use it with " +
                         bcolors.header("--all") + " write " +
                         bcolors.header("--all --force"))
    for flags, description in [
            ("testinit -h", "Print help"),
            ("testinit --json", "Generates micro-template in json format"),
            ("testinit --all, testinit --json --all", "Generates template"),
            ("testinit --force", force_description),
    ]:
        print_option(flags, description)
    print(
        bcolors.note("Note:") +
        "\n\t You can use following options at the end of command to fill template"
    )
    folder_description = ("Create " + bcolors.bold("FOLDER") +
                          " and generated file save in it")
    for flags, description in [
            ("-n, --name FOLDER", folder_description),
            ("-i, --input FILE", "File with stdin"),
            ("-o, --output FILE", "File with stdout"),
            ("-r, --return-code NUMBER", "Expected return code"),
            ("-c, --comment", "Test comment"),
            ("-T, --timeout", "Maximum time for test"),
    ]:
        print_option(flags, description)
Esempio n. 3
0
 def call_error(self, error_code, msg=None):
     """
     Report a fatal error on stderr and terminate the process.

     Prints the error name matching ``error_code`` (plus an extra hint for
     the missing-yaml case), dumps any collected suspicion notes, clears
     them, and exits with ``error_code.value``.

     :param error_code: member of ``Errors``; its ``value`` becomes the exit code
     :param msg: optional detail text; defaults to a generic help hint
     """
     error_name = "Undefined error"
     pom_msg = "use -h or --help to print help"
     more_msg = None
     if msg is not None:
         pom_msg = msg
     if error_code == Errors.WRONG_PARAM:
         error_name = "Invalid arguments!"
     elif error_code == Errors.INVALID_JSON:
         error_name = "Invalid input json"
     elif error_code == Errors.WRONG_INPUT:
         error_name = "Invalid input"
     elif error_code == Errors.MISSING_FILE:
         error_name = "Missing file"
     elif error_code == Errors.FATAL_ERROR:
         error_name = "Fatal error"
     elif error_code == Errors.YAML_DOESNT_SUPPORT:
         # Fixed grammar of the user-facing message (was "Yaml does not supported").
         error_name = "Yaml is not supported"
         more_msg = "Please install " + bcolors.note(bcolors.bold("pyyaml")) + " package to use yaml."
     if more_msg is not None:
         stderr.write(bcolors.fail(error_name) + "\n\t" + bcolors.warning(more_msg) + "\n\t" + pom_msg + "\n")
     else:
         stderr.write(bcolors.fail(error_name) + "\n\t" + pom_msg + "\n")
     if len(self.suspicions) > 0:
         stderr.write(bcolors.note("\nNOTE:\n"))
         for m in self.suspicions:
             stderr.write("\t" + m + "\n")
     self.clear_suspicions()
     exit(error_code.value)
Esempio n. 4
0
def show_score():
    """
    Print the score summary and return True when no test failed.

    Reads the module-level ``score`` buckets ('OK', 'FAIL', 'SKIP') and the
    ``only_failed`` flag; prints nothing and returns None when all buckets
    are empty.
    """
    ok_count = len(score['OK'])
    fail_count = len(score['FAIL'])
    skip_count = len(score['SKIP'])
    if not (ok_count or fail_count or skip_count):
        return

    def print_percent(label, value):
        # Same color thresholds as the final result always used.
        text = label + str(value) + "%"
        if value < 60:
            print(bcolors.fail(text))
        elif value < 80:
            print(bcolors.warning(text))
        elif value < 91:
            print(bcolors.note(text))
        else:
            print(bcolors.success(text))

    print()
    print(bcolors.note("###############"))
    print(bcolors.note("     SCORE    "))
    print(bcolors.note("###############"))
    sections = [
        (ok_count > 0 and not only_failed, bcolors.success, "Successful:", 'OK'),
        (fail_count > 0, bcolors.fail, "Failed:", 'FAIL'),
        (skip_count > 0 and not only_failed, bcolors.warning, "Skipped:", 'SKIP'),
    ]
    for visible, paint, title, bucket in sections:
        if visible:
            print()
            print(paint(title))
            for entry in score[bucket]:
                print("\t" + paint(entry))
    print()
    print(bcolors.success("SUCC:\t"), ok_count)
    print(bcolors.fail("FAIL:\t"), fail_count)
    if skip_count > 0:
        print(bcolors.warning("SKIP:\t"), skip_count)
        if not only_failed:
            total = ok_count + fail_count + skip_count
            with_skip = 100 * ok_count // total if total > 0 else 0
            print_percent("Result with skip: ", with_skip)
    attempted = ok_count + fail_count
    final = 100 * ok_count // attempted if attempted > 0 else 0
    print_percent("Final result: ", final)
    return fail_count == 0
Esempio n. 5
0
def test_failed(proc_err, proc_out, proc_rc, real_out, result_code, result_out,
                result_files, test, test_directory, missing_files,
                wrong_files):
    """
    Record a failed test in ``score`` and print the detailed failure report.

    Saves stdout/stderr and diffs under ``resultDir`` via ``store_test`` /
    ``store_diff`` when the respective check failed.
    """
    score["FAIL"].append(test_directory)
    print(bcolors.note(test_directory) + ":")
    if test.comment != "":
        print("\t" + bcolors.warning(test.comment))
    code_verdict = bcolors.success("OK") if result_code else bcolors.fail("FAIL")
    print('\tReturn code: ' + code_verdict)
    if not result_code:
        print('\t\tYour return code:', proc_rc)
        print('\t\tReal return code:', test.code)
    signal_name, signal_msg = get_signal(proc_rc)
    if signal_name != "" and signal_msg != "":
        print("\t" + bcolors.warning(signal_name) + " " + signal_msg)
    out_verdict = bcolors.success("OK") if result_out else bcolors.fail("FAIL")
    print('\tStdout: ' + out_verdict)
    # Drop the first path component; the rest names the files in resultDir.
    name = "/".join(test_directory.split('/')[1:])
    if not result_out:
        print('\t\tstdout saved: ' + str(resultDir) + name + "stdout.out")
        if no_diff:
            store_test(name, proc_out, None, proc_err)
        else:
            print('\t\t\tdiff saved: ' + str(resultDir) + name + "diff")
            store_test(name, proc_out, real_out, proc_err)
        if proc_err != "":
            print('\t\tstderr saved: ' + str(resultDir) + name + "stderr.out")
    if result_files is not None:
        files_verdict = (bcolors.success("OK")
                         if result_files else bcolors.fail("FAIL"))
        print('\tFiles: ' + files_verdict)
        if not result_files:
            if len(missing_files) > 0:
                print('\tMissing files:')
                for missing in missing_files:
                    print('\t\t' + missing)
            if len(wrong_files) > 0:
                print('\tWrong files:')
                for entry in wrong_files:
                    exp, out, diff = tuple(entry)
                    exp = exp.split('/')[-1]
                    out = out.split('/')[-1]
                    store_diff(name, exp, out, diff)
                    print('\t\t' + exp + " x " + out + " is different")
                    print('\t\tdiff saved: ' + str(resultDir) + name + exp +
                          '_x_' + out + '.diff')
    print(bcolors.fail('\tTest FAILED\n'))
Esempio n. 6
0
 def call_warning(self, msg=""):
     """
     Emit a warning message together with any collected suspicion notes.

     Silenced entirely when ``self.no_warnings`` is set; with no message
     and no pending notes it prints nothing at all.
     """
     if self.no_warnings:
         return
     if msg:
         self.warning(msg)
     elif not self.suspicions:
         # Nothing to report: no message and no pending notes.
         return
     if self.suspicions:
         print(bcolors.note("NOTE:"))
         for note in self.suspicions:
             print("\t" + note)
     self.clear_suspicions()
     print("")
Esempio n. 7
0
def print_help():
    """
    Print the top-level usage help for the tester.

    Lists every command line option via ``print_option`` together with the
    ``init`` and ``testinit`` subcommands.
    """
    print(bcolors.OKGREEN + "Tester" + bcolors.ENDC)
    print_option("-h, --help", "Print help")
    print_option("-V, --version", "Print version")
    print_option("-W, --no-warnings", "Silence all test warnings")
    print_option("-D, --no-diff", "Stop storing stdout diff")
    print_option("-e, --executable " + bcolors.bold("EXECUTABLE"),
                 "set executable")
    print_option("-f, --only-failed", "Print only failed tests")
    print_option("-C, --clean", "Remove result directory and do not run tests")
    print_option(
        "-d, --test-dir " + bcolors.bold("TESTS_DIRECTORY"),
        "Tests will run from " + bcolors.bold("TESTS_DIRECTORY") + "\n" +
        bcolors.note("NOTE:") + " If " + bcolors.header("--test_dir") +
        " is set, this will have no effect")
    print_option(
        "-t, --test " + bcolors.bold("TEST_DIRECTORY"),
        "Only test from " + bcolors.bold("TEST_DIRECTORY") + " will run")
    print_option(
        "-R, --result-dir " + bcolors.bold("RESULT_DIRECTORY"),
        "Output directory\n" + bcolors.note("NOTE:") + " Default value is " +
        bcolors.bold("result"))
    print_option(
        "-T, --timeout " + bcolors.bold("SECONDS"), "Maximum time for test\n" +
        bcolors.note("NOTE:") + " Default value is " + bcolors.bold("15"))
    print_option(
        "-E, --exclude " + bcolors.bold("REGEX"), "Directories matching " +
        bcolors.bold("REGEX") + " will be excluded\n" + bcolors.note("NOTE:") +
        # Fixed typo in the user-facing text: "patter" -> "pattern".
        " When using command line arguments you can use only one regex pattern. If you want to use more, "
        "you have to use it in configuration file.")
    print_option(
        "-c, --config " + bcolors.bold("CONFIG"),
        "will use " + bcolors.bold("CONFIG") + " file as configuration\n" +
        bcolors.note("NOTE:") +
        " configuration from json file can be overwritten with command line arguments"
    )
    print_option(
        "init " + bcolors.bold("OPTIONS"), "Generate " +
        bcolors.bold("config file") + " template\n" + bcolors.note("NOTE:") +
        " for more info use " + bcolors.bold("tester init -h"))
    print_option(
        "testinit " + bcolors.bold("OPTIONS"),
        "Generate " + bcolors.bold("test config file") + " template\n" +
        bcolors.note("NOTE:") + " for more info use " +
        bcolors.bold("tester testinit -h"))
Esempio n. 8
0
def lets_test(arguments):
    """
    Parse command line arguments, prepare the result directory and run tests.

    Exits early for help/version, aborts via ``error_handler.call_error``
    on invalid input, removes any existing result directory, then runs one
    test or the whole suite and prints the score.
    """
    arguments = argstest.parse_args_test(arguments)
    if not arguments.valid:
        error_handler.call_error(Errors.WRONG_PARAM)
    if arguments.hlp:
        argstest.print_help()
        exit(0)
    elif arguments.version:
        print("Tester version - " + version)
        exit(0)
    if arguments.executable is None:
        error_handler.call_error(Errors.WRONG_PARAM, "Executable is not set")

    # Propagate the parsed options into the test-runner module.
    my_test.executable = arguments.executable
    my_test.only_failed = arguments.onlyFailed
    my_test.resultDir = arguments.result_dir
    my_test.timeout = arguments.timeout
    my_test.exclude = arguments.exclude
    error_handler.no_warnings = arguments.no_warnings
    my_test.no_diff = arguments.no_diff

    # Always start from a clean result directory.
    result_name = arguments.result_dir
    result_path = Path(result_name)
    if result_path.exists():
        if result_path.is_dir():
            rmtree(result_name)
        else:
            error_handler.call_error(
                Errors.WRONG_PARAM,
                bcolors.bold(result_name) + " is not a directory")
    if arguments.clean:
        print(bcolors.note(arguments.result_dir) + " removed")
        exit(0)
    if arguments.tests_dir is None and arguments.test is None:
        error_handler.call_error(Errors.WRONG_INPUT, "No tests")
    if arguments.test is not None:
        my_test.run(arguments.test)
    elif arguments.tests_dir is not None:
        my_test.run_all(arguments.tests_dir)
    my_test.show_score()
Esempio n. 9
0
def read_test_file(test, test_file, data):
    """
    Load test configuration values from parsed config data into ``test``.

    Every known key is type-checked before assignment; a wrong type aborts
    via ``error_handler.call_error`` and an unknown key only produces a
    warning.  Finally verifies that ``outputFiles`` and ``expectedFiles``
    have the same length.

    :param test: test object whose attributes are filled in
    :param test_file: file name, used only in error messages
    :param data: dict parsed from the json/yaml test file
    """
    for key in data.keys():
        if key == 'input':
            if isinstance(data[key], str):
                test.input = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('input') + " in " + bcolors.bold(test_file) +
                    " has to be string")
        elif key == 'output':
            if isinstance(data[key], str):
                test.output = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('output') + " in " + bcolors.bold(test_file) +
                    " has to be string")
        elif key == 'comment':
            if isinstance(data[key], str):
                test.comment = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('comment') + " in " +
                    bcolors.bold(test_file) + " has to be string")
        elif key == 'returnCode':
            if isinstance(data[key], int):
                test.code = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('returnCode') + " in " +
                    bcolors.bold(test_file) + " has to be int")
        elif key == 'timeout':
            if isinstance(data[key], int) and data[key] > 0:
                test.timeout = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('timeout') + " in " +
                    bcolors.bold(test_file) + " has to be int bigger than 0")
        elif key == 'args':
            if isinstance(data[key], list):
                test.args = data[key]
            elif isinstance(data[key], str):
                # A bare string is accepted as a single-argument list.
                test.args = [data[key]]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    # Message wording aligned with the expectedFiles branch
                    # (was "has to be array, string").
                    bcolors.bold('args') + " in " + bcolors.bold(test_file) +
                    " has to be array or string")
        elif key == 'expectedFiles':
            if isinstance(data[key], list):
                test.expected_files = data[key]
            elif isinstance(data[key], str):
                test.expected_files = [data[key]]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    bcolors.bold('expectedFiles') + " in " +
                    bcolors.bold(test_file) + " has to be array or string")
        elif key == 'outputFiles':
            if isinstance(data[key], list):
                test.output_files = data[key]
            elif isinstance(data[key], str):
                test.output_files = [data[key]]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    # Fixed: the message now names the real config key
                    # 'outputFiles' (previously said 'output_files').
                    bcolors.bold('outputFiles') + " in " +
                    bcolors.bold(test_file) + " has to be array or string")
        else:
            error_handler.warning("Unknown key \'" + bcolors.note(key) +
                                  "\' in " + test_file)
    if len(test.output_files) != len(test.expected_files):
        error_handler.call_error(
            Errors.FATAL_ERROR,
            "Count of files in " + bcolors.bold('outputFiles') + " and " +
            bcolors.bold('expectedFiles') + " is different")
Esempio n. 10
0
def run(test_directory):
    """
    Run a single test and record/print its result.

    Reads the test configuration (yaml preferred over json when both
    exist), runs the configured executable with the test's stdin, then
    compares return code, stdout and any expected output files, finally
    recording the outcome in ``score`` and printing a report.

    :param test_directory: path to the directory holding the test files
    """
    # Skip directories matched by the configured exclude patterns.
    if check_exclude(test_directory):
        return
    # Normalise to a trailing slash so plain concatenation builds paths.
    if test_directory[-1] != '/':
        test_directory += '/'
    test_d = Path(test_directory)
    if not test_d.is_dir():
        error_handler.call_error(
            Errors.WRONG_INPUT,
            bcolors.bold(test_directory) + " does not exists or is not folder")
    test = Test()
    test_json_file = test_directory + test_file_name + ".json"
    test_yaml_file = test_directory + test_file_name + ".yaml"
    test_json_file_path = Path(test_json_file)
    test_yaml_file_path = Path(test_yaml_file)
    # Module-level default; may be overridden by the test file below.
    test.timeout = timeout
    yaml_exists = test_yaml_file_path.is_file()
    if not (yaml_exists or test_json_file_path.is_file()):
        # No test config present; missing_test handles this case.
        missing_test(test, test_directory)
    else:
        test_file = None
        data = None
        if yaml_exists:
            # The yaml config wins over json when both files are present.
            if yaml_support:
                test_file = test_yaml_file
                data = read_yaml_test(test_file)
            else:
                error_handler.call_error(
                    Errors.YAML_DOESNT_SUPPORT,
                    "Can not read " + bcolors.note(test_file_name + ".yaml"))
        else:
            test_file = test_json_file
            data = read_json_test(test_file)
        read_test_file(test, test_file, data)
    input_data, real_out, valid = test_set_missing(test, test_directory,
                                                   test_json_file)
    if not valid:
        return
    # Build the command line: executable plus optional test arguments.
    cm = ""
    if test.args is not None and len(test.args) > 0:
        cm = ' '.join(test.args)
        cm = ' ' + cm
    cmd = (executable + cm)
    process = Popen(cmd.split(' '), stdout=PIPE, stderr=PIPE, stdin=PIPE)
    out = None
    err = None
    # Set to True when the child exceeds its time limit.
    runout = False
    if input_data is None:
        try:
            out, err = process.communicate(timeout=test.timeout)
        except TimeoutExpired:
            process.kill()
            runout = True
    else:
        try:
            out, err = process.communicate(input=input_data,
                                           timeout=test.timeout)
        except TimeoutExpired:
            process.kill()
            runout = True

    proc_out = ""
    if out is not None:
        proc_out = out.decode('utf-8')
    proc_err = ""
    if err is not None:
        proc_err = err.decode('utf-8')
    if runout:
        # A timed-out run is reported immediately as a failure.
        score["FAIL"].append(test_directory)
        print(bcolors.note(test_directory) + ":")
        if test.comment != "":
            print("\t" + bcolors.warning(test.comment))
        print(bcolors.warning("\tRequest timed out"))
        print(bcolors.fail('\tTest FAILED\n'))
        return
    # Sentinel used when no return code is available after communicate().
    proc_rc = -9
    if process.returncode is not None:
        proc_rc = int(process.returncode)

    result_code = test.code == proc_rc
    result_out = real_out == proc_out
    # Stays None when the test declares no output files to check.
    result_files = None
    missing_files = list()
    wrong_files = list()
    if len(test.output_files) > 0:
        result_files = True
        i = 0
        # Pairwise compare each expected file against its produced file.
        while i < len(test.expected_files):
            exp = test_directory + test.expected_files[i]
            exp_path = Path(exp)
            p_out = test.output_files[i]
            p_out_path = Path(p_out)
            # v: both files of the pair exist and can be compared.
            v = True
            if not exp_path.is_file():
                missing_files.append(exp)
                result_files = False
                v = False
            if not p_out_path.is_file():
                missing_files.append(p_out)
                result_files = False
                v = False
            if v:
                with open(exp, "r") as myfile:
                    exp_data = myfile.read()
                with open(p_out, "r") as myfile:
                    out_data = myfile.read()
                if exp_data == out_data:
                    result_files = result_files and True
                else:
                    result_files = False
                    # Build a zero-context unified diff for the report.
                    d_out = out_data.strip().splitlines()
                    d_exp = exp_data.strip().splitlines()
                    diff = list()
                    for line in difflib.unified_diff(d_exp,
                                                     d_out,
                                                     test.expected_files[i],
                                                     p_out,
                                                     n=0,
                                                     lineterm=''):
                        diff.append(line)
                    diff = "\n".join(diff) + "\n"
                    wrong_files.append([exp, p_out, diff])

            i += 1
    result = result_code and result_out
    if result_files is not None:
        result = result and result_files
    if result:
        score["OK"].append(test_directory)
        if not only_failed:
            print(bcolors.note(test_directory) + ":")
            print(bcolors.success('\tTest OK\n'))
    else:
        test_failed(proc_err, proc_out, proc_rc, real_out, result_code,
                    result_out, result_files, test, test_directory,
                    missing_files, wrong_files)
0
def parse_config(ar, config_file, data):
    """
    Apply configuration values from parsed config data onto ``ar``.

    Every known key is type-checked before assignment; a wrong type aborts
    via ``error_handler.call_error`` and an unknown key only produces a
    warning.  Directory values are normalised to end with '/'.

    :param ar: argument object whose attributes are overwritten
    :param config_file: file name, used only in the unknown-key warning
    :param data: dict parsed from the json/yaml configuration file
    """
    for key in data.keys():
        if key == 'onlyFailed':
            if isinstance(data[key], bool):
                ar.onlyFailed = data[key]
            else:
                # Error message fixed to name the actual key (was "OnlyFailed").
                error_handler.call_error(Errors.INVALID_JSON,
                                         "onlyFailed has to be bool")
        elif key == 'noWarnings':
            if isinstance(data[key], bool):
                ar.no_warnings = data[key]
            else:
                error_handler.call_error(Errors.INVALID_JSON,
                                         "noWarnings has to be bool")
        elif key == 'noDiff':
            if isinstance(data[key], bool):
                ar.no_diff = data[key]
            else:
                error_handler.call_error(Errors.INVALID_JSON,
                                         "noDiff has to be bool")
        elif key == "testsDir":
            if isinstance(data[key], str):
                ar.tests_dir = data[key]
                if ar.tests_dir[-1] != '/':
                    ar.tests_dir += '/'
            else:
                # Error message fixed to name the actual key (was "testsDirectory").
                error_handler.call_error(Errors.INVALID_JSON,
                                         "testsDir has to be string")
        elif key == "resultDir":
            if isinstance(data[key], str):
                ar.result_dir = data[key]
                if ar.result_dir[-1] != '/':
                    ar.result_dir += '/'
            else:
                # Error message fixed to name the actual key (was "resultDirectory").
                error_handler.call_error(Errors.INVALID_JSON,
                                         "resultDir has to be string")
        elif key == "timeout":
            if isinstance(data[key], int) and data[key] > 0:
                ar.timeout = data[key]
            else:
                error_handler.call_error(
                    Errors.INVALID_JSON,
                    "timeout has to be int and greater than 0")
        elif key == "exclude":
            if isinstance(data[key], str):
                # A non-empty string becomes a single-pattern list;
                # an empty string leaves the previous value untouched.
                if data[key] != "":
                    ar.exclude = [data[key]]
            elif isinstance(data[key], list):
                ar.exclude = data[key]
            else:
                error_handler.call_error(Errors.INVALID_JSON,
                                         "exclude has to be string or array")
        elif key == "executable":
            if isinstance(data[key], str):
                ar.executable = data[key]
            else:
                error_handler.call_error(Errors.INVALID_JSON,
                                         "executable has to be string path")
        else:
            error_handler.warning("Unknown key \'" + bcolors.note(key) +
                                  "\' in " + config_file)
Esempio n. 12
0
def lets_init(arguments):
    """
    Handle the ``init`` and ``testinit`` template-generation subcommands.

    ``init`` writes a tester configuration template, ``testinit`` a
    per-test template.  ``--json`` as the second argument selects json
    output; otherwise yaml is used (requires PyYAML).  ``--all`` exports
    the full template and ``--force`` allows overwriting an existing file.

    :param arguments: raw argument list; ``arguments[0]`` is the subcommand
    """
    if arguments[0] == 'init':
        if len(arguments) > 1 and arguments[1] == '--json':
            # init --json: emit the config template as json.
            arg = arguments.copy()
            arg.pop(0)
            arg.pop(0)
            use_all = False
            force = False
            if len(arg) > 0:
                # --all must precede --force on the command line.
                if arg[0] == '--all':
                    arg.pop(0)
                    use_all = True
                if len(arg) > 0 and arg[0] == '--force':
                    arg.pop(0)
                    force = True
            json_file_path = Path(argstest.config_json)
            arg = argstest.parse_command_line_init(arg)
            if not arg.valid:
                error_handler.call_error(Errors.WRONG_PARAM)
            if arg.hlp:
                argstest.print_init_help()
                exit(0)
            arg = arg.export(use_all)
            json_data = json.dumps(arg, indent=4)
            if json_file_path.exists():
                if force:
                    error_handler.call_warning("Rewriting " + bcolors.note(argstest.config_json))
                else:
                    error_handler.call_error(Errors.FATAL_ERROR, bcolors.note(argstest.config_json) +
                                             " already exists. If you want to rewrite it use --force option")
            with open(argstest.config_json, "w") as json_file:
                print(json_data, file=json_file)
            print(bcolors.success("Success: ") + argstest.config_json + ' created')

        else:
            # init (yaml): needs the optional PyYAML dependency.
            if not yaml_support:
                error_handler.call_error(Errors.YAML_DOESNT_SUPPORT, "Can not export to yaml")
            arg = arguments.copy()
            arg.pop(0)
            use_all = False
            force = False
            if len(arg) > 0:
                if arg[0] == '--all':
                    arg.pop(0)
                    use_all = True
                if len(arg) > 0 and arg[0] == '--force':
                    arg.pop(0)
                    force = True
            arg = argstest.parse_command_line_init(arg)
            if not arg.valid:
                error_handler.call_error(Errors.WRONG_PARAM)
            if arg.hlp:
                argstest.print_init_help()
                exit(0)
            arg = arg.export(use_all)
            yaml_data = yaml.dump(arg, explicit_start=True, default_flow_style=False)
            yaml_file_path = Path(argstest.config_yaml)
            if yaml_file_path.exists():
                if force:
                    error_handler.call_warning("Rewriting " + bcolors.note(argstest.config_yaml))
                else:
                    error_handler.call_error(Errors.FATAL_ERROR, bcolors.note(argstest.config_yaml) +
                                             " already exists. If you want to rewrite it use --force option")
            with open(argstest.config_yaml, "w") as yaml_file:
                print(yaml_data, file=yaml_file)
            print(bcolors.success("Success: ") + argstest.config_yaml + ' created')

    elif arguments[0] == 'testinit':
        if len(arguments) > 1 and arguments[1] == '--json':
            # testinit --json: emit the per-test template as json.
            arg = arguments.copy()
            arg.pop(0)
            arg.pop(0)
            use_all = False
            force = False
            if '-h' in arg:
                argstest.print_testinit_help()
                exit(0)
            if len(arg) > 0:
                if arg[0] == '--all':
                    arg.pop(0)
                    use_all = True
                if len(arg) > 0 and arg[0] == '--force':
                    arg.pop(0)
                    force = True
            arg = argstest.parse_command_line_init_test(arg)
            name = my_test.test_file_name_json
            if arg.name is not None:
                # NOTE(review): makedirs only runs when the given name lacked a
                # trailing '/'; a name already ending in '/' is never created --
                # confirm that such directories are expected to exist already.
                if arg.name[-1] != '/':
                    arg.name += '/'
                    makedirs(arg.name)
                name = arg.name + name
            arg = arg.export(use_all)
            json_data = json.dumps(arg, indent=4)
            json_file_path = Path(name)
            if json_file_path.exists():
                if force:
                    error_handler.call_warning("Rewriting " + bcolors.note(name))
                else:
                    error_handler.call_error(Errors.FATAL_ERROR, bcolors.note(name) +
                                             " already exists. If you want to rewrite it use --force option")
            with open(name, "w") as json_file:
                print(json_data, file=json_file)
            print(bcolors.success("Success: ") + name + ' created')

        else:
            # testinit (yaml): needs the optional PyYAML dependency.
            if not yaml_support:
                error_handler.call_error(Errors.YAML_DOESNT_SUPPORT, "Can not export to yaml")
            arg = arguments.copy()
            arg.pop(0)
            use_all = False
            force = False
            if '-h' in arg:
                argstest.print_testinit_help()
                exit(0)
            if len(arg) > 0:
                if arg[0] == '--all':
                    arg.pop(0)
                    use_all = True
                if len(arg) > 0 and arg[0] == '--force':
                    arg.pop(0)
                    force = True
            arg = argstest.parse_command_line_init_test(arg)
            name = my_test.test_file_name_yaml
            if arg.name is not None:
                # NOTE(review): same trailing-'/' caveat as the json branch above.
                if arg.name[-1] != '/':
                    arg.name += '/'
                    makedirs(arg.name)
                name = arg.name + name
            arg = arg.export(use_all)
            yaml_data = yaml.dump(arg, explicit_start=True, default_flow_style=False)
            json_file_path = Path(name)
            if json_file_path.exists():
                if force:
                    error_handler.call_warning("Rewriting " + bcolors.note(name))
                else:
                    error_handler.call_error(Errors.FATAL_ERROR, bcolors.note(name) +
                                             " already exists. If you want to rewrite it use --force option")
            with open(name, "w") as yaml_file:
                print(yaml_data, file=yaml_file)
            print(bcolors.success("Success: ") + name + ' created')
Esempio n. 13
0
import my_test
import json
from bColors import bcolors
from errors import error_handler
from errors import Errors
from shutil import rmtree
from pathlib import Path
from importlib import util
from os import makedirs

# Probe for the optional PyYAML dependency without importing it outright.
spam_spec = util.find_spec("yaml")
found = spam_spec is not None
yaml_support = True  # flipped to False below when the yaml module is absent
if not found:
    error_handler.warning("yaml module does not exists. Please install " +
                          bcolors.note(bcolors.bold("pyyaml")) + " package for using yaml module")
    yaml_support = False
else:
    import yaml

# Version string reported by the --version option.
version = "2.3.0.2"


def lets_test(arguments):
    arguments = argstest.parse_args_test(arguments)
    if not arguments.valid:
        error_handler.call_error(Errors.WRONG_PARAM)
    if arguments.hlp:
        argstest.print_help()
        exit(0)
    elif arguments.version:
Esempio n. 14
0
import my_test
import json
from bColors import bcolors
from errors import error_handler
from errors import Errors
from shutil import rmtree
from pathlib import Path
from importlib import util
from os import makedirs

# Probe for the optional PyYAML dependency without importing it outright.
spam_spec = util.find_spec("yaml")
found = spam_spec is not None
yaml_support = True  # flipped to False below when the yaml module is absent
if not found:
    error_handler.warning("yaml module does not exists. Please install " +
                          bcolors.note(bcolors.bold("pyyaml")) +
                          " package for using yaml module")
    yaml_support = False
else:
    import yaml

# Version string reported by the --version option.
version = "2.3.0.2"


def lets_test(arguments):
    arguments = argstest.parse_args_test(arguments)
    if not arguments.valid:
        error_handler.call_error(Errors.WRONG_PARAM)
    if arguments.hlp:
        argstest.print_help()
        exit(0)