Example #1
0
    def check_prerequisites(tool_paths) -> None:
        if not os.path.exists(tool_paths.data_model_to_pymcell_binary):
            fatal_error(
                "Could not find data model to pymcell conversion tool '" +
                tool_paths.data_model_to_pymcell_binary + "'.")

        TesterBase.check_prerequisites(tool_paths)
Example #2
0
    def check_prerequisites(tool_paths) -> None:
        # shutil.which returns None when the binary is not on PATH; passing
        # that None to os.path.exists would raise a TypeError
        if not os.path.exists(tool_paths.python_binary) and \
                shutil.which(tool_paths.python_binary) is None:
            fatal_error("Could not find Python binary executable '" +
                        tool_paths.python_binary + "'.")

        TesterBase.check_prerequisites(tool_paths)
Example #3
0
def check_prerequisites(opts):
    if sys.version_info < (3, 5):
        # 3.5 is what cellblender was using; possibly any 3.x release would suffice
        fatal_error("Required Python version is at least 3.5")

    # also check cmake (although it is not needed for all task types)
    opts.cmake_executable = cmake_builder.build_cmake_if_version_is_insufficient(
        opts.work_dir)
Example #4
0
    def run_check(self, check: CheckInfo, mcell_ec: int) -> int:

        res = FAILED_DIFF
        if check.test_type in [TEST_TYPE_DIFF_FILE_CONTENT, TEST_TYPE_FDIFF_FILE_CONTENT]:
            # exact file compare
            res = data_output_diff.compare_data_output_files(
                os.path.join('..', self.test_src_path, REF_NUTMEG_DATA_DIR, check.data_file),
                os.path.join(self.test_work_path, check.data_file),
                exact=(check.test_type == TEST_TYPE_DIFF_FILE_CONTENT),
                fdiff_args=self.extra_args.fdiff_args)
            self.nutmeg_log("Comparison result of '" + check.data_file + "': " + RESULT_NAMES[res], check.test_type)

        elif check.test_type in [TEST_TYPE_ZERO_COUNTS, TEST_TYPE_POSITIVE_OR_ZERO_COUNTS, 
                                 TEST_TYPE_POSITIVE_COUNTS, TEST_TYPE_COUNT_MINMAX]:
            res = self.check_counts(check)
            self.nutmeg_log("Comparison result of '" + check.data_file + "': " + RESULT_NAMES[res], check.test_type)
                
        elif check.test_type == TEST_TYPE_CHECK_SUCCESS:
            if mcell_ec == 0:
                self.nutmeg_log("MCell exit code is 0 as expected.", check.test_type)
                res = PASSED
            else:
                self.nutmeg_log("Expected exit code 0 but mcell returned " + str(mcell_ec), check.test_type)

        elif check.test_type == TEST_TYPE_CHECK_EXIT_CODE:
            if mcell_ec == check.exit_code:
                self.nutmeg_log("MCell exit code is " + str(check.exit_code) + " as expected.", check.test_type)
                res = PASSED
            else:
                self.nutmeg_log("Expected exit code " + str(check.exit_code) + " but mcell returned " + str(mcell_ec), check.test_type)

        elif check.test_type == TEST_TYPE_FILE_MATCH_PATTERN:
            res = self.check_match_pattern(check)
            self.nutmeg_log("Checking of pattern '" + check.match_pattern + "' in " + check.data_file + ": " + RESULT_NAMES[res], check.test_type)

        elif check.test_type == TEST_TYPE_UPDATE_REFERENCE:
            ref_dir = os.path.join(self.test_src_path, REF_NUTMEG_DATA_DIR)
            if not os.path.exists(ref_dir):
                os.mkdir(ref_dir)
            ref_file = os.path.join(self.test_work_path, check.data_file)
            self.nutmeg_log("Updating contents in " + ref_dir + " with " + ref_file + ".", check.test_type)
            shutil.copy(ref_file, ref_dir)
            res = NUTMEG_UPDATED_REFERENCE
            
        elif check.test_type in [TEST_TYPE_CHECK_EMPTY_FILES, TEST_TYPE_CHECK_NONEMPTY_FILES]:
            ref_file = os.path.join(self.test_work_path, check.data_file)
            sz = os.path.getsize(ref_file)
            if check.test_type == TEST_TYPE_CHECK_EMPTY_FILES:
                res = PASSED if sz == 0 else FAILED_NUTMEG_SPEC
            else:
                res = PASSED if sz != 0 else FAILED_NUTMEG_SPEC
                
        else:
            fatal_error("Unexpected check type " + TEST_TYPE_ID_TO_NAME[check.test_type])

        return res
Example #5
0
    def check_prerequisites(tool_paths: ToolPaths) -> None:
        if not os.path.exists(tool_paths.mcell_binary):
            fatal_error("Could not find executable '" +
                        tool_paths.mcell_binary + "'.")

        data_output_diff.check_or_build_fdiff()

        # work dir, e.g. /nadata/cnl/home/ahusar/src/mcell_tests/work
        if not os.path.exists(tool_paths.work_path):
            os.mkdir(tool_paths.work_path)
Example #6
0
    def check_prerequisites(tool_paths: ToolPaths) -> None:
        if not os.path.exists(tool_paths.bngl_to_data_model_script):
            fatal_error(
                "Could not find bngl to data model conversion script '" +
                tool_paths.bngl_to_data_model_script + "'.")

        if not os.path.exists(tool_paths.data_model_to_mdl_script):
            fatal_error("Could not find data model conversion script '" +
                        tool_paths.data_model_to_mdl_script + "'.")

        TesterBase.check_prerequisites(tool_paths)
Example #7
0
 def get_config(self, config, section, option, default=None):
     log.debug("Reading [%s]:%s from config" % (section, option))
     try:
         value = config.get(section, option)
         log.debug("Found value: %s" % value)
         return value
     except ConfigParser.NoSectionError as e:
         if default is not None:
             log.debug('Not found, using default: %s' % default)
             return default
         fatal_error('Error: %s' % e)
Example #8
0
    def check_counts(self, check: CheckInfo) -> int:
        data_file_path = os.path.join(self.test_work_path, check.data_file)
        # expecting that the data file contains only two columns: time and value
        res = PASSED
        try:
            with open(data_file_path, "r") as fin:
                for line in fin:
                    vals = line.split(' ')
                    if vals[0] == '#':
                        continue  # header
                    time = float(vals[0])
                    val = int(vals[1])

                    time_constraint_valid = True
                    if check.min_time is not None: 
                        time_constraint_valid = time >= check.min_time

                    if check.max_time is not None:
                        time_constraint_valid = time_constraint_valid and time <= check.max_time
                    
                    if check.test_type == TEST_TYPE_ZERO_COUNTS:
                        if time_constraint_valid and val != 0:
                            res = FAILED_NUTMEG_SPEC
                    elif check.test_type == TEST_TYPE_POSITIVE_OR_ZERO_COUNTS:
                        if time_constraint_valid and val < 0:
                            res = FAILED_NUTMEG_SPEC
                    elif check.test_type == TEST_TYPE_POSITIVE_COUNTS:
                        if time_constraint_valid and val <= 0:
                            res = FAILED_NUTMEG_SPEC
                    elif check.test_type == TEST_TYPE_COUNT_MINMAX:
                        if time_constraint_valid and \
                            ((check.count_minimum is not None and val < check.count_minimum) or \
                             (check.count_maximum is not None and val > check.count_maximum)):
                            res = FAILED_NUTMEG_SPEC
                    else:
                        fatal_error("Unknown test type in check_counts: " + str(check.test_type))\
                        
                    if res != PASSED:
                        self.report_check_counts_error(line, check)
                        return res
                        
                        
        except Exception as e:
            self.nutmeg_log(
                "Failed while parsing data file '" + data_file_path + "', exception " + str(e.args), check.test_type)
            return FAILED_NUTMEG_SPEC

        return PASSED
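The loop above assumes space-separated "time value" pairs with optional '#'-prefixed header lines. A minimal input that such a check would accept might look like this (the values are made up for illustration):

    # time count
    0 10
    1e-06 8
    2e-06 3

With TEST_TYPE_POSITIVE_COUNTS and no time constraints these lines pass; a 0 in the second column would flip the result to FAILED_NUTMEG_SPEC.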
Example #9
0
def report_results(results: Dict) -> int:
    print("\n**** RESULTS ****")
    passed_count = 0
    skipped_count = 0
    known_fails_count = 0
    todo_tests_count = 0
    ignored_tests_count = 0
    failed_tests = []
    for key, value in results.items():
        if not value:
            fatal_error('Invalid result for ' + key)
        if value != IGNORED:
            print(RESULT_NAMES[value] + ": " + str(key))

        if value == PASSED:
            passed_count += 1
        elif value in FAIL_CODES:
            failed_tests.append((value, key))
        elif value == SKIPPED:
            skipped_count += 1
        elif value == KNOWN_FAIL:
            known_fails_count += 1
        elif value == TODO_TEST:
            todo_tests_count += 1
        elif value == IGNORED:
            ignored_tests_count += 1
        else:
            fatal_error("Invalid test result value " + str(value))

    res = 0
    if failed_tests:
        log("\n\nFAILED TESTS:")
        for test in sorted(failed_tests):
            print(RESULT_NAMES[test[0]] + ": " + str(test[1]))

        log("\n!! THERE WERE ERRORS !!")
        res = 1
    else:
        log("\n-- SUCCESS --")
        res = 0

    log("PASSED: " + str(passed_count) + ", FAILED: " +
        str(len(failed_tests)) + ", SKIPPED: " + str(skipped_count) +
        ", KNOWN FAILS: " + str(known_fails_count) + ", TODO TESTS: " +
        str(todo_tests_count) + ", IGNORED: " + str(ignored_tests_count))

    return res
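A minimal usage sketch, assuming PASSED and SKIPPED are the result-code constants used throughout this module; the dictionary maps test names (hypothetical here) to result codes:

    results = {'tests/reactions_basic': PASSED,
               'tests/reactions_surface': SKIPPED}
    ec = report_results(results)  # prints the per-test lines and the summary
    sys.exit(ec)                  # 0 when nothing failed, 1 otherwise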
Example #10
0
def run_single_test(work_parser_dir, test_app, test_file, extra_args):

    expected_ec = 0
    expected_outputs = []
    with open(test_file, 'r') as f:
        line = f.readline()

        if '# FAIL' in line:
            expected_ec = 1  # exit code 1
        elif '# OK' in line:
            expected_ec = 0
        else:
            utils.fatal_error(test_file +
                              ": First line must be either '# FAIL' or '# OK'")

        line = f.readline()
        while line.startswith('# OUTPUT:'):
            expected_outputs.append(line[len('# OUTPUT:'):].strip())
            line = f.readline()

    cmd = [test_app, test_file]
    cmd += extra_args
    log_file = os.path.join(work_parser_dir,
                            os.path.basename(test_file) + '.log')
    ec = utils.run(cmd, cwd=work_parser_dir, fout_name=log_file, verbose=False)

    if ec != expected_ec:
        print("!FAIL " + test_file + ": exit code was " + str(ec) +
              ", expected " + str(expected_ec) + ", log: " + log_file)
        return False

    with open(log_file, 'r') as f:
        log_content = f.read()

    for output in expected_outputs:
        if output not in log_content:
            print("!FAIL " + test_file + ": did not find  '" + output +
                  "' in " + log_file)
            return False

    print(" PASS " + test_file)
    return True
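For reference, a hypothetical test file header this parser accepts; the first line declares the expected exit code and optional '# OUTPUT:' lines list substrings that must appear in the log:

    # OK
    # OUTPUT: parsing finished
    # OUTPUT: 0 errors

The rest of the file is the input handed to test_app together with extra_args.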
Example #11
0
 def check_prerequisites(tool_paths: ToolPaths) -> None:
     if not os.path.exists(tool_paths.pymcell4_lib):
         fatal_error("Could not find library '" + tool_paths.pymcell4_lib + ".")
     if not tool_paths.bng2pl_script:
         fatal_error("Path to bionetgen must be set using -n.")
     if not os.path.exists(tool_paths.bng2pl_script):
         fatal_error("Could not find script '" + tool_paths.bng2pl_script + ".")
Example #12
0
def test_all(opts, install_dirs):
    # check if there is an extracted bundle already
    if not install_dirs:
        install_dirs = bundle.get_extracted_bundle_install_dirs(opts)

    # run the tests as a new process
    tests_path = os.path.join(THIS_DIR, '..', REPO_NAME_MCELL_TESTS)
    test_cmd = [
        PYTHON_SYSTEM_EXECUTABLE,
        os.path.join(tests_path, RUN_TESTS_SCRIPT)
    ]
    if REPO_NAME_MCELL in install_dirs:
        test_cmd += ['-m', install_dirs[REPO_NAME_MCELL]]

    if REPO_NAME_CELLBLENDER in install_dirs:
        test_cmd += ['-b', install_dirs[REPO_NAME_CELLBLENDER]]

    if PYTHON_BLENDER_EXECUTABLE in install_dirs:
        test_cmd += ['-t', install_dirs[PYTHON_BLENDER_EXECUTABLE]]

    # the current MacOS VM crashes if too many tests are run in parallel
    if platform.system() == 'Darwin':
        test_cmd += ['-s']

    # clean the test data immediately after a test passes
    test_cmd += ['-e']

    with open(os.path.join(THIS_DIR, WORK_DIR_NAME, "test_command.sh"),
              'w') as f:
        f.write(' '.join(test_cmd))

    # for some reason the script does not terminate without shell=True
    ec = run(test_cmd,
             timeout_sec=TEST_ALL_TIMEOUT,
             cwd=tests_path,
             shell=True)
    if ec != 0:
        fatal_error("Testing failed")
Example #13
0
def get_testcases(testcase_dirs):
    """Return the list of testcases to run, by recursively inspecting
    the contents of the directories provided in the testcase_dirs list.

    The returned list is sorted in alphabetical order, so as to make
    two runs of the same testcases for the same platform comparable.

    PARAMETERS
        testcase_dirs: A list of directory names inside which the testsuite
            should look for testcases to run.  If empty, then default to
            DEFAULT_TESTCASE_DIR if that directory exists.

    RETURN VALUE
        A list of testcases to run.
    """
    # Note: We use a dictionary to store the list of testcase as
    # we are building it as a way of eliminating duplicates...
    testcases = {}
    # If no testcase was specified on the command line, then try
    # the default testcase directory.
    if not testcase_dirs:
        if not os.path.isdir(DEFAULT_TESTCASE_DIR):
            fatal_error("No testcase specified")
        testcase_dirs = [DEFAULT_TESTCASE_DIR]
    for testcase_dir in testcase_dirs:
        if not os.path.isdir(testcase_dir):
            fatal_error("Invalid directory name: %s" % testcase_dir)
        for root, dirs, files in os.walk(testcase_dir):
            if TESTCASE_SCRIPT_NAME in files:
                testcases[os.path.realpath(root)] = None
    # One last sanity check: Make sure that we have at least one testcase.
    if not testcases:
        fatal_error("No valid testcase found. Aborting.")
    # dict.keys() is a view in Python 3; build a sorted list explicitly
    return sorted(testcases)
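A usage sketch with hypothetical directory names; each discovered directory must contain a file named by TESTCASE_SCRIPT_NAME for os.walk to pick it up:

    # duplicates collapse because paths are stored as dictionary keys
    for testcase in get_testcases(['tests/acats', 'tests/regress']):
        print(testcase)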
Example #14
0
def get_dict_value(d: Dict, key: str, fname: str) -> str:
    if key not in d:
        fatal_error("Required field '" + key + "' not found in '" + fname +
                    "'.")
    return d[key]
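A usage sketch with a hypothetical dictionary, e.g. one parsed from a TOML config; a missing key terminates the run through fatal_error:

    cfg = {'category': 'tests', 'test_set': 'reactions'}
    category = get_dict_value(cfg, 'category', 'main.toml')    # -> 'tests'
    tester = get_dict_value(cfg, 'tester_class', 'main.toml')  # fatal_error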
Example #15
0
    def get_config(self, config, section, option, default=None):
        log.debug("Reading [%s]:%s from config" % (section, option))
        try:
            value = config.get(section, option)
            log.debug("Found value: %s" % value)
            return value
        except ConfigParser.NoSectionError as e:
            if default is not None:
                log.debug('Not found, using default: %s' % default)
                return default
            fatal_error('Error: %s' % e)
        except ConfigParser.NoOptionError as e:
            if default is not None:
                log.debug('Not found, using default: %s' % default)
                return default
            fatal_error('Error: %s' % e)

    def dbinsert(self, DBObj):
        ses = self.session()
        ses.add(DBObj)

        try:
            ses.commit()
            ses.close()
        except exc.ResourceClosedError:
            # If the DB connection was lost, this exception is raised; nothing needs
            # to be done because the insert is retried with another session from the pool
            pass
        except exc.InvalidRequestError:
            # For some DB connection errors, rollback must be called explicitly
            # before the DB can be written to again
            ses.rollback()
Example #16
0
 def check_prerequisites(tool_paths: ToolPaths) -> None:
     if not os.path.exists(tool_paths.pymcell4_lib):
         fatal_error("Could not find library '" + tool_paths.pymcell4_lib +
                     ".")
Example #17
0
        sys.exit(0)

    a1 = sys.argv[1]

    opts = Options()
    opts.use_private_repos = True
    if a1 == 'clone' or a1 == 'checkout':
        if argc == 3:
            opts.branch = sys.argv[2]

        print("Cloning or updating branch " + opts.branch + ".")
        repositories.get_or_update(opts)

    elif a1 == 'pull':
        if argc > 2:
            fatal_error("Command pull does not have any extra arguments")
        print("Pulling all repositories")
        repositories.pull(opts)

    elif a1 == 'push':
        if argc > 2:
            fatal_error("Command push does not have any extra arguments")
        print("Pushing all repositories")
        repositories.push(opts)

    elif a1 == 'reset-hard':
        if argc > 2:
            fatal_error("Command reset-hard does not have any extra arguments")
        print("Reseting all repositories")
        repositories.reset_hard(opts)
Example #18
0
def main():
    opts = Options()
    opts.process_opts()
    check_prerequisites(opts)
    print(opts)

    log("Top directory: " + opts.top_dir)

    #
    if opts.print_platform_info:
        print("Required blender + python archive name:")
        print("Base: " + opts.prebuilt_blender_w_python_base)
        print("Override: " + opts.prebuilt_blender_w_python_override)

        print("Resulting bundle name:")
        print(opts.result_bundle_archive_path)
        return

    # clean
    if opts.clean:
        if os.path.exists(opts.work_dir):
            log("Cleaning '" + opts.work_dir + "'")
            shutil.rmtree(opts.work_dir)
        else:
            log("Nothing to clean in '" + opts.work_dir + "'")
        sys.exit(0)

    # 1) get all the sources
    if opts.do_repos:
        repositories.get_or_update(opts)

    # 2) build
    # returns dictionary  repo name -> where it was built
    if opts.do_build:
        # generate version file
        repositories.create_version_file(opts)

        # keys are REPO_NAME_MCELL and REPO_NAME_CELLBLENDER
        install_dirs = build.build_all(opts)
    else:
        # testing will use defaults
        install_dirs = {}

    # 3) create bundle
    # overwrite install_dirs with new values
    if opts.do_bundle:
        if opts.only_cellblender_mcell:
            cellblender_mcell_plugin.create_package(opts)

            install_dirs = cellblender_mcell_plugin.extract_resulting_package(
                opts)

        elif opts.only_pypi_wheel:
            install_dirs = pypi_wheel.create_pypi_wheel(opts)

        else:
            bundle.create_bundle(opts)
            # also extract it right away if testing is needed
            install_dirs = bundle.extract_resulting_bundle(opts)

    # 4) test
    if opts.do_test:
        test_all(opts, install_dirs)

    # 5) store the release
    if opts.store_build:
        if opts.release_version != INTERNAL_RELEASE_NO_VERSION:
            # release
            if os.path.exists(opts.mcell_build_infrastructure_releases_dir):
                log("Copying release '" + opts.result_bundle_archive_path +
                    "'  to '" + opts.mcell_build_infrastructure_releases_dir +
                    "'.")
                shutil.copy(opts.result_bundle_archive_path,
                            opts.mcell_build_infrastructure_releases_dir)
            else:
                fatal_error("Could not find directory '" +
                            opts.mcell_build_infrastructure_releases_dir +
                            "', release was not stored but can be found as '" +
                            opts.result_bundle_archive_path + "'.")
        else:
            if os.path.exists(opts.mcell_build_infrastructure_builds_dir):
                log("Copying release '" + opts.result_bundle_archive_path +
                    "'  to '" + opts.mcell_build_infrastructure_builds_dir +
                    "'.")
                shutil.copy(opts.result_bundle_archive_path,
                            opts.mcell_build_infrastructure_builds_dir)
            else:
                fatal_error("Could not find directory '" +
                            opts.mcell_build_infrastructure_builds_dir +
                            "', release was not stored but can be found as '" +
                            opts.result_bundle_archive_path + "'.")

    log("--- All tasks finished successfully ---")
Example #19
0
    def update_reference(self) -> None:
        seed_dir = self.get_seed_dir()
        viz_reference = os.path.join(self.test_src_path,
                                     get_ref_viz_data_dir(self.mcell4_testing),
                                     seed_dir)
        viz_res = os.path.join(self.test_work_path,
                               get_viz_data_dir(self.mcell4_testing), seed_dir)

        if os.path.exists(viz_res):
            # remove whole directory
            if os.path.exists(viz_reference):
                log("Cleaning old data in " + viz_reference)
                shutil.rmtree(viz_reference)

            # copy the first and the last viz data file
            log("New reference from " + viz_res)
            files = os.listdir(viz_res)
            if not files:
                fatal_error("There are no reference data in " + viz_res)

            files.sort()

            log("Updating reference " + viz_reference + " with data from " +
                viz_res)
            log("  File 1:" + files[0])
            log("  File 1:" + files[-1])

            if not os.path.exists(viz_reference):
                os.makedirs(viz_reference)
            shutil.copyfile(os.path.join(viz_res, files[0]),
                            os.path.join(viz_reference, files[0]))
            shutil.copyfile(os.path.join(viz_res, files[-1]),
                            os.path.join(viz_reference, files[-1]))

        # copy all react data files
        react_reference = os.path.join(
            self.test_src_path,
            get_ref_react_data_dir(self.mcell4_testing,
                                   self.extra_args.mcell4_32), seed_dir)
        react_res = os.path.join(self.test_work_path,
                                 get_react_data_dir(self.mcell4_testing),
                                 seed_dir)

        if os.path.exists(react_res):
            # remove whole directory
            if os.path.exists(react_reference):
                log("Cleaning old data in " + react_reference)
                shutil.rmtree(react_reference)

            # and update all files
            log("Updating reference " + react_reference + " with data from " +
                react_res)
            shutil.copytree(react_res, react_reference)

        # copy all dyn_geom data files
        dyn_geom_reference = os.path.join(self.test_src_path,
                                          REF_DYN_GEOM_DATA_DIR)
        dyn_geom_res = os.path.join(self.test_work_path, DYN_GEOM_DATA_DIR)

        if os.path.exists(dyn_geom_res):
            # remove whole directory
            if os.path.exists(dyn_geom_reference):
                log("Cleaning old data in " + dyn_geom_reference)
                shutil.rmtree(dyn_geom_reference)

            # and use every 50th file
            files = os.listdir(dyn_geom_res)
            if not files:
                fatal_error("There are no reference data in " + dyn_geom_res)
            files.sort()

            if not os.path.exists(dyn_geom_reference):
                os.makedirs(dyn_geom_reference)

            log("Updating reference " + dyn_geom_reference +
                " with data from " + dyn_geom_res)
            for i in range(0, len(files), 50):
                log("Updating reference file '" + files[i] + '"')
                shutil.copyfile(os.path.join(dyn_geom_res, files[i]),
                                os.path.join(dyn_geom_reference, files[i]))

        # and also check the .gdat files generated with mcellr mode
        mcellr_gdat_reference = os.path.join(self.test_src_path,
                                             REF_MCELLR_GDAT_DATA_DIR)
        mcellr_gdat_res = os.path.join(self.test_work_path,
                                       MCELLR_GDAT_DATA_DIR)

        print("PATH:" + mcellr_gdat_res)
        gdat_files = os.listdir(mcellr_gdat_res)
        print("F1:" + str(gdat_files))
        gdat_files = [f for f in gdat_files if f.endswith('.gdat')]
        print("F2:" + str(gdat_files))
        if gdat_files:
            # remove whole directory
            if os.path.exists(mcellr_gdat_reference):
                log("Cleaning old data in " + mcellr_gdat_reference)
                shutil.rmtree(mcellr_gdat_reference)

            log("Updating reference .gdat files " + mcellr_gdat_reference +
                " with data from " + mcellr_gdat_res)
            if not os.path.exists(mcellr_gdat_reference):
                os.makedirs(mcellr_gdat_reference)
            for f in gdat_files:
                log("Updating reference file '" + f + "'")
                shutil.copyfile(os.path.join(mcellr_gdat_res, f),
                                os.path.join(mcellr_gdat_reference, f))
Example #20
0
def load_test_config(config_path: str) -> List[TestSetInfo]:
    top_dict = toml.load(config_path)

    config_dir = os.path.dirname(config_path)

    # map tester class names used in the config file to the classes themselves
    tester_classes = {c.__name__: c for c in [
        TesterMdl, TesterDataModel, TesterDataModelConverter, TesterNutmeg,
        TesterPython, TesterPymcell4, TesterDataModelPymcell4,
        TesterMdlDataModelPymcell4, TesterNutmegPymcell4, TesterBnglMcell3R,
        TesterBnglPymcell4, TesterBnglPymcell4Export,
        TesterBnglDataModelPymcell4, TesterPymcell4ExportBng, TesterExternal,
        BenchmarkMdl, BenchmarkBngl, BenchmarkMdlDataModelPymcell4,
        ValidatorBngVsPymcell4, ValidatorMcell3VsMcell4Mdl]}

    res = []
    if KEY_SET in top_dict:
        sets_list = get_dict_value(top_dict, KEY_SET, config_path)
        for set in sets_list:
            category = get_dict_value(set, KEY_CATEGORY, config_path)
            test_set_name = get_dict_value(set, KEY_TEST_SET, config_path)
            class_name = get_dict_value(set, KEY_TESTER_CLASS, config_path)
            if class_name not in tester_classes:
                fatal_error("Unknown tester class '" + class_name + "' in '" +
                            config_path + "'.")
            tester_class = tester_classes[class_name]

            test_dir_suffix = ''
            if KEY_TEST_DIR_SUFFIX in set:
                test_dir_suffix = get_dict_value(set, KEY_TEST_DIR_SUFFIX,
                                                 config_path)

            args = []
            if KEY_ARGS in set:
                args = set[KEY_ARGS]
            res.append(
                TestSetInfo(category, test_set_name, tester_class,
                            test_dir_suffix, args))

    if KEY_INCLUDE in top_dict:
        includes_list = get_dict_value(top_dict, KEY_INCLUDE, config_path)

        for include in includes_list:
            file = get_dict_value(include, KEY_FILE, config_path)
            # load included file recursively
            included_fname = os.path.join(config_dir, file)
            included_test_set_infos = load_test_config(included_fname)

            if KEY_TEST_DIR_SUFFIX in include:
                # append test dir suffix
                for info in included_test_set_infos:
                    info.test_dir_suffix += get_dict_value(
                        include, KEY_TEST_DIR_SUFFIX, included_fname)

            res += included_test_set_infos

    return res
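A hypothetical config in the shape this loader expects; the literal key names ('set', 'category', ...) are assumptions here, since only the KEY_* constants are visible in this snippet:

    [[set]]
    category = "tests"
    test_set = "reactions"
    tester_class = "TesterMdl"

    [[include]]
    file = "more_tests.toml"
    test_dir_suffix = "_v2"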
Example #21
0
def check_file_exists(name):
    if not os.path.exists(name):
        fatal_error("Required file '" + name + "' does not exist")