Example #1
def func_compiler_find(args=None):
    """This method implements ``buildtest config compilers find`` which detects
    new compilers based on module names defined in configuration. If system has
    Lmod we use Lmodule API to detect the compilers. For environment-modules we
    search for all modules in current ``$MODULEPATH``.
    """

    settings_file = resolve_settings_file()
    configuration = load_settings(settings_file)

    bc = BuildtestCompilers(debug=args.debug)
    bc.find_compilers()
    configuration["compilers"]["compiler"] = bc.compilers

    custom_validator(configuration,
                     schema_table["settings.schema.json"]["recipe"])

    print(f"Configuration File: {settings_file}")
    print("{:_<80}".format(""))
    print(
        yaml.safe_dump(configuration,
                       default_flow_style=False,
                       sort_keys=False))
    print("{:_<80}".format(""))
    print(f"Updating settings file:  {settings_file}")

    with open(settings_file, "w") as fd:
        yaml.safe_dump(configuration,
                       fd,
                       default_flow_style=False,
                       sort_keys=False)
Example #2
def test_valid_config_schemas():

    valid_schema_dir = os.path.join(pytest_root, "examples", "config_schemas",
                                    "valid")
    schema_config = load_schema(DEFAULT_SETTINGS_SCHEMA)
    for schema in walk_tree(valid_schema_dir, ".yml"):
        example = load_recipe(os.path.abspath(schema))
        custom_validator(recipe=example, schema=schema_config)
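Every example on this page follows the same pattern: load a JSON schema and a YAML recipe as dicts, then pass both to custom_validator, which raises jsonschema's ValidationError on failure (see Example #7). A minimal sketch of that pattern, assuming the buildtest helpers used above; the recipe path is a placeholder:

from jsonschema.exceptions import ValidationError

schema = load_schema(DEFAULT_SETTINGS_SCHEMA)  # JSON schema as a dict
recipe = load_recipe("/path/to/recipe.yml")    # YAML recipe as a dict
try:
    custom_validator(recipe=recipe, schema=schema)
except ValidationError as err:
    print(f"Recipe failed validation: {err.message}")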
Example #3
def check_settings(settings_path=None, executor_check=True, retrieve_settings=False):
    """Checks all keys in configuration file (settings/config.yml) are valid
    keys and ensure value of each key matches expected type. For some keys
    special logic is taken to ensure values are correct and directory path
    exists. If any error is found buildtest will terminate immediately.

    :param settings_path: Path to buildtest settings file
    :type settings_path: str, optional
    :param executor_check: boolean to control if executor checks are performed
    :type executor_check: bool
    :param retrieve_settings: return loaded buildtest settings that is validated by schema. By default, this method doesn't return anything other than validating buildtest settings
    :type retrieve_settings: bool
    :return: returns gracefully if all checks passes otherwise terminate immediately
    :rtype: exit code 1 if checks failed
    """

    user_schema = load_settings(settings_path)

    logger.debug(f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
    config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)

    logger.debug(f"Validating user schema with schema: {DEFAULT_SETTINGS_SCHEMA}")
    custom_validator(recipe=user_schema, schema=config_schema)
    logger.debug("Validation was successful")

    # only perform executor checks if executor_check is True. This is the
    # default behavior; it can be disabled for regression tests where executor
    # checks such as the slurm check are not applicable.
    if executor_check:

        # `system` used below is expected to be a module-level BuildTestSystem instance
        # system = BuildTestSystem()

        slurm_executors = user_schema.get("executors", {}).get("slurm")
        lsf_executors = user_schema.get("executors", {}).get("lsf")
        cobalt_executors = user_schema.get("executors", {}).get("cobalt")

        if slurm_executors:
            validate_slurm_executors(slurm_executors)

        if lsf_executors:
            validate_lsf_executors(lsf_executors)

        if cobalt_executors:
            validate_cobalt_executors(cobalt_executors)

        if (
            user_schema.get("moduletool") != "N/A"
            and user_schema.get("moduletool") != system.system["moduletool"]
        ):

            raise BuildTestError(
                f"Cannot find modules_tool: {user_schema.get('moduletool')} from configuration, please confirm if you have environment-modules or lmod and specify the appropriate tool."
            )

    if retrieve_settings:
        return user_schema
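A usage sketch for check_settings with a placeholder settings path; passing retrieve_settings=True returns the validated configuration as a dict:

# Validate only; buildtest terminates if any check fails.
check_settings("/path/to/config.yml")

# Validate and return the loaded settings, skipping executor checks
# (useful in regression tests where e.g. Slurm is unavailable).
settings = check_settings(
    "/path/to/config.yml", executor_check=False, retrieve_settings=True
)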
Example #4
    def _validate(self):
        """This method validates the site configuration with schema"""

        logger.debug(
            f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
        config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)

        logger.debug(
            f"Validating user schema with schema: {DEFAULT_SETTINGS_SCHEMA}")
        custom_validator(recipe=self.config, schema=config_schema)
        logger.debug("Validation was successful")
Example #5
def check_valid_recipes(recipes, valids, loaded):
    """This method validates all recipes found in tests/valid/global with global schema: global/global.schema.json"""

    for recipe in recipes:
        assert recipe
        assert re.search("(yml|yaml)$", recipe)
        recipe_path = os.path.join(valids, recipe)
        content = load_recipe(recipe_path)

        custom_validator(recipe=content, schema=loaded)
        print("Recipe File: %s should be valid" % recipe_path)
Example #6
    def _validate(self):
        """This method will validate the entire buildspec file with global schema
        and each test section with a sub-schema. The global validation ensures
        that the overall structure of the file is sound for further parsing.
        We load in the global.schema.json for this purpose.

        A buildspec is composed of one or more tests, each section is validated
        with a sub-schema. The ``type`` field is used for sub-schema lookup
        from schema library. Finally we validate loaded recipe with sub-schema.
        """

        self.logger.info(
            f"Validating {self.buildspec} with schema: {schema_table['global.schema.json']['path']}"
        )
        custom_validator(
            recipe=self.recipe, schema=schema_table["global.schema.json"]["recipe"]
        )

        self.schema_version = self.recipe.get("version", "latest")

        assert isinstance(self.recipe.get("buildspecs"), dict)

        # validate all test instances in the 'buildspecs' property. The
        # validation consists of checking the schema type and executor name,
        # then validating each section with its sub-schema
        for test in self.recipe["buildspecs"].keys():

            self.logger.info(
                "Validating test - '%s' in recipe: %s" % (test, self.buildspec)
            )

            # each buildspec section must be a dict where the test is defined;
            # if it's not a dict, we raise an error
            assert isinstance(self.recipe["buildspecs"].get(test), dict)

            self._check_schema_type(test)
            self._check_executor(test)

            self.schema_file = os.path.basename(
                schema_table[f"{self.schema_type}-v{self.schema_version}.schema.json"][
                    "path"
                ]
            )
            # validate test instance with sub schema
            custom_validator(
                recipe=self.recipe["buildspecs"][test],
                schema=schema_table[
                    f"{self.schema_type}-v{self.schema_version}.schema.json"
                ]["recipe"],
            )
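The sub-schema lookup key above is built from the test's type field and the buildspec version. A small sketch with hypothetical values:

schema_type = "script"   # hypothetical value of a test's 'type' field
schema_version = "1.0"   # from the buildspec 'version' key, or "latest"
key = f"{schema_type}-v{schema_version}.schema.json"
# key == "script-v1.0.schema.json"; it indexes schema_table for both
# the schema path and the loaded schema ('recipe') used in validation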
Example #7
def check_invalid_recipes(recipes, invalids, loaded):
    """This method validates all recipes found in tests/invalid/global with global schema: global/global.schema.json"""

    for recipe in recipes:
        assert recipe
        assert re.search("(yml|yaml)$", recipe)

        recipe_path = os.path.join(invalids, recipe)
        content = load_recipe(recipe_path)

        with pytest.raises(ValidationError) as excinfo:
            custom_validator(recipe=content, schema=loaded)
        print(excinfo.type, excinfo.value)
        print("Recipe File: %s  should be invalid" % recipe_path)
Example #8
def check_valid_recipes(recipes, valids, loaded, version):
    for recipe in recipes:
        assert recipe
        assert re.search("(yml|yaml)$", recipe)
        recipe_path = os.path.join(valids, recipe)
        content = load_recipe(recipe_path)

        # Ensure version is correct in header
        assert content["version"] == version
        del content["version"]

        # For each test section, validate against the loaded sub-schema
        for name in content["buildspecs"].keys():
            custom_validator(recipe=content["buildspecs"][name], schema=loaded)
            print("Testing %s from recipe %s should be valid" % (name, recipe))
Example #9
def test_settings_examples():

    # load the settings schema as a dict
    recipe = load_schema(settings_schema)

    valid = os.path.join(settings_schema_examples, "valid")
    assert valid

    valid_recipes = os.listdir(valid)
    assert valid_recipes
    # check all valid recipes
    for example in valid_recipes:

        filepath = os.path.join(valid, example)
        print(f"Loading Recipe File: {filepath}")
        example_recipe = load_recipe(filepath)
        assert example_recipe

        print(f"Expecting Recipe File: {filepath} to be valid")
        custom_validator(recipe=example_recipe, schema=recipe)
Example #10
def test_build_executor(tmp_path):

    settings_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)
    example = load_recipe(DEFAULT_SETTINGS_FILE)
    custom_validator(recipe=example, schema=settings_schema)

    # Load BuildExecutor
    be = BuildExecutor(example)
    # We should have a total of 5 executors (local.bash, local.sh, local.csh, local.zsh, local.python)
    assert len(be.executors) == 5
    assert list(be.executors.keys()) == [
        "local.bash",
        "local.sh",
        "local.csh",
        "local.zsh",
        "local.python",
    ]

    # Each executor should have a _settings attribute
    for name, executor in be.executors.items():
        assert hasattr(executor, "_settings")

    examples_dir = os.path.join(pytest_root, "examples", "buildspecs")
    for buildspec in os.listdir(examples_dir):
        buildspec = os.path.join(examples_dir, buildspec)
        try:
            bp = BuildspecParser(buildspec)
        except (SystemExit, ValidationError):
            continue

        bp_filters = {"tags": None}
        builders = Builder(bp=bp, filters=bp_filters, testdir=tmp_path)
        valid_builders = builders.get_builders()

        # build each test and then run it
        for builder in valid_builders:
            builder.build()
            be.run(builder)
            assert builder.metadata["result"]