Example #1
def test_compiler_examples():

    loaded = load_schema(schema_path)
    assert isinstance(loaded, dict)

    # Assert the schema file is named correctly
    print("Getting version of %s" % schema_file)
    match = re.search(
        "%s-v(?P<version>[0-9]{1}[.][0-9]{1})[.]schema[.]json" % schema_name,
        schema_file,
    )
    print(match)
    assert match

    # Ensure we found a version
    assert match.groups()
    version = match["version"]

    invalids = os.path.join(compiler_schema_examples, "invalid")
    valids = os.path.join(compiler_schema_examples, "valid")
    print(invalids, valids)
    assert invalids
    assert valids

    invalid_recipes = os.listdir(invalids)
    valid_recipes = os.listdir(valids)

    assert invalid_recipes
    assert valid_recipes

    check_valid_recipes(valid_recipes, valids, loaded, version)
    check_invalid_recipes(invalid_recipes, invalids, loaded, version)
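For reference, a standalone sketch of the version-extraction pattern used above, applied to a sample filename of the form shown in Example #9 (the concrete values here are illustrative):

import re

# Illustrative values; in the test above, schema_name and schema_file come
# from module-level fixtures rather than literals.
schema_name = "compiler"
schema_file = "compiler-v1.0.schema.json"
match = re.search(
    "%s-v(?P<version>[0-9]{1}[.][0-9]{1})[.]schema[.]json" % schema_name,
    schema_file,
)
assert match and match["version"] == "1.0"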
Example #2
def test_global_examples():
    """This validates all valid/invalid examples for global schema"""

    loaded = load_schema(schema_path)
    assert isinstance(loaded, dict)

    invalid_dir = os.path.abspath(os.path.join(global_schema_examples, "invalid"))
    valid_dir = os.path.abspath(os.path.join(global_schema_examples, "valid"))

    assert invalid_dir
    assert valid_dir

    invalid_recipes = os.listdir(invalid_dir)
    valid_recipes = os.listdir(valid_dir)

    assert invalid_recipes
    assert valid_recipes

    print(f"Detected Invalid Global Directory: {invalid_dir}")
    print(f"Detected Valid Global Directory: {valid_dir}")
    print(f"Invalid Recipes: {invalid_recipes}")
    print(f"Valid Recipes: {valid_recipes}")

    check_invalid_recipes(invalid_recipes, invalid_dir, loaded)
    check_valid_recipes(valid_recipes, valid_dir, loaded)
Example #3
def test_valid_config_schemas():

    valid_schema_dir = os.path.join(pytest_root, "examples", "config_schemas",
                                    "valid")
    schema_config = load_schema(DEFAULT_SETTINGS_SCHEMA)
    for schema in walk_tree(valid_schema_dir, ".yml"):
        example = load_recipe(os.path.abspath(schema))
        custom_validator(recipe=example, schema=schema_config)
Example #4
def check_settings(settings_path=None, executor_check=True, retrieve_settings=False):
    """Checks all keys in configuration file (settings/config.yml) are valid
    keys and ensure value of each key matches expected type. For some keys
    special logic is taken to ensure values are correct and directory path
    exists. If any error is found buildtest will terminate immediately.

    :param settings_path: Path to buildtest settings file
    :type settings_path: str, optional
    :param executor_check: boolean to control if executor checks are performed
    :type executor_check: bool
    :param retrieve_settings: return loaded buildtest settings that is validated by schema. By default, this method doesn't return anything other than validating buildtest settings
    :type retrieve_settings: bool
    :return: returns gracefully if all checks passes otherwise terminate immediately
    :rtype: exit code 1 if checks failed
    """

    user_schema = load_settings(settings_path)

    logger.debug(f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
    config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)

    logger.debug(f"Validating user schema with schema: {DEFAULT_SETTINGS_SCHEMA}")
    custom_validator(recipe=user_schema, schema=config_schema)
    logger.debug("Validation was successful")

    # only perform executor checks if executor_check is True. This is the default
    # behavior; it can be disabled for regression tests where executor checks
    # such as the slurm check are not applicable.
    if executor_check:

        # system = BuildTestSystem()

        slurm_executors = user_schema.get("executors", {}).get("slurm")
        lsf_executors = user_schema.get("executors", {}).get("lsf")
        cobalt_executors = user_schema.get("executors", {}).get("cobalt")

        if slurm_executors:
            validate_slurm_executors(slurm_executors)

        if lsf_executors:
            validate_lsf_executors(lsf_executors)

        if cobalt_executors:
            validate_cobalt_executors(cobalt_executors)

        if (
            user_schema.get("moduletool") != "N/A"
            and user_schema.get("moduletool") != system.system["moduletool"]
        ):

            raise BuildTestError(
                f"Cannot find modules_tool: {user_schema.get('moduletool')} from configuration, please confirm if you have environment-modules or lmod and specify the appropriate tool."
            )

    if retrieve_settings:
        return user_schema
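A minimal usage sketch for check_settings; the import path below is an assumption (it is not shown in the example) and may differ between buildtest releases:

# Hypothetical usage: validate the active settings file, skip executor checks
# (useful where slurm/lsf/cobalt are unavailable), and retrieve the validated
# settings dictionary. The import path is an assumption.
from buildtest.config import check_settings

settings = check_settings(executor_check=False, retrieve_settings=True)
print(settings.get("moduletool"))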
Example #5
    def _validate(self):
        """This method validates the site configuration with schema"""

        logger.debug(
            f"Loading default settings schema: {DEFAULT_SETTINGS_SCHEMA}")
        config_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)

        logger.debug(
            f"Validating user schema with schema: {DEFAULT_SETTINGS_SCHEMA}")
        custom_validator(recipe=self.config, schema=config_schema)
        logger.debug("Validation was successful")
Example #6
def test_settings_examples():

    # load schema and ensure type is a dict
    recipe = load_schema(settings_schema)
    assert isinstance(recipe, dict)

    valid = os.path.join(settings_schema_examples, "valid")
    assert valid

    valid_recipes = os.listdir(valid)
    assert valid_recipes
    # check all valid recipes
    for example in valid_recipes:

        filepath = os.path.join(valid, example)
        print(f"Loading Recipe File: {filepath}")
        example_recipe = load_recipe(filepath)
        assert example_recipe

        print(f"Expecting Recipe File: {filepath} to be valid")
        custom_validator(recipe=example_recipe, schema=recipe)
Example #7
def test_script_examples(tmp_path):
    """the script test_organization is responsible for all the schemas
       in the root of the repository, under <schema>/examples.
       A schema specific test is intended to run tests that
       are specific to a schema. In this case, this is the "script"
       folder. Invalid examples should be under ./invalid/script.
    """

    print("Testing schema %s" % schema_file)
    print("schema_path:", schema_path)
    loaded = load_schema(schema_path)
    assert isinstance(loaded, dict)

    # Assert the schema file is named correctly
    print("Getting version of %s" % schema_file)
    match = re.search(
        "%s-v(?P<version>[0-9]{1}[.][0-9]{1})[.]schema[.]json" % schema_name,
        schema_file,
    )
    assert match

    # Ensure we found a version
    assert match.groups()
    version = match["version"]

    # Ensure a version folder exists with invalids
    print("Checking that invalids exist for %s" % schema_file)
    invalids = os.path.join(schema_examples, "invalid")
    valids = os.path.join(schema_examples, "valid")

    assert invalids
    assert valids

    invalid_recipes = os.listdir(invalids)
    valid_recipes = os.listdir(valids)

    assert invalid_recipes
    assert valid_recipes

    check_valid_recipes(valid_recipes, valids, loaded, version)
    check_invalid_recipes(invalid_recipes, invalids, loaded, version)
Example #8
def test_build_executor(tmp_path):

    settings_schema = load_schema(DEFAULT_SETTINGS_SCHEMA)
    example = load_recipe(DEFAULT_SETTINGS_FILE)
    custom_validator(recipe=example, schema=settings_schema)

    # Load BuildExecutor
    be = BuildExecutor(example)
    # We should have a total of 5 executors (local.bash, local.sh, local.csh, local.zsh, local.python)
    assert len(be.executors) == 5
    assert list(be.executors.keys()) == [
        "local.bash",
        "local.sh",
        "local.csh",
        "local.zsh",
        "local.python",
    ]

    # Each executor should have a _settings attribute
    for name, executor in be.executors.items():
        assert hasattr(executor, "_settings")

    examples_dir = os.path.join(pytest_root, "examples", "buildspecs")
    for buildspec in os.listdir(examples_dir):
        buildspec = os.path.join(examples_dir, buildspec)
        try:
            bp = BuildspecParser(buildspec)
        except (SystemExit, ValidationError):
            continue

        bp_filters = {"tags": None}
        builders = Builder(bp=bp, filters=bp_filters, testdir=tmp_path)
        valid_builders = builders.get_builders()

        # build each test and then run it
        for builder in valid_builders:
            builder.build()
            be.run(builder)
            assert builder.metadata["result"]
Example #9
schema_table["types"] = ["script", "compiler"]
schema_table["names"] = [
    "global.schema.json",
    "definitions.schema.json",
    "settings.schema.json",
    "compiler-v1.0.schema.json",
    "script-v1.0.schema.json",
]
schema_table["versions"] = {}
schema_table["versions"]["script"] = ["1.0"]
schema_table["versions"]["compiler"] = ["1.0"]

schema_table["global.schema.json"] = {}
schema_table["global.schema.json"]["path"] = os.path.join(
    here, "global.schema.json")
schema_table["global.schema.json"]["recipe"] = load_schema(
    schema_table["global.schema.json"]["path"])
schema_table["script-v1.0.schema.json"] = {}
schema_table["script-v1.0.schema.json"]["path"] = os.path.join(
    here, "script-v1.0.schema.json")
schema_table["script-v1.0.schema.json"]["recipe"] = load_schema(
    schema_table["script-v1.0.schema.json"]["path"])
schema_table["compiler-v1.0.schema.json"] = {}
schema_table["compiler-v1.0.schema.json"]["path"] = os.path.join(
    here, "compiler-v1.0.schema.json")
schema_table["compiler-v1.0.schema.json"]["recipe"] = load_schema(
    schema_table["compiler-v1.0.schema.json"]["path"])

schema_table["definitions.schema.json"] = {}
schema_table["definitions.schema.json"]["path"] = os.path.join(
    here, "definitions.schema.json")