Code example #1
    def run(self, event, context):

        gh_hook = json.loads(event['body'])
        repo = gh_hook['repository']['full_name']
        sha = gh_hook['pull_request']['head']['sha']

        try:
            hooks_yml = get_github().get_repo(repo, lazy=True).get_file_contents('.hooks.yml', ref=sha)
            logger.info("Fetched .hooks.yml from repo {}".format(repo))
        except github.GithubException:
            logger.error("Missig .hooks.yml on repo {}".format(repo))
            send_status(event, context, gh_hook, self.configname, 'success', ".hooks.yml not present in branch")
            return

        try:
            hook_config = yaml.safe_load(hooks_yml.decoded_content)
            logger.info("Basic yml validation passed")
        except Exception as e:
            logger.error("Failed to decode hook yaml: " + e.message)
            send_status(event, context, gh_hook, self.configname, 'failure', "Could not decode branch .hooks.yml")
            return

        logger.info("Advanced schema validation")
        c = Core(source_data=hook_config,
                 schema_files=[os.path.join(os.path.dirname(__file__), "..", "hooks.schema.yml")])
        c.validate(raise_exception=False)
        vc = len(c.validation_errors)
        if vc > 0:
            for err in c.validation_errors:
                logger.error(" - {}".format(err))
            send_status(event, context, gh_hook, self.configname, 'failure', ".hooks.yml has {} validation errors; see log".format(vc))
            return

        send_status(event, context, gh_hook, self.configname, 'success', ".hooks.yml present and valid")
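All of these samples follow the same pykwalify pattern: build a Core from source data (or a source file) plus a schema, call validate(), and either catch SchemaError or inspect validation_errors afterwards. A minimal self-contained sketch of that pattern, assuming only that pykwalify and PyYAML are installed (the schema and data below are made up for illustration):

import yaml
from pykwalify.core import Core

# A pykwalify schema: a map whose 'hooks' key holds a sequence of strings.
schema = yaml.safe_load("""
type: map
mapping:
  hooks:
    type: seq
    sequence:
      - type: str
""")

data = yaml.safe_load("""
hooks:
  - build
  - deploy
""")

c = Core(source_data=data, schema_data=schema)
c.validate(raise_exception=False)  # collect errors instead of raising SchemaError
print(c.validation_errors)         # [] when the document matches the schema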
Code example #2
File: test_unicode.py  Project: hideaki-t/pykwalify
    def test_files_with_unicode_content_success(self):
        """
        These tests should pass with no exception raised
        """
        _pass_tests = [
            # Test mapping with unicode key and value
            u"1s.yaml",
            # Test unicode filename
            u"2så.yaml",
            # Test sequence with unicode keys
            u"3s.yaml",
        ]

        for passing_test_files in _pass_tests:
            f = unicode(self.f(passing_test_files))

            with open(f, "r") as stream:
                yaml_data = yaml.load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]

            try:
                print(u"Running test files: {}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
                compare(c.validation_errors, [], prefix="No validation errors should exist...")
            except Exception as e:
                print(u"ERROR RUNNING FILES: {}".format(f))
                raise e

            # This serves as extra schema validation that tests more complex structures than testrule.py does
            compare(c.root_rule._schema_str, schema, prefix=u"Parsed rules are not correct, something has changed... files: {}".format(f))
Code example #3
File: presence.py  Project: datawire/presence
def load_config(path):

    """validates, loads and configures the yaml document at the specified path

    :param path: the path to the file
    :return: the parsed yaml document
    :raises SchemaError: if the yaml document does not validate
    """

    validator = Core(source_file=path, schema_data=config_schema)
    validator.validate(raise_exception=True)

    pattern = re.compile(r'^(.*)<%= ENV\[\'(.*)\'\] %>(.*)$')
    yaml.add_implicit_resolver('!env_regex', pattern)

    def env_regex(loader, node):
        value = loader.construct_scalar(node)
        front, variable_name, back = pattern.match(value).groups()
        return str(front) + os.environ[variable_name] + str(back)

    yaml.add_constructor('!env_regex', env_regex)

    with open(path, 'r') as stream:
        doc = yaml.load(stream)
        return doc
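For reference, the implicit resolver registered above rewrites scalars of the form <%= ENV['NAME'] %> with the value of the named environment variable at load time. A standalone sketch of the same mechanism (the DB_HOST variable and the sample document are made up; like the original, it registers the constructor on PyYAML's global loader):

import os
import re
import yaml

pattern = re.compile(r'^(.*)<%= ENV\[\'(.*)\'\] %>(.*)$')
yaml.add_implicit_resolver('!env_regex', pattern)

def env_regex(loader, node):
    # Split the scalar around the <%= ENV['...'] %> marker and splice in the value.
    value = loader.construct_scalar(node)
    front, variable_name, back = pattern.match(value).groups()
    return front + os.environ[variable_name] + back

yaml.add_constructor('!env_regex', env_regex)

os.environ['DB_HOST'] = 'db.example.com'  # hypothetical variable for the demo
print(yaml.load("host: pg-<%= ENV['DB_HOST'] %>:5432", Loader=yaml.Loader))
# {'host': 'pg-db.example.com:5432'}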
Code example #4
File: generator.py  Project: jboss-dockerfiles/dogen
    def _validate_cfg(self):
        """
        Open and parse the YAML configuration file and ensure it matches
        our Schema for a Dogen configuration.
        """
        # Fail early if descriptor file is not found
        if not os.path.exists(self.descriptor):
            raise Error("Descriptor file '%s' could not be found. Please make sure you specified correct path." % self.descriptor)

        schema_path = os.path.join(self.pwd, "schema", "kwalify_schema.yaml")
        schema = {}
        with open(schema_path, 'r') as fh:
            schema = yaml.safe_load(fh)

        if schema is None:
            raise Error("couldn't read a valid schema at %s" % schema_path)

        for plugin in self.plugins:
            plugin.extend_schema(schema)

        with open(self.descriptor, 'r') as stream:
            self.cfg = yaml.safe_load(stream)

        c = Core(source_data=self.cfg, schema_data=schema)
        try:
            c.validate(raise_exception=True)
        except SchemaError as e:
            raise Error(e)
Code example #5
def incoming(event, context):
    """
    Validate the incoming event from the API gateway
    """

    print(json.dumps(event))  # not logger.info() so it doesn't show up in logview itself :)

    # validate the secret
    if not validate_secret(event['headers'].get('X-Hub-Signature'), event['body']):
        return {"body": json.dumps({"error": "invalid signature"}), "statusCode": 403}

    # Get the hook info
    try:
        hookdata = json.loads(event['body'])
    except Exception:
        logger.error("Failed to decode json")
        return {"body": json.dumps({"error": "json decode failure"}), "statusCode": 500}

    # this will only work, for now, with hooks that include repo information
    if 'repository' not in hookdata:
        logger.error("No repository in the hook, no processing")
        return {"body": json.dumps({"error": "unsupported hook type; missing repository information"}), "statusCode": 501}

    repo = hookdata['repository']['full_name']

    # Now, we fetch the config from the repo to see what hooks we should trigger
    try:
        hooks_yml = get_github().get_repo(repo, lazy=True).get_file_contents('.hooks.yml')
        logger.info("Fetched .hooks.yml from repo {}".format(repo))
    except github.GithubException:
        logger.error("Missig .hooks.yml on repo {}".format(repo))
        return {"body": json.dumps({"error": "no .hooks.yml present"}), "statusCode": 501}

    try:
        hook_config = yaml.safe_load(hooks_yml.decoded_content)
    except Exception:
        logger.error("Failed to decode hook yaml")
        return {"body": json.dumps({"error": "hook yaml failure"}), "statusCode": 500}

    # Schema based validation
    c = Core(source_data=hook_config, schema_files=[os.path.join(os.path.dirname(__file__), "..", "hooks.schema.yml")])
    c.validate(raise_exception=False)
    if len(c.validation_errors) > 0:
        logger.error(c.validation_errors)
        return {"body": json.dumps({"error": "invalid hooks configuration"}), "statusCode": 501}

    ghevent = event['headers'].get('X-GitHub-Event', '')

    # Check hooks!
    logger.info("Qualifying checks:")
    for name, check in all_checks.get_all_checks().items():
        check_config = check.qualify(ghevent, hookdata, hook_config)
        if check_config is not False:  # explicit identity check so "empty" configs still qualify
            logger.info("- {} passed qualify, invoking secondary call".format(name))
            invoke_secondary(name, check_config, event)
        else:
            logger.info("- {} did not qualify, skipping".format(name))

    # all done!
    return {"body": json.dumps({"message": "Thanks"}), "statusCode": 200}
Code example #6
File: config.py  Project: ZhangAustin/attention-lvcsr
    def __init__(self, config_path, schema_path, config_changes):
        with open(config_path, 'rt') as src:
            config = read_config(src)
        make_config_changes(config, config_changes)

        self.multi_stage = 'stages' in config
        if self.multi_stage:
            ordered_changes = OrderedDict(
                sorted(config['stages'].items(),
                       key=lambda item: item[1]['number']))
            self.ordered_stages = OrderedDict()
            for name, changes in ordered_changes.items():
                current_config = copy.deepcopy(config)
                del current_config['stages']
                del changes['number']
                merge_recursively(current_config, changes)
                self.ordered_stages[name] = current_config

        # Validate the configuration and the training stages
        with open(os.path.expandvars(schema_path)) as schema_file:
            schema = yaml.safe_load(schema_file)
            core = Core(source_data=config, schema_data=schema)
            core.validate(raise_exception=True)
            if self.multi_stage:
                for stage in self.ordered_stages.values():
                    core = Core(source_data=stage, schema_data=schema)
                    core.validate(raise_exception=True)
        super(Configuration, self).__init__(config)
Code example #7
File: test_unicode.py  Project: hideaki-t/pykwalify
    def test_files_with_unicode_content_failing(self):
        """
        These tests should fail with the specified exception
        """
        _fail_tests = [
            # Test mapping with unicode key and value but wrong type
            (u"1f.yaml", SchemaError),
            # Test unicode filename with validation errors
            (u"2få.yaml", SchemaError),
            # Test unicode data inside seq but wrong type
            (u"3f.yaml", SchemaError),
        ]

        for failing_test, exception_type in _fail_tests:
            # f = self.f(os.path.join("fail", failing_test))
            f = unicode(self.f(failing_test))

            with open(f, "r") as stream:
                yaml_data = yaml.load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]
                errors = yaml_data["errors"]

            try:
                print(u"Running test files: {}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
            except exception_type:
                pass  # OK
            else:
                raise AssertionError(u"Exception {} not raised as expected... FILES: {} : {}".format(exception_type, exception_type))

            compare(sorted(c.validation_errors), sorted(errors), prefix=u"Wrong validation errors when parsing files : {}".format(f))
Code example #8
File: ds_test.py  Project: CingHu/lagopus
def check_schema_test(opts, file):
    logging.info("check schema...: %s" % file)
    try:
        c = Core(source_file=file, schema_files=[opts.yaml_schema])
        c.validate(raise_exception=True)
    except SchemaError:
        print("check schema: %-80s  %s" % (file, RET_ERROR))
        raise
Code example #9
File: presence.py  Project: datawire/presence
def validate_result(data):
    try:
        validator = Core(source_data=data, schema_data=result_schema)
        validator.validate(raise_exception=True)
    except SchemaError as se:
        raise PresenceError(se)

    return data
Code example #10
File: _ext.py  Project: piranha/osgameclones
def validate_with_schema(source_data, schema_file):
    core = Core(source_data=source_data, schema_files=[schema_file])
    try:
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(source_data, core.errors)
        else:
            raise error
Code example #11
File: config.py  Project: rabbitstack/fibratus
    def load(self, validate=True):
        schema_file = os.path.join(sys._MEIPASS, 'schema.yml') \
            if hasattr(sys, '_MEIPASS') else self._default_schema_path
        try:
            self._yaml = anyconfig.load(self.path, ignore_missing=False)
        except FileNotFoundError:
            panic('ERROR - %s configuration file does not exist' % self.path)
        if validate:
            validator = Core(source_file=self.path, schema_files=[schema_file])
            validator.validate(raise_exception=True)
Code example #12
def validate_config_yaml(package_name):
  """Check that an integration's config.yaml file has a valid schema

  Raises:
    Exception if the config.yaml file has an improper schema
  """
  resource_path = os.path.join('schema_files', 'config_schema.yaml')
  file_path = pkg_resources.resource_filename(resource_package, resource_path)
  schema_validator = Core(source_file=os.path.join(package_name, 'config.yaml'), schema_files=[file_path])
  schema_validator.validate(raise_exception=True)
Code example #13
File: lagopus_test.py  Project: 1514louluo/lagopus
def check_schema_test(opts, file):
    logging.info("check schema...: %s" % file)
    try:
        c = Core(source_file=file, schema_files=[opts.yaml_schema])
        c.validate(raise_exception=True)
    except SchemaError as e:
        six.print_("check schema: %-80s  ERROR" % file)
        raise
    else:
        six.print_("check schema: %-80s  OK" % file)
Code example #14
File: testcore.py  Project: huanghao/pykwalify
    def test_validation_error_but_not_raise_exception(self):
        """
        Test that no exception is raised when validating with 'raise_exception=False'.

        Currently files 2a.yaml & 2b.yaml are designed to cause validation errors.
        """
        c = Core(source_file=self.f("cli", "2a.yaml"), schema_files=[self.f("cli", "2b.yaml")])
        c.validate(raise_exception=False)

        assert c.validation_errors == ["Value: 1 is not of type 'str' : /0", "Value: 2 is not of type 'str' : /1", "Value: 3 is not of type 'str' : /2"]
Code example #15
File: test_unicode.py  Project: Grokzen/pykwalify
    def test_files_with_unicode_content_failing(self, tmpdir):
        """
        These tests should fail with the specified exception
        """
        # To trigger schema exception we must pass in a source file
        fail_data_2f_yaml = {
            'schema': {
                'type': 'map',
                'mapping': {
                    'msg': {
                        'type': 'int',
                    },
                }
            },
            'data': {
                'msg': 'Foobar',
            },
            'errors': ["Value 'Foobar' is not of type 'int'. Path: '/msg'"]
        }

        source_f = tmpdir.join(u"2få.json")
        source_f.write(yaml.safe_dump(fail_data_2f_yaml, allow_unicode=True))

        _fail_tests = [
            # Test mapping with unicode key and value but wrong type
            (u"1f.yaml", SchemaError),
            # Test unicode filename with validation errors.
            # It is not possible to package a file with unicode characters
            # like åäö in the filename in some python versions.
            # Mock a file with åäö during testing to properly simulate this again.
            (unicode(source_f), SchemaError),
            # Test unicode data inside seq but wrong type
            (u"3f.yaml", SchemaError),
        ]

        for failing_test, exception_type in _fail_tests:
            f = self.f(failing_test)

            with open(f, "r") as stream:
                yaml_data = yaml.safe_load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]
                errors = yaml_data["errors"]

            try:
                print(u"Running test files: {0}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
            except exception_type:
                pass  # OK
            else:
                raise AssertionError(u"Exception {0} not raised as expected... FILES: {1} : {2}".format(exception_type, exception_type))

            compare(sorted(c.validation_errors), sorted(errors), prefix=u"Wrong validation errors when parsing files : {0}".format(f))
Code example #16
def test_component_data_valid():
    """ Check that the content of data fits with masonry schema v2 """
    validator = Core(source_data={}, schema_data=get_schema())
    for component_file in iglob('*/component.yaml'):
        print(component_file)
        with open(component_file) as stream:
            source_data = yaml.safe_load(stream)
        validator.source = source_data
        try:
            validator.validate(raise_exception=True)
        except Exception:
            assert False, "Error found in: {0}".format(component_file)
Code example #17
File: test_unicode.py  Project: Grokzen/pykwalify
    def test_files_with_unicode_content_success(self, tmpdir):
        """
        These tests should pass with no exception raised
        """
        fail_data_2s_yaml = {
            'schema': {
                'type': 'map',
                'mapping': {
                    'msg': {
                        'type': 'int',
                    },
                }
            },
            'data': {
                'msg': 123,
            },
            'errors': []
        }

        source_f = tmpdir.join(u"2så.json")
        source_f.write(yaml.safe_dump(fail_data_2s_yaml, allow_unicode=True))

        _pass_tests = [
            # Test mapping with unicode key and value
            u"1s.yaml",
            # Test unicode filename.
            # It is not possible to package a file with unicode characters
            # like åäö in the filename in some python versions.
            # Mock a file with åäö during testing to properly simulate this again.
            unicode(source_f),
            # Test sequence with unicode keys
            u"3s.yaml",
        ]

        for passing_test_files in _pass_tests:
            f = self.f(passing_test_files)

            with open(f, "r") as stream:
                yaml_data = yaml.safe_load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]

            try:
                print(u"Running test files: {0}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
                compare(c.validation_errors, [], prefix="No validation errors should exist...")
            except Exception as e:
                print(u"ERROR RUNNING FILES: {0}".format(f))
                raise e

            # This serves as extra schema validation that tests more complex structures than testrule.py does
            compare(c.root_rule.schema_str, schema, prefix=u"Parsed rules are not correct, something has changed... files: {0}".format(f))
Code example #18
def validate_fittings(file_path):
    with open(file_path, 'r') as document_stream:
        document = document_stream.read()
        for settings in yaml.load_all(document):
            logging.debug("Validating source data %s",
                         settings)

            c = Core(source_data=settings, schema_files=["schema.yaml"])
            try:
                c.validate(raise_exception=True)
            except pykwalify.errors.SchemaError as schema_error:
                logging.error("Validation of %s failed.", file_path)
                logging.error(schema_error)
Code example #19
File: snapsync.py  Project: dtaylor84/pysnapsync
    def load(self, config_file):
        """Load configuration from config_file."""
        with resource_stream(__name__, 'config-schema.yaml') as schema_stream:
            schema = yaml.load(schema_stream)

        core = Core(source_file=config_file, schema_data=schema)
        self.config = core.validate(raise_exception=True)
Code example #20
    def validate_config(self):
        try:
            c = Core(source_file="/Users/JohnS5/dev/replication_manager/src/webapp/bdr_app.yml",
                     schema_files=['/Users/JohnS5/dev/replication_manager/src/webapp/schema.yml'])
            return c.validate(raise_exception=True)
        except Exception as e:
            print("LOG: ERROR: config file is not valid")
            print(e)
            return None
Code example #21
File: validate.py  Project: gijzelaerr/kliko
def validate_parameters(parameters, kliko):
    """
    validate a set of parameters given a kliko definition

    args:
        parameters (dict): A structure that should follow the given kliko structure
        kliko (dict): A nested dict which defines the valid parameters in Kliko format

    returns:
        bool: True if the parameters validate

    raises:
        an exception if the parameters can't be parsed or do not match the kliko definition
    """
    schema = convert_to_parameters_schema(kliko)
    c = Core(source_data=parameters, schema_data=schema)
    c.validate(raise_exception=True)
    return True
Code example #22
File: validate.py  Project: gijzelaerr/kliko
def validate_kliko(kliko, version=SCHEMA_VERSION):
    """
    validate a kliko yaml string

    args:
        kliko: a parsed kliko object

    returns:
        dict: a (nested) kliko structure

    raises:
        an exception if the string can't be parsed or does not follow the Kliko schema
    """
    # first try to parse it, to make sure it is parsable

    schema_file = os.path.join(here, "schemas/%s.yml" % version)
    c = Core(source_data=kliko, schema_files=[schema_file])
    c.validate(raise_exception=True)
    return kliko
Code example #23
File: domain.py  Project: rohitjun08/rasa_core
    def validate_domain_yaml(cls, yaml):
        """Validate domain yaml."""
        from pykwalify.core import Core

        log = logging.getLogger('pykwalify')
        log.setLevel(logging.WARN)

        schema_file = pkg_resources.resource_filename(__name__,
                                                      "schemas/domain.yml")
        source_data = utils.read_yaml_string(yaml)
        c = Core(source_data=source_data,
                 schema_files=[schema_file])
        try:
            c.validate(raise_exception=True)
        except SchemaError:
            raise ValueError("Failed to validate your domain yaml. "
                             "Make sure the file is correct, to do so"
                             "take a look at the errors logged during "
                             "validation previous to this exception. ")
Code example #24
File: _ext.py  Project: DrMcCoy/osgameclones
def parse_data(site):
    base = op.join(op.dirname(__file__), 'games')
    data = []
    for fn in sorted(os.listdir(base)):
        if fn.endswith('.yaml'):
            with open(op.join(base, fn)) as fh:
                data.extend(yaml.load(fh))

    try:
        core = Core(source_data=data, schema_files=['schema.yaml'])
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(data, core.errors)
        else:
            raise error

    for item in data:
        parse_global_tags(site, item.get('meta', {}), 'genre')
        parse_items(site, item, 'remakes')
        parse_items(site, item, 'clones')
Code example #25
File: yaml.py  Project: EdwardBetts/sismic
def import_from_yaml(statechart: Iterable[str], ignore_schema: bool=False, ignore_validation: bool=False) -> Statechart:
    """
    Import a statechart from a YAML representation.
    YAML is first validated against *sismic.io.yaml.SCHEMA_PATH*, and the resulting statechart is validated
    using its *validate* method.

    :param statechart: string or any equivalent object
    :param ignore_schema: set to *True* to disable yaml validation.
    :param ignore_validation: set to *True* to disable statechart validation.
    :return: a *Statechart* instance
    """
    data = yaml.load(statechart)  # type: dict
    if not ignore_schema:
        checker = Core(source_data=data, schema_files=[SCHEMA_PATH])
        checker.validate(raise_exception=True)

    sc = import_from_dict(data)
    if not ignore_validation:
        sc.validate()
    return sc
Code example #26
File: io.py  Project: tommens/sismic
def import_from_yaml(statechart: str, validate_yaml=True, validate_statechart=True) -> StateChart:
    """
    Import a statechart from a YAML representation.
    YAML is first validated against ``io.SCHEMA``.

    :param statechart: string or any equivalent object
    :param validate_yaml: set to ``False`` to disable yaml validation.
    :param validate_statechart: set to ``False`` to disable statechart validation
      (see ``model.StateChart.validate``).
    :return: a ``StateChart`` instance
    """
    statechart_data = yaml.load(statechart)
    if validate_yaml:
        checker = Core(source_data=statechart_data, schema_files=[SCHEMA_PATH])
        checker.validate(raise_exception=True)

    sc = _import_from_dict(statechart_data['statechart'])
    if validate_statechart:
        sc.validate()

    return sc
Code example #27
File: testcore.py  Project: huanghao/pykwalify
    def test_multi_file_support(self):
        """
        This should test that multiple schema files are supported correctly
        """
        pass_tests = [
            # Test that include directive can be used at top level of the schema
            ([self.f("partial_schemas", "1s-schema.yaml"), self.f("partial_schemas", "1s-partials.yaml")], self.f("partial_schemas", "1s-data.yaml"), {'sequence': [{'include': 'fooone'}], 'type': 'seq'}),
            # # This test that include directive works inside sequence
            # ([self.f("33a.yaml"), self.f("33b.yaml")], self.f("33c.yaml"), {'sequence': [{'include': 'fooone'}], 'type': 'seq'}),
            # This test recursive schemas
            ([self.f("partial_schemas", "2s-schema.yaml"), self.f("partial_schemas", "2s-partials.yaml")], self.f("partial_schemas", "2s-data.yaml"), {'sequence': [{'include': 'fooone'}], 'type': 'seq'})
        ]

        failing_tests = [
            # Test include inside partial schema
            ([self.f("partial_schemas", "1f-schema.yaml"), self.f("partial_schemas", "1f-partials.yaml")], self.f("partial_schemas", "1f-data.yaml"), SchemaError, ['No partial schema found for name : fooonez : Existing partial schemas: fooone, foothree, footwo'])
        ]

        for passing_test in pass_tests:
            try:
                c = Core(source_file=passing_test[1], schema_files=passing_test[0])
                c.validate()
                compare(c.validation_errors, [], prefix="No validation errors should exist...")
            except Exception as e:
                print("ERROR RUNNING FILE: {} : {}".format(passing_test[0], passing_test[1]))
                raise e

            # This serves as extra schema validation that tests more complex structures than testrule.py does
            compare(c.root_rule._schema_str, passing_test[2], prefix="Parsed rules are not correct, something has changed...")

        for failing_test in failing_tests:
            with pytest.raises(failing_test[2], msg="Test files: {} : {}".format(", ".join(failing_test[0]), failing_test[1])):
                c = Core(schema_files=failing_test[0], source_file=failing_test[1])
                c.validate()

            if not c.validation_errors:
                raise AssertionError("No validation_errors was raised...")

            compare(sorted(c.validation_errors), sorted(failing_test[3]), prefix="Wrong validation errors when parsing files : {} : {}".format(failing_test[0], failing_test[1]))
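The partial schemas exercised above rely on pykwalify's documented "schema;name" convention together with the include keyword. A minimal sketch of the idea, with the root schema and the partial kept in a single document (the test above splits them across separate schema files, which Core merges):

import yaml
from pykwalify.core import Core

schema = yaml.safe_load("""
schema;fooone:
  type: map
  mapping:
    foo:
      type: str

type: seq
sequence:
  - include: fooone
""")

data = yaml.safe_load("""
- foo: bar
- foo: baz
""")

c = Core(source_data=data, schema_data=schema)
c.validate(raise_exception=True)  # raises SchemaError if an item does not match the partial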
Code example #28
File: config_manage.py  Project: msauria/galaxy
def _validate(args, app_desc):
    path = _find_config(args, app_desc)
    # Allow empty mapping (not allowed by pykwalify)
    raw_config = _order_load_path(path)
    if raw_config.get(app_desc.app_name, None) is None:
        raw_config[app_desc.app_name] = {}
        config_p = tempfile.NamedTemporaryFile(delete=False, suffix=".yml")
        _ordered_dump(raw_config, config_p)
        config_p.flush()
        path = config_p.name

    fp = tempfile.NamedTemporaryFile(delete=False, suffix=".yml")
    _ordered_dump(app_desc.schema.raw_schema, fp)
    fp.flush()
    name = fp.name
    if Core is None:
        raise Exception("Cannot validate file, pykwalify is not installed.")
    c = Core(
        source_file=path,
        schema_files=[name],
    )
    c.validate()
Code example #29
File: configurator.py  Project: smarr/ReBench
def load_config(file_name):
    """
    Load the file, verify that it conforms to the schema,
    and return the configuration.
    """
    import yaml
    from pykwalify.core import Core
    from pykwalify.errors import SchemaError

    # Disable most logging for pykwalify
    import logging
    logging.getLogger('pykwalify').setLevel(logging.CRITICAL)
    logging.getLogger('pykwalify').addHandler(logging.NullHandler())

    try:
        with open(file_name, 'r') as conf_file:
            data = yaml.safe_load(conf_file)
            validator = Core(
                source_data=data,
                schema_files=[dirname(__file__) + "/rebench-schema.yml"])
            try:
                validator.validate(raise_exception=True)
            except SchemaError as err:
                errors = [escape_braces(val_err) for val_err in validator.validation_errors]
                raise UIError(
                    "Validation of " + file_name + " failed.\n{ind}" +
                    "\n{ind}".join(errors) + "\n", err)
            return data
    except IOError as err:
        if err.errno == 2:
            assert err.strerror == "No such file or directory"
            raise UIError("The requested config file (%s) could not be opened. %s.\n"
                          % (file_name, err.strerror), err)
        raise UIError(str(err) + "\n", err)
    except yaml.YAMLError as err:
        raise UIError("Parsing of the config file "
                      + file_name + " failed.\nError " + str(err) + "\n", err)
Code example #30
    def validate_deploy(self):
        """
        Validates the deployment yaml file with the schema
        :raises pykwalify.errors.SchemaError: if validation fails
        :raises pykwalify.errors.CoreError: for other type of errors
        """
        logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name)
        if not self.deploy_file:
            raise AssertionError

        try:
            c = Core(source_file=self.deploy_file,
                     schema_files=[self.schema_file],
                     extensions=[self.schema_functions_file])
            c.validate()
        except CoreError as e:
            # Most probably there is something wrong with the source files
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + e.msg)
            raise
        except SchemaError as e:
            # The deploy file is not valid
            logging.debug(self.__class__.__name__ + ': ' + sys._getframe().f_code.co_name + ': ' + e.msg)
            print("The deployment file at '%s' is not valid." % (self.deploy_file_host,))
            raise
Code example #31
    def __init__(self,
                 config_file,
                 options=None,
                 layer_name=None,
                 base_config=None):
        self._close_actions = []
        self._layers_geoms = {}
        self.error_files_ = {}

        if options is not None:
            if not hasattr(options, 'bbox'):
                options.bbox = None
            if not hasattr(options, 'zoom'):
                options.zoom = None
            if not hasattr(options, 'test'):
                options.test = None
            if not hasattr(options, 'near'):
                options.near = None
            if not hasattr(options, 'time'):
                options.time = None
            if not hasattr(options, 'geom'):
                options.geom = True

        self._configure_logging(
            options, '%(levelname)s:%(name)s:%(funcName)s:%(message)s')

        with open(config_file) as f:
            self.config = {}
            self.config.update({} if base_config is None else base_config)
            self.config.update(yaml.safe_load(f))
        self.options = options
        if 'defaults' in self.config:
            del self.config['defaults']
        # generate base structure
        if 'cost' in self.config:
            if 's3' not in self.config['cost']:
                self.config['cost']['s3'] = {}
            if 'cloudfront' not in self.config['cost']:
                self.config['cost']['cloudfront'] = {}
            if 'sqs' not in self.config['cost']:
                self.config['cost']['sqs'] = {}
        if 'generation' not in self.config:
            self.config['generation'] = {}
        for gname, grid in sorted(self.config.get('grids', {}).items()):
            if grid is not None:
                grid["name"] = gname
        for cname, cache in sorted(self.config.get('caches', {}).items()):
            if cache is not None:
                cache["name"] = cname
        for lname, layer in sorted(self.config.get('layers', {}).items()):
            if layer is not None:
                layer["name"] = lname

        c = Core(
            source_data=self.config,
            schema_data=yaml.safe_load(
                pkgutil.get_data("tilecloud_chain", "schema.yaml")),
        )
        path_ = ''
        try:
            self.config = c.validate()

            for name, cache in self.config['caches'].items():
                if cache['type'] == 's3':
                    c = Core(
                        source_data=cache,
                        schema_data=yaml.safe_load(
                            pkgutil.get_data("tilecloud_chain",
                                             "schema-cache-s3.yaml")),
                    )
                    path_ = 'caches/{}'.format(name)
                    self.config['caches'][name] = c.validate()
            for name, layer in self.config['layers'].items():
                c = Core(
                    source_data=layer,
                    schema_data=yaml.safe_load(
                        pkgutil.get_data(
                            "tilecloud_chain",
                            "schema-layer-{}.yaml".format(layer['type']))),
                )
                path_ = 'layers/{}'.format(name)
                self.config['layers'][name] = c.validate()

        except SchemaError:
            logger.error("The config file '{}' is invalid.\n{}".format(
                config_file, "\n".join(
                    sorted([
                        " - {}: {}".format(
                            os.path.join('/', path_,
                                         re.sub('^/', '', error.path)),
                            re.sub(" Path: '{path}'", '',
                                   error.msg).format(**error.__dict__))
                        for error in c.errors
                    ]))))
            exit(1)
        except NotSequenceError as e:  # pragma: no cover
            logger.error("The config file '{}' is invalid.\n - {}".format(
                config_file, e.msg))
            exit(1)
        except NotMappingError as e:  # pragma: no cover
            logger.error("The config file '{}' is invalid.\n - {}".format(
                config_file, e.msg))
            exit(1)

        error = False
        self.grids = self.config['grids']
        for gname, grid in sorted(self.grids.items()):
            if 'resolution_scale' in grid:
                scale = grid['resolution_scale']
                for r in grid['resolutions']:
                    if r * scale % 1 != 0.0:
                        logger.error(
                            "The resolution {} * resolution_scale {} is not an integer."
                            .format(r, scale))
                        error = True
            else:
                grid['resolution_scale'] = self._resolution_scale(
                    grid['resolutions'])

            srs = int(grid["srs"].split(":")[1])
            if 'proj4_literal' not in grid:
                if srs == 3857:  # pragma: no cover
                    grid['proj4_literal'] = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 ' \
                        '+x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over'
                elif srs == 21781:
                    grid['proj4_literal'] = \
                        '+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 ' \
                        '+x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 ' \
                        '+units=m +no_defs'
                elif srs == 2056:  # pragma: no cover
                    grid['proj4_literal'] = \
                        '+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 ' \
                        '+x_0=2600000 +y_0=1200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 ' \
                        '+units=m +no_defs'
                else:  # pragma: no cover
                    grid['proj4_literal'] = '+init={}'.format(grid['srs'])

            scale = grid['resolution_scale']
            grid['obj'] = FreeTileGrid(
                resolutions=[int(r * scale) for r in grid['resolutions']],
                scale=scale,
                max_extent=grid['bbox'],
                tile_size=grid['tile_size']) if not error else None

        self.layers = self.config['layers']
        for lname, layer in sorted(self.layers.items()):
            layer['grid_ref'] = self.grids[
                layer['grid']] if not error else None
            self.layers[lname] = layer
            if 'geoms' not in layer:
                layer['geoms'] = []
            if 'params' not in layer and layer['type'] == 'wms':
                layer['params'] = {}
            if 'headers' not in layer and layer['type'] == 'wms':
                layer['headers'] = {
                    'Cache-Control': 'no-cache, no-store',
                    'Pragma': 'no-cache',
                }
            if 'dimensions' not in layer:
                layer['dimensions'] = []
            if layer['type'] == 'mapnik' and \
                    layer['output_format'] == 'grid' and \
                    layer.get('meta', False):  # pragma: no cover
                logger.error(
                    "The layer '{}' is of type Mapnik/Grid, that can't support matatiles."
                    .format(lname))
                error = True

        self.caches = self.config['caches']
        self.metadata = self.config.get('metadata')
        self.provider = self.config.get('provider')

        if error:
            exit(1)

        if 'log_format' in self.config.get('generation', {}):
            self._configure_logging(options,
                                    self.config['generation']['log_format'])

        if options is not None and options.zoom is not None:
            error_message = (
                "The zoom argument '%s' has incorrect format, "
                "it can be a single value, a range (3-9), a list of values (2,5,7)."
            ) % options.zoom
            if options.zoom.find('-') >= 0:
                r = options.zoom.split('-')
                if len(r) != 2:  # pragma: no cover
                    logger.error(error_message)
                    error = True
                try:
                    options.zoom = range(int(r[0]), int(r[1]) + 1)
                except ValueError:  # pragma: no cover
                    logger.error(error_message, exc_info=True)
                    error = True
            elif options.zoom.find(',') >= 0:
                try:
                    options.zoom = [int(z) for z in options.zoom.split(',')]
                except ValueError:  # pragma: no cover
                    logger.error(error_message, exc_info=True)
                    error = True
            else:
                try:
                    options.zoom = [int(options.zoom)]
                except ValueError:  # pragma: no cover
                    logger.error(error_message, exc_info=True)
                    error = True

        if error:  # pragma: no cover
            exit(1)

        if layer_name and not error:
            self.init_layer(self.layers[layer_name], options)
Code example #32
File: main.py  Project: caiorasec/netprobify
    def load_conf(self, schema_file="schema_config.yaml"):
        """Load the configuration from a config file.

        Keyword arguments:
        schema_file -- relative/absolute path and filename for yaml schema
        """
        log.debug("Loading configuration")

        # cleaning targets list
        self.list_groups = []
        self.list_targets = []
        self.list_special_targets = []
        self.list_target_name = []
        self.global_vars = {}
        self.first_iter = True

        # instantiate a new generator
        self.instantiate_generator()

        # validate yaml config with the schema
        schema = pkg_resources.resource_filename(__name__, schema_file)
        yaml_validator = Core(source_file=self.config_file,
                              schema_files=[schema])
        yaml_validator.validate(raise_exception=True)

        # we load the configuration from the file
        with open(self.config_file, "r") as conf_file:
            # load as a yaml
            conf = yaml.safe_load(conf_file)

        # get global variables
        self.global_vars = conf["global"]

        # setting logging level
        log.setLevel(self.global_vars["logging_level"])

        # setting default percentile values if needed
        if self.global_vars.get("percentile") is None:
            self.global_vars["percentile"] = [95, 50]

        # get groups
        for group_name in conf["groups"]:
            group = conf["groups"][group_name]
            self.list_groups.append(
                Group(
                    name=group_name,
                    src_ipv4=group.get("src_ipv4", group.get("src_ip")),
                    src_ipv6=group.get("src_ipv6"),
                    src_subnet_ipv4=group.get("src_subnet_ipv4"),
                    src_subnet_ipv6=group.get("src_subnet_ipv6"),
                    src_port_a=group.get("src_port_a", 65000),
                    src_port_z=group.get("src_port_z", 65001),
                    ip_payload_size=group.get("ip_payload_size"),
                    dscp=group.get("dscp", 0),
                    permit_target_auto_register=group.get(
                        "permit_target_auto_register", True),
                ))

        # check targets are set
        if not conf.get("targets"):
            return

        # get target list
        for target_name in conf["targets"]:
            target = conf["targets"][target_name]

            if not target.get("address_family"):
                try:
                    ip = ip_address(target.get("destination"))
                    target["address_family"] = "ipv{}".format(ip.version)
                except ValueError:
                    log.debug(
                        "was not able to detect address-family from destination"
                        ", setting to default (%s)",
                        DEFAULT_ADDRESS_FAMILY,
                    )
                    target["address_family"] = DEFAULT_ADDRESS_FAMILY

            if target_name in self.list_target_name:
                log.warning("Duplicate target name %s", target_name)
                APP_TARGET_NAME_DUP.labels(target_name=target_name).inc(1)
            else:
                self.list_target_name.append(target_name)

            # manage group association
            target_groups = set()

            # we register to all group if allowed
            if target.get("auto_register_to_groups", True):
                for grp in self.list_groups:
                    if grp.permit_target_auto_register:
                        target_groups.add(grp.name)

            # we explicitly register to a group
            for grp in target.get("explicit_groups",
                                  {}).get("register_to", []):
                if grp in conf["groups"]:
                    target_groups.add(grp)
                else:
                    log.warning(
                        "Trying to associate '%s' to an inexistant group: %s",
                        target_name, grp)

            # we remove the target from a group
            for grp in target.get("explicit_groups",
                                  {}).get("exclude_from", []):
                try:
                    target_groups.remove(grp)
                except Exception:
                    log.info("Failed to remove target %s from %s", target_name,
                             grp)

            # create target objects
            self.load_target_conf(target, target_name, target_groups)

            log.debug("Target %s created", target_name)

            if len(target_groups) == 0 and target["type"] != "iperf":
                log.warning("Target %s disabled: not associated to any group",
                            target_name)
Code example #33
    def validate_config_format(config_file, config_schema):
        schema = os.path.abspath(config_schema)
        c = PyKwalify(source_file=config_file, schema_files=[schema])
        c.validate(raise_exception=True)
Code example #34
def validate(f):
    logger.info('Validating {}'.format(f.name))
    c = Core(source_file=f.name, schema_files=[resolve_from_site_packages('schema.yaml')])
    c.validate(raise_exception=True)
    click.echo("Finished validating: {}".format(f.name))
    click.echo("Finished validating: OK")
Code example #35
    def test_scenario_02(self):
        c = Core(source_file="wetest/tests/scenario_example02.yaml",
                 schema_files=["wetest/resources/scenario_schema.yaml"])
        c.validate()
Code example #36
def validate_metadata_contents(metadata, filepath, cache):
    # Initialize output
    is_metadata_error = False
    metadata_error_output = []

    core = Core(source_file=filepath, schema_files=["schema.yml"])
    core.validate(raise_exception=False, silent=True)

    if len(core.validation_errors) > 0:
        metadata_error_output.extend(
            ['METADATA_ERROR: %s' % err for err in core.validation_errors])
        is_metadata_error = True

    pat_model = re.compile(r"metadata-(.+)\.txt")
    model_name_file = re.findall(pat_model, os.path.basename(filepath))[0]
    # print(f"model_name_file: {model_name_file} \t\t filepath: {filepath}")

    # This is a critical error, so do not run further checks.
    if 'model_abbr' not in metadata:
        metadata_error_output.extend([
            'METADATA_ERROR: model_abbr key not present in the metadata file'
        ])
        is_metadata_error = True
        return is_metadata_error, metadata_error_output

    if model_name_file != metadata['model_abbr']:
        metadata_error_output.append(
            f"METADATA_ERROR: Model abreviation in metadata inconsistent with folder name for model_abbr={metadata['model_abbr']} as specified in metadata. NOTE: model name on file is: {model_name_file}"
        )
        is_metadata_error = True
    metadata['team_abbr'] = metadata['model_abbr'].split('-')[0]
    # Check if every team has only one `team_model_designation` as `primary`
    if 'team_abbr' in metadata.keys():
        # add designated primary model cache entry to the cache if not present
        if DESIGNATED_MODEL_CACHE_KEY not in cache:
            cache[DESIGNATED_MODEL_CACHE_KEY] = []

        # if the current model's designation is primary AND the team_abbr is already present in the cache, then report error
        if metadata['team_abbr'] in cache[
                DESIGNATED_MODEL_CACHE_KEY] and metadata[
                    'team_model_designation'] == 'primary':
            is_metadata_error = True
            metadata_error_output.append(
                'METADATA ERROR: %s has more than 1 model designated as \"primary\"'
                % (metadata['team_abbr']))
        # else if the current model designation is "primary", then add it to the cache
        elif metadata['team_model_designation'] == 'primary':
            cache[DESIGNATED_MODEL_CACHE_KEY].append(metadata['team_abbr'])

    # if `this_model_is_an_ensemble` is present, show a warning.

    # Check for Required Fields
    required_fields = [
        'team_name', 'team_abbr', 'model_name', 'model_contributors',
        'model_abbr', 'website_url', 'license', 'team_model_designation',
        'methods'
    ]
    # required_fields = ['team_name', 'team_abbr', 'model_name', 'model_abbr',\
    #                        'methods', 'team_url', 'license', 'include_in_ensemble_and_visualization']

    # for field in required_fields:
    #     if field not in metadata.keys():
    #         is_metadata_error = True
    #         metadata_error_output += ["METADATA ERROR: %s missing '%s'" % (filepath, field)]

    # Check methods character length (warning not error)
    # if 'methods' in metadata.keys():
    #     methods_char_lenth = len(metadata['methods'])
    #     if methods_char_lenth > 200:
    #         metadata_error_output += [
    #             "METADATA WARNING: %s methods is too many characters (%i should be less than 200)" %
    #             (filepath, methods_char_lenth)]

    # Check if forecast_startdate is date
    if 'forecast_startdate' in metadata.keys():
        forecast_startdate = str(metadata['forecast_startdate'])
        try:
            dateutil.parser.parse(forecast_startdate)
            is_date = True
        except ValueError:
            is_date = False
        if not is_date:
            is_metadata_error = True
            metadata_error_output += [
                "METADATA ERROR: %s forecast_startdate %s must be a date and should be in YYYY-MM-DD format"
                % (filepath, forecast_startdate)
            ]

    # Check if this_model_is_an_ensemble and this_model_is_unconditional are boolean
    boolean_fields = [
        'this_model_is_an_ensemble', 'this_model_is_unconditional',
        'include_in_ensemble_and_visualization'
    ]
    possible_booleans = ['true', 'false']
    for field in boolean_fields:
        if field in metadata.keys():
            if metadata[field] not in possible_booleans:
                is_metadata_error = True
                metadata_error_output += [
                    "METADATA ERROR: %s '%s' field must be lowercase boolean (true, false) not '%s'"
                    % (filepath, field, metadata[field])
                ]

    # Validate team URLS
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)

    # if 'team_url' in metadata.keys():
    #     if re.match(regex, str(metadata['team_url'])) is None:
    #         is_metadata_error = True
    #         metadata_error_output += [
    #             "METADATA ERROR: %s 'team_url' field must be a full URL (https://www.example.com) '%s'" %
    #             (filepath, metadata[field])]

    # Validate licenses
    license_df = pd.read_csv('accepted-licenses.csv')
    accepted_licenses = list(license_df['license'])
    if 'license' in metadata.keys():
        if metadata['license'] not in accepted_licenses:
            is_metadata_error = True
            metadata_error_output += [
                "METADATA ERROR: %s 'license' field must be in `./code/accepted-licenses.csv` 'license' column '%s'"
                % (filepath, metadata['license'])
            ]
    return is_metadata_error, metadata_error_output
Code example #37
    def validate_with(schema, target, raise_exception=True):
        validator = Core(source_data=target,
                         schema_files=[f"{schema_path}/{schema}.yml"])

        return validator.validate(raise_exception=raise_exception)
Code example #38
#!/usr/bin/env python

import glob
from pykwalify.core import Core
import pykwalify

exitCode = 0

pykwalify.init_logging(0)

for file in glob.glob("radar/**/*.yaml"):
    c = Core(source_file=file, schema_files=["src/radar_entry.schema.yaml"])
    try:
        c.validate(raise_exception=True)
    except Exception as e:
        print("ERROR - " + file + "\n" + e.msg)
        exitCode += 1

for file in glob.glob("radar/**/*.yml"):
    print("ERROR - " + file + "\nIncorrect extension, rename to '.yaml'")
    exitCode += 1

if exitCode > 0:
    print(str(exitCode) + " validation error(s).")

exit(exitCode)
Code example #39
File: parser.py  Project: xwalls/popper
    def parse(
        file=None,
        wf_data=None,
        step=None,
        skipped_steps=[],
        substitutions=[],
        allow_loose=False,
        immutable=True,
    ):
        """Returns an immutable workflow structure (a frozen Box) with 'steps' and
        'options' properties. See WorkflowParser._wf_schema above for their structure.
        If immutable=False is given, the returned object representing the workflow
        can be modified (an "unfrozen" ``box.Box``).
        """

        if not file and not wf_data:
            log.fail("Expecting 'file' or 'wf_data'")

        if file:
            if wf_data:
                log.fail("Expecting only one of 'file' and 'wf_data'")

            if not os.path.exists(file):
                log.fail(f"File {file} was not found.")

            if not file.endswith(".yml") and not file.endswith(".yaml"):
                log.fail("Unrecognized workflow file format.")

            with open(file, "r") as f:
                _wf_data = yaml.safe_load(f)

                if not _wf_data:
                    log.fail(f"File {file} is empty")
        else:
            _wf_data = dict(wf_data)

        # disable logging in order to silence warnings about "error() to fail()" change
        logging.disable(logging.CRITICAL)

        v = YMLValidator(source_data=_wf_data,
                         schema_data=WorkflowParser._wf_schema)

        try:
            v.validate()
        except SchemaError as e:
            # reenable logging
            logging.disable(logging.NOTSET)
            log.fail(f"{e.msg}")

        logging.disable(logging.NOTSET)

        WorkflowParser.__add_missing_ids(_wf_data)
        WorkflowParser.__propagate_options_to_steps(_wf_data)
        WorkflowParser.__apply_substitutions(_wf_data,
                                             substitutions=substitutions,
                                             allow_loose=allow_loose)
        WorkflowParser.__skip_steps(_wf_data, skipped_steps)
        WorkflowParser.__filter_step(_wf_data, step)

        # create and frozen a box
        wf_box = Box(_wf_data,
                     frozen_box=(immutable is True),
                     default_box=True)

        log.debug(f"Parsed workflow:\n{wf_box}")

        return wf_box
コード例 #40
0
    def test_core_files(self):
        # These tests should pass with no exception raised
        pass_tests = [
            # All tests for keyword assert
            "test_assert.yaml",
            # All tests for keyword default
            "test_default.yaml",
            # All tests for keyword desc
            "test_desc.yaml",
            # All tests for keyword enum
            "test_enum.yaml",
            # All tests for keyword example
            "test_example.yaml",
            # All tests for keyword extensions
            "test_extensions.yaml",
            # All tests for keyword func
            "test_func.yaml",
            # All tests for keyword ident
            "test_ident.yaml",
            # All tests for keyword include
            "test_include.yaml",
            # All tests for keyword length
            "test_length.yaml",
            # All tests for keyword mapping
            "test_mapping.yaml",
            # All tests for keyword matching
            "test_matching.yaml",
            # All tests for keyword name
            "test_name.yaml",
            # All tests for keyword pattern
            "test_pattern.yaml",
            # All tests for keyword range
            "test_range.yaml",
            # All tests for keyword required
            "test_required.yaml",
            # All tests for keyword schema
            "test_schema.yaml",
            # All tests for keyword sequence
            "test_sequence.yaml",
            # All tests for keyword unique
            "test_unique.yaml",
            # All tests for keyword version
            "test_version.yaml",

            # All test cases for Multiple sequence checks
            "test_sequence_multi.yaml",
            # All test cases for merging
            "test_merge.yaml",
            # All test cases for yaml anchors
            "test_anchor.yaml",

            # All tests for TYPE: any
            "test_type_any.yaml",
            # All tests for TYPE: bool
            "test_type_bool.yaml",
            # All tests for TYPE: date
            "test_type_date.yaml",
            # All tests for TYPE: enum
            "test_type_enum.yaml",
            # All tests for TYPE: float
            "test_type_float.yaml",
            # All tests for TYPE: int
            "test_type_int.yaml",
            # All tests for TYPE: map
            "test_type_map.yaml",
            # All tests for TYPE: none
            "test_type_none.yaml",
            # All tests for TYPE: number
            "test_type_number.yaml",
            # All tests for TYPE: scalar
            "test_type_scalar.yaml",
            # All tests for TYPE: seq
            "test_type_seq.yaml",
            # All tests for TYPE: str
            "test_type_str.yaml",
            # All tests for TYPE: symbol
            "test_type_symbol.yaml",
            # All tests for TYPE: text
            "test_type_text.yaml",
            # All tests for TYPE: timestamp
            "test_type_timestamp.yaml",
        ]

        _fail_tests = [
            # All tests for keyword assert
            ("test_assert.yaml", SchemaError),
            # All tests for keyword default
            ("test_default.yaml", SchemaError),
            # All tests for keyword desc
            ("test_desc.yaml", SchemaError),
            # All tests for keyword enum
            ("test_enum.yaml", SchemaError),
            # All tests for keyword example
            ("test_example.yaml", SchemaError),
            # All tests for keyword extensions
            ("test_extensions.yaml", SchemaError),
            # All tests for keyword func
            ("test_func.yaml", SchemaError),
            # All tests for keyword ident
            ("test_ident.yaml", SchemaError),
            # All tests for keyword include
            ("test_include.yaml", SchemaError),
            # All tests for keyword length
            ("test_length.yaml", SchemaError),
            # All tests for keyword mapping
            ("test_mapping.yaml", SchemaError),
            # All tests for keyword matching
            ("test_matching.yaml", SchemaError),
            # All tests for keyword name
            ("test_name.yaml", SchemaError),
            # All tests for keyword pattern
            ("test_pattern.yaml", SchemaError),
            # All tests for keyword range
            ("test_range.yaml", SchemaError),
            # All tests for keyword required
            ("test_required.yaml", SchemaError),
            # All tests for keyword schema
            ("test_schema.yaml", SchemaError),
            # All tests for keyword sequence
            ("test_sequence.yaml", SchemaError),
            # All tests for keyword unique
            ("test_unique.yaml", SchemaError),
            # All tests for keyword version
            ("test_version.yaml", SchemaError),

            # All test cases for Multiple sequence checks
            ("test_sequence_multi.yaml", SchemaError),
            # All test cases for merging
            ("test_merge.yaml", SchemaError),
            # All test cases for yaml anchors
            ("test_anchor.yaml", SchemaError),

            # All tests for TYPE: any
            ("test_type_any.yaml", SchemaError),
            # All tests for TYPE: bool
            ("test_type_bool.yaml", SchemaError),
            # All tests for TYPE: date
            ("test_type_date.yaml", SchemaError),
            # All tests for TYPE: float
            ("test_type_float.yaml", SchemaError),
            # All tests for TYPE: int
            ("test_type_int.yaml", SchemaError),
            # All tests for TYPE: map
            ("test_type_map.yaml", SchemaError),
            # All tests for TYPE: none
            ("test_type_none.yaml", SchemaError),
            # All tests for TYPE: number
            ("test_type_number.yaml", SchemaError),
            # All tests for TYPE: scalar
            ("test_type_scalar.yaml", SchemaError),
            # All tests for TYPE: seq
            ("test_type_seq.yaml", SchemaError),
            # All tests for TYPE: str
            ("test_type_str.yaml", SchemaError),
            # All tests for TYPE: symbol
            ("test_type_symbol.yaml", SchemaError),
            # All tests for TYPE: text
            ("test_type_text.yaml", SchemaError),
            # All tests for TYPE: timestamp
            ("test_type_timestamp.yaml", SchemaError),
        ]

        # Add override magic to make it easier to test a specific file
        if "S" in os.environ:
            pass_tests = [os.environ["S"]]
            _fail_tests = []
        elif "F" in os.environ:
            pass_tests = []
            _fail_tests = [(os.environ["F"], SchemaError)]

        for passing_test_file in pass_tests:
            f = self.f(os.path.join("success", passing_test_file))
            with open(f, "r") as stream:
                yaml_data = yaml.load_all(stream)

                for document_index, document in enumerate(yaml_data):
                    data = document["data"]
                    schema = document["schema"]

                    try:
                        print("Running test files: {0}".format(f))
                        c = Core(source_data=data,
                                 schema_data=schema,
                                 strict_rule_validation=True,
                                 allow_assertions=True)
                        c.validate()
                        compare(c.validation_errors, [],
                                prefix="No validation errors should exist...")
                    except Exception as e:
                        print("ERROR RUNNING FILES: {0} : {1}:{2}".format(
                            f, document_index, document.get('name',
                                                            'UNKNOWN')))
                        raise e

                    # This serves as an extra schema validation that tests more complex structures than testrule.py does
                    compare(
                        c.root_rule.schema_str,
                        schema,
                        prefix="Parsed rules are not correct, something has changed... files : {0} : {1}".format(
                            f, document_index))

        for failing_test, exception_type in _fail_tests:
            f = self.f(os.path.join("fail", failing_test))
            with open(f, "r") as stream:
                yaml_data = yaml.load_all(stream)

                for document_index, document in enumerate(yaml_data):
                    data = document["data"]
                    schema = document["schema"]
                    errors = document.get("errors", [])

                    try:
                        print("Running test files: {0}".format(f))
                        c = Core(source_data=data,
                                 schema_data=schema,
                                 strict_rule_validation=True,
                                 allow_assertions=True)
                        c.validate()
                    except exception_type:
                        pass  # expected failure
                    else:
                        print("ERROR RUNNING FILES: {0} : {1}:{2}".format(
                            f, document_index, document.get('name',
                                                            'UNKNOWN')))
                        raise AssertionError(
                            "Exception {0} not raised as expected... FILES: {1} : {2}:{3}"
                            .format(exception_type, failing_test,
                                    document_index,
                                    document.get('name', 'UNKNOWN')))

                    compare(
                        sorted(c.validation_errors),
                        sorted(errors),
                        prefix="Wrong validation errors when parsing files : {0} : {1} : {2}".format(
                            f, document_index, document.get('name', 'UNKNOWN')))
コード例 #41
0
    def test_multi_file_support(self):
        """
        This should test that multiple schema files are supported correctly
        """
        pass_tests = [
            # Test that include directive can be used at top level of the schema
            ([
                self.f("partial_schemas", "1s-schema.yaml"),
                self.f("partial_schemas", "1s-partials.yaml"),
            ], self.f("partial_schemas", "1s-data.yaml"), {
                'sequence': [{
                    'include': 'fooone'
                }],
                'type': 'seq',
            }),
            # # This tests that the include directive works inside a sequence
            # ([self.f("33a.yaml"), self.f("33b.yaml")], self.f("33c.yaml"), {'sequence': [{'include': 'fooone'}], 'type': 'seq'}),
            # This tests recursive schemas
            ([
                self.f("partial_schemas", "2s-schema.yaml"),
                self.f("partial_schemas", "2s-partials.yaml"),
            ], self.f("partial_schemas", "2s-data.yaml"), {
                'sequence': [{
                    'include': 'fooone'
                }],
                'type': 'seq',
            })
        ]

        failing_tests = [
            # Test include inside partial schema
            ([
                self.f("partial_schemas", "1f-schema.yaml"),
                self.f("partial_schemas", "1f-partials.yaml")
            ], self.f("partial_schemas", "1f-data.yaml"), SchemaError, [
                "Cannot find partial schema with name 'fooonez'. Existing partial schemas: 'fooone, foothree, footwo'. Path: '/0'"
            ]),
            ([self.f('partial_schemas', '2f-schema.yaml')],
             self.f('partial_schemas', '2f-data.yaml'), SchemaError,
             ["Value 'True' is not of type 'str'. Path: '/0'"]),
            ([self.f('partial_schemas', '3f-schema.yaml')],
             self.f('partial_schemas', '3f-data.yaml'), SchemaError,
             ["Value 'True' is not of type 'str'. Path: ''"]),
            ([self.f('partial_schemas', '4f-schema.yaml')],
             self.f('partial_schemas', '4f-data.yaml'), SchemaError,
             ["Value 'True' is not of type 'str'. Path: '/0/foo/0/bar'"]),
            ([self.f('partial_schemas', '5f-schema.yaml')],
             self.f('partial_schemas', '5f-data.yaml'), SchemaError,
             ["Value 'True' is not of type 'str'. Path: '/0/0/0/0'"]),
            ([self.f('partial_schemas', '6f-schema.yaml')],
             self.f('partial_schemas', '6f-data.yaml'), SchemaError,
             ["Value 'True' is not of type 'str'. Path: '/foo/bar/qwe/ewq'"])
        ]

        for passing_test in pass_tests:
            try:
                c = Core(source_file=passing_test[1],
                         schema_files=passing_test[0])
                c.validate()
                compare(c.validation_errors, [],
                        prefix="No validation errors should exist...")
            except Exception as e:
                print("ERROR RUNNING FILE: {0} : {1}".format(
                    passing_test[0], passing_test[1]))
                raise e

            # This serves as an extra schema validation that tests more complex structures than testrule.py does
            compare(
                c.root_rule.schema_str,
                passing_test[2],
                prefix="Parsed rules are not correct, something has changed..."
            )

        for failing_test in failing_tests:
            with pytest.raises(failing_test[2],
                               msg="Test files: {0} : {1}".format(
                                   ", ".join(failing_test[0]),
                                   failing_test[1])):
                c = Core(schema_files=failing_test[0],
                         source_file=failing_test[1])
                c.validate()

            if not c.validation_errors:
                raise AssertionError("No validation_errors was raised...")

            compare(
                sorted(c.validation_errors),
                sorted(failing_test[3]),
                prefix="Wrong validation errors when parsing files : {0} : {1}"
                .format(
                    failing_test[0],
                    failing_test[1],
                ),
            )
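
The partial-schema files above rely on pykwalify's include directive. A minimal self-contained sketch of that mechanism, with the partial kept in the same schema dict instead of a second file:

from pykwalify.core import Core

schema = {
    # a partial schema, normally kept in a separate *-partials.yaml file
    "schema;fooone": {
        "type": "map",
        "mapping": {"foo": {"type": "str"}},
    },
    # the top-level schema pulls the partial in by name
    "type": "seq",
    "sequence": [{"include": "fooone"}],
}

c = Core(source_data=[{"foo": "bar"}, {"foo": "baz"}], schema_data=schema)
c.validate(raise_exception=True)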
コード例 #42
0
ファイル: validation.py プロジェクト: alexlana/rasa-1
def validate_yaml_schema(yaml_file_content: Text, schema_path: Text) -> None:
    """
    Validate yaml content.

    Args:
        yaml_file_content: the content of the yaml file to be validated
        schema_path: the schema of the yaml file
    """
    from pykwalify.core import Core
    from pykwalify.errors import SchemaError
    from ruamel.yaml import YAMLError
    from ruamel.yaml.constructor import DuplicateKeyError  # caught below on duplicate keys
    import pkg_resources
    import rasa.shared.utils.io
    import logging

    log = logging.getLogger("pykwalify")
    log.setLevel(logging.CRITICAL)

    try:
        # we need "rt" since
        # it will add meta information to the parsed output. this meta information
        # will include e.g. at which line an object was parsed. this is very
        # helpful when we validate files later on and want to point the user to the
        # right line
        source_data = rasa.shared.utils.io.read_yaml(
            yaml_file_content, reader_type=["safe", "rt"]
        )
    except YAMLError:
        raise YamlValidationException(
            "The provided yaml file is invalid. You can use "
            "http://www.yamllint.com/ to validate the yaml syntax "
            "of your file."
        )
    except DuplicateKeyError as e:
        raise YamlValidationException(
            "The provided yaml file contains a duplicated key: '{}'. You can use "
            "http://www.yamllint.com/ to validate the yaml syntax "
            "of your file.".format(str(e))
        )

    schema_file = pkg_resources.resource_filename(PACKAGE_NAME, schema_path)
    schema_utils_file = pkg_resources.resource_filename(
        PACKAGE_NAME, RESPONSES_SCHEMA_FILE
    )
    schema_extensions = pkg_resources.resource_filename(
        PACKAGE_NAME, SCHEMA_EXTENSIONS_FILE
    )

    c = Core(
        source_data=source_data,
        schema_files=[schema_file, schema_utils_file],
        extensions=[schema_extensions],
    )

    try:
        c.validate(raise_exception=True)
    except SchemaError:
        raise YamlValidationException(
            "Please make sure the file is correct and all "
            "mandatory parameters are specified. Here are the errors "
            "found during validation",
            c.errors,
            content=source_data,
        )
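
The extensions= argument above points pykwalify at Python files whose functions a schema can call through the func keyword. A small sketch of that hook, with made-up file and function names:

import os
import tempfile
from pykwalify.core import Core

# an extension module: func callbacks receive (value, rule_obj, path);
# returning something falsy (or raising) is treated as a failed check
ext_source = """
def is_nonempty(value, rule_obj, path):
    return bool(value)
"""

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as handle:
    handle.write(ext_source)
    ext_path = handle.name

try:
    c = Core(source_data="hello",
             schema_data={"type": "str", "func": "is_nonempty"},
             extensions=[ext_path])
    c.validate()  # passes; an empty string would fail the func check
finally:
    os.remove(ext_path)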
コード例 #43
0
def validate_metadata_contents(metadata, filepath, cache):
    # Initialize output
    is_metadata_error = False
    metadata_error_output = []

    core = Core(source_file=filepath, schema_files=["schema.yml"])
    core.validate(raise_exception=False, silent=True)

    if len(core.validation_errors) > 0:
        metadata_error_output.extend(
            ['METADATA_ERROR: %s' % err for err in core.validation_errors])
        is_metadata_error = True

    folder_name = filepath.split('/')[-2]
    if folder_name != metadata['model_abbr']:
        metadata_error_output.append(
            f"METADATA_ERROR: Model abreviation in metadata inconsistent with folder name for model_abbr={metadata['model_abbr']} as specified in metadata. NOTE: Folder name is: {folder_name}"
        )
        is_metadata_error = True
    metadata['team_abbr'] = metadata['model_abbr'].split('-')[0]
    # Check if every team has only one `team_model_designation` as `primary`

    # Check if forecast_startdate is date
    if 'forecast_startdate' in metadata.keys():
        forecast_startdate = str(metadata['forecast_startdate'])
        try:
            dateutil.parser.parse(forecast_startdate)
            is_date = True
        except ValueError:
            is_date = False
        if not is_date:
            is_metadata_error = True
            metadata_error_output += [
                "METADATA ERROR: %s forecast_startdate %s must be a date and should be in YYYY-MM-DD format"
                % (filepath, forecast_startdate)
            ]

    # Check if this_model_is_an_ensemble and this_model_is_unconditional are boolean
    boolean_fields = [
        'this_model_is_an_ensemble', 'this_model_is_unconditional',
        'include_in_ensemble_and_visualization'
    ]
    possible_booleans = ['true', 'false']
    for field in boolean_fields:
        if field in metadata.keys():
            if metadata[field] not in possible_booleans:
                is_metadata_error = True
                metadata_error_output += [
                    "METADATA ERROR: %s '%s' field must be lowercase boolean (true, false) not '%s'"
                    % (filepath, field, metadata[field])
                ]

    # Validate team URLS
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)

    # if 'team_url' in metadata.keys():
    #     if re.match(regex, str(metadata['team_url'])) is None:
    #         is_metadata_error = True
    #         metadata_error_output += [
    #             "METADATA ERROR: %s 'team_url' field must be a full URL (https://www.example.com) '%s'" %
    #             (filepath, metadata[field])]

    # Validate licenses
    license_df = pd.read_csv('./code/validation/accepted-licenses.csv')
    accepted_licenses = list(license_df['license'])
    if 'license' in metadata.keys():
        if metadata['license'] not in accepted_licenses:
            is_metadata_error = True
            metadata_error_output += [
                "METADATA ERROR: %s 'license' field must be in `./code/validations/accepted-licenses.csv` 'license' column '%s'"
                % (filepath, metadata['license'])
            ]
    return is_metadata_error, metadata_error_output
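
One caveat about the forecast_startdate check above: dateutil.parser.parse accepts far more than YYYY-MM-DD, so the error message promises a stricter format than the code enforces. A quick illustration:

import dateutil.parser

for value in ("2021-01-31", "01/31/2021", "Jan 31 2021", "not-a-date"):
    try:
        dateutil.parser.parse(value)
        print(value, "-> accepted")
    except ValueError:
        print(value, "-> rejected")

# only "not-a-date" is rejected; enforcing YYYY-MM-DD strictly would need
# e.g. datetime.datetime.strptime(value, "%Y-%m-%d")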
コード例 #44
0
def parse_data(site):
    base = op.dirname(__file__)

    originals = []
    for fn in os.listdir(op.join(base, 'originals')):
        if fn.endswith('.yaml'):
            originals.extend(yaml.load(open(op.join(base, 'originals', fn))))

    def sort_key(game):
        name = game_name(game)
        # Always sort SCUMM first
        if name == 'SCUMM':
            return '0'
        if name.startswith('The '):
            return name[4:]
        return name

    originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
    print(str(len(originals)) + ' games in total')

    try:
        core = Core(source_data=originals,
                    schema_files=['schema/originals.yaml'])
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(originals, core.errors)
        else:
            raise error

    clones = []
    for fn in sorted(os.listdir(op.join(base, 'games'))):
        if fn.endswith('.yaml'):
            clones.extend(yaml.load(open(op.join(base, 'games', fn))))
    print(str(len(clones)) + ' clones in total')

    try:
        core = Core(source_data=clones, schema_files=['schema/games.yaml'])
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(clones, core.errors)
        else:
            raise error

    errors = []
    originals_map = {}

    for item in originals:
        name = game_name(item)

        if name in originals_map:
            errors.append({
                "name": name,
                "error": "Duplicate original game '%s'" % name
            })

        originals_map[name] = item

    if len(errors) > 0:
        show_errors(errors)

    for clone in clones:
        if 'originals' not in clone:
            show_errors([{
                "name": clone["name"],
                "error": "Unable to find 'originals' in game"
            }])

        for original in clone['originals']:
            if original not in originals_map:
                errors.append({
                    "name": clone["name"],
                    "error": "Original game '%s' not found" % original
                })

    if len(errors) > 0:
        show_errors(errors)

    for item in originals:
        parse_global_tags(site, item.get('meta', {}), 'genre')
        # Recombine originals and clones
        combined = copy.deepcopy(item)
        name = game_name(combined)

        combined['games'] = [
            clone for clone in clones if name in clone['originals']
        ]
        parse_items(site, combined, 'games')
コード例 #45
0
ファイル: testcore.py プロジェクト: deltaforge/pykwalify
    def testCore(self):
        # These tests should pass with no exception raised
        pass_tests = [
            # Test sequence with only string values
            ("1a.yaml", "1b.yaml", {
                'sequence': [{
                    'type': 'str'
                }],
                'type': 'seq'
            }),
            # Test sequence where the only valid items is integers
            ("3a.yaml", "3b.yaml", {
                'sequence': [{
                    'type': 'int'
                }],
                'type': 'seq'
            }),
            # Test sequence with only booleans
            ("4a.yaml", "4b.yaml", {
                'sequence': [{
                    'type': 'bool'
                }],
                'type': 'seq'
            }),
            # Test mapping with different types of data and some extra conditions
            ("8a.yaml", "8b.yaml", {
                'mapping': {
                    'age': {
                        'type': 'int'
                    },
                    'birth': {
                        'type': 'str'
                    },
                    'email': {
                        'pattern': '.+@.+',
                        'type': 'str'
                    },
                    'name': {
                        'required': True,
                        'type': 'str'
                    }
                },
                'type': 'map'
            }),
            # Test sequence with mapping with valid mapping
            ("10a.yaml", "10b.yaml", {
                'sequence': [{
                    'mapping': {
                        'email': {
                            'type': 'str'
                        },
                        'name': {
                            'required': True,
                            'type': 'str'
                        }
                    },
                    'type': 'map'
                }],
                'type': 'seq'
            }),
            # Test mapping with sequence with mapping and valid data
            ("12a.yaml", "12b.yaml", {
                'mapping': {
                    'company': {
                        'required': True,
                        'type': 'str'
                    },
                    'email': {
                        'type': 'str'
                    },
                    'employees': {
                        'sequence': [{
                            'mapping': {
                                'code': {
                                    'required': True,
                                    'type': 'int'
                                },
                                'email': {
                                    'type': 'str'
                                },
                                'name': {
                                    'required': True,
                                    'type': 'str'
                                }
                            },
                            'type': 'map'
                        }],
                        'type': 'seq'
                    }
                },
                'type': 'map'
            }),
            # Test most of the implemented functions
            ("14a.yaml", "14b.yaml", {
                'sequence': [{
                    'mapping': {
                        'age': {
                            'range': {
                                'max': 30,
                                'min': 18
                            },
                            'type': 'int'
                        },
                        'birth': {
                            'type': 'str'
                        },
                        'blood': {
                            'enum': ['A', 'B', 'O', 'AB'],
                            'type': 'str'
                        },
                        'deleted': {
                            'type': 'bool'
                        },
                        'email': {
                            'pattern': '.+@.+',
                            'required': True,
                            'type': 'str'
                        },
                        'memo': {
                            'type': 'any'
                        },
                        'name': {
                            'required': True,
                            'type': 'str'
                        },
                        'password': {
                            'length': {
                                'max': 16,
                                'min': 8
                            },
                            'type': 'str'
                        }
                    },
                    'type': 'map'
                }],
                'type': 'seq'
            }),
            # This will test the unique constraint
            ("16a.yaml", "16b.yaml", {
                'sequence': [{
                    'mapping': {
                        'email': {
                            'type': 'str'
                        },
                        'groups': {
                            'sequence': [{
                                'type': 'str',
                                'unique': True
                            }],
                            'type': 'seq'
                        },
                        'name': {
                            'required': True,
                            'type': 'str',
                            'unique': True
                        }
                    },
                    'required': True,
                    'type': 'map'
                }],
                'type': 'seq'
            }),
            #
            ("18a.yaml", "18b.yaml", {
                'mapping': {
                    'datasources': {
                        'allowempty': True,
                        'type': 'map'
                    }
                },
                'type': 'map'
            }),
            #
            ("19a.yaml", "19b.yaml", {
                'mapping': {
                    'datasources': {
                        'allowempty': True,
                        'mapping': {
                            'test1': {
                                'type': 'str'
                            }
                        },
                        'type': 'map'
                    }
                },
                'type': 'map'
            }),
            #
            ("20a.yaml", "20b.yaml", {
                'type': 'float'
            }),
            # This tests number validation rule
            ("21a.yaml", "21b.yaml", {
                'sequence': [{
                    'type': 'number'
                }],
                'type': 'seq'
            }),
            # This tests the text validation rule
            ("23a.yaml", "23b.yaml", {
                'sequence': [{
                    'type': 'text'
                }],
                'type': 'seq'
            }),
            # This tests the any validation rule
            ("24a.yaml", "25b.yaml", {
                'sequence': [{
                    'type': 'any'
                }],
                'type': 'seq'
            }),
            #
            ("26a.yaml", "26b.yaml", {
                'type': 'any'
            }),
            #
            ("28a.yaml", "28b.yaml", {
                'allowempty': True,
                'mapping': {
                    'name': {
                        'type': 'str'
                    }
                },
                'pattern': '^[a-z0-9]+$',
                'type': 'map'
            }),
            #
            ("29a.yaml", "29b.yaml", {
                'sequence': [{
                    'mapping': {
                        'bits': {
                            'type': 'str'
                        },
                        'name': {
                            'type': 'str'
                        }
                    },
                    'pattern': '.+',
                    'type': 'map'
                }],
                'type': 'seq'
            }),
            #
            ("30a.yaml", "30b.yaml", {
                'sequence': [{
                    'mapping': {
                        'foobar': {
                            'mapping': {
                                'opa': {
                                    'type': 'bool'
                                }
                            },
                            'type': 'map'
                        },
                        'media': {
                            'type': 'int'
                        },
                        'regex;[mi.+]': {
                            'sequence': [{
                                'type': 'str'
                            }],
                            'type': 'seq'
                        },
                        'regex;[mo.+]': {
                            'sequence': [{
                                'type': 'bool'
                            }],
                            'type': 'seq'
                        }
                    },
                    'matching-rule': 'any',
                    'type': 'map'
                }],
                'type': 'seq'
            }),
            # This tests a regex that will compile
            ("31a.yaml", "31b.yaml", {
                'mapping': {
                    'regex;mi.+': {
                        'sequence': [{
                            'type': 'str'
                        }],
                        'type': 'seq'
                    }
                },
                'matching-rule': 'any',
                'type': 'map'
            }),
        ]

        # These tests are designed to fail with some exception raised
        fail_tests = [
            # Test sequence with defined string content type but data only has integers
            ("2a.yaml", "2b.yaml", SchemaError, [
                "Value: 1 is not of type 'str' : /0",
                "Value: 2 is not of type 'str' : /1",
                "Value: 3 is not of type 'str' : /2"
            ]),
            # Test sequence with defined string content type but data only has booleans
            ("5a.yaml", "5b.yaml", SchemaError, [
                "Value: True is not of type 'str' : /0",
                "Value: False is not of type 'str' : /1"
            ]),
            # Test sequence with defined booleans but with one integer
            ("6a.yaml", "6b.yaml", SchemaError,
             ["Value: 1 is not of type 'bool' : /2"]),
            # Test sequence with strings and a length limit on each string
            ("7a.yaml", "7b.yaml", SchemaError,
             ['length.toolong : 5 < 6 : /2']),
            # Test a mapping that does not work
            ("9a.yaml", "8b.yaml", SchemaError, [
                "Value: twnty is not of type 'int' : /age",
                'pattern.unmatch : .+@.+ --> foo(at)mail.com : /email'
            ]),
            # Test sequence with mapping with missing required key
            ("11a.yaml", "10b.yaml", SchemaError, [
                'required.nokey : name : /1', 'key.undefined : naem : /1',
                'key.undefined : mail : /2'
            ]),
            # Test mapping with sequence with mapping and invalid data
            ("13a.yaml", "12b.yaml", SchemaError, [
                "Value: A101 is not of type 'int' : /employees/0/code",
                'key.undefined : mail : /employees/1'
            ]),
            # TODO: write
            ("15a.yaml", "14b.yaml", SchemaError, [
                "Value: twenty is not of type 'int' : /0/age",
                'length.tooshort : 8 > 6 : /0/password',
                'pattern.unmatch : .+@.+ --> foo(at)mail.com : /0/email',
                'enum.notexists : a : /0/blood', 'required.nokey : name : /1',
                'key.undefined : given-name : /1',
                'key.undefined : family-name : /1',
                'range.toosmall : 18 > 15 : /1/age'
            ]),
            # TODO: The reverse unique does not currently work properly # This will test the unique constraint but should fail
            ("17a.yaml", "16b.yaml", SchemaError,
             ['value.notunique :: value: foo : /0/groups/3 : /0/groups/0']),
            # This tests number validation rule with wrong data
            ("22a.yaml", "22b.yaml", SchemaError,
             ["Value: abc is not of type 'number' : /2"]),
            # This tests the text validation rule with wrong data
            ("24a.yaml", "24b.yaml", SchemaError,
             ["Value: True is not of type 'text' : /3"]),
            # This tests pattern matching on keys in a map
            ("27a.yaml", "27b.yaml", SchemaError,
             ['pattern.unmatch : ^[a-z]+$ --> na1me : ']),
        ]

        for passing_test in pass_tests:
            c = Core(source_file=self.f(passing_test[0]),
                     schema_file=self.f(passing_test[1]))
            c.validate()
            compare(c.validation_errors, [],
                    prefix="No validation errors should exist...")

            # This serves as an extra schema validation that tests more complex structures than testrule.py does
            compare(
                c.root_rule._schema_str,
                passing_test[2],
                prefix="Parsed rules are not correct, something has changed..."
            )

        for failing_test in fail_tests:
            with self.assertRaises(failing_test[2],
                                   msg="Test file: {} : {}".format(
                                       failing_test[0], failing_test[1])):
                c = Core(source_file=self.f(failing_test[0]),
                         schema_file=self.f(failing_test[1]))
                c.validate()

            compare(
                sorted(c.validation_errors),
                sorted(failing_test[3]),
                prefix="Wrong validation errors when parsing files : {} : {}".
                format(failing_test[0], failing_test[1]))
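
The third element of each pass_tests tuple above is compared against the rule tree pykwalify builds while parsing the schema. A tiny sketch of that introspection:

from pykwalify.core import Core

c = Core(source_data=["a", "b"],
         schema_data={"type": "seq", "sequence": [{"type": "str"}]})
c.validate()

# the normalized schema is reachable on the root rule; newer pykwalify
# versions expose the same data as c.root_rule.schema_str
print(c.root_rule._schema_str)
# {'type': 'seq', 'sequence': [{'type': 'str'}]}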
コード例 #46
0
from pykwalify.core import Core
import yaml

inventory = yaml.load(open("manoci.yaml"), Loader=yaml.FullLoader)
#
print(inventory)
for i in inventory['localhost_fake'].keys():
    print(i)

inven_validation = Core(source_file="manoci.yaml",
                        schema_files=["schema2.yaml"])
inven_validation.validate(raise_exception=True)
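
manoci.yaml and schema2.yaml are not shown, but judging from the localhost_fake key iterated above, the pair could look roughly like this inline equivalent (both contents are assumptions):

from pykwalify.core import Core

inventory = {"localhost_fake": {"ansible_host": "127.0.0.1"}}

schema = {
    "type": "map",
    "matching-rule": "any",
    "mapping": {
        # accept any host name as a key, each mapping to a free-form map
        "regex;(.+)": {"type": "map", "allowempty": True},
    },
}

Core(source_data=inventory, schema_data=schema).validate()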
コード例 #47
0
def validate_metadata_contents(metadata, filepath, cache):
    # Initialize output
    is_metadata_error = False
    metadata_error_output = []

    core = Core(source_file=filepath, schema_files=[SCHEMA_FILE])
    core.validate(raise_exception=False, silent=True)

    if len(core.validation_errors) > 0:
        metadata_error_output.extend(
            ['METADATA_ERROR: %s' % err for err in core.validation_errors])
        is_metadata_error = True

    path = os.path.normpath(filepath)
    folder_name = path.split(os.sep)[-2]

    # This is a critical error and hence do not run further checks.
    if 'model_abbr' not in metadata:
        metadata_error_output.extend([
            'METADATA_ERROR: model_abbr key not present in the metadata file'
        ])
        is_metadata_error = True
        return is_metadata_error, metadata_error_output

    if folder_name != metadata['model_abbr']:
        metadata_error_output.append(
            f"METADATA_ERROR: Model abreviation in metadata inconsistent with folder name for model_abbr={metadata['model_abbr']} as specified in metadata. NOTE: Folder name is: {folder_name}"
        )
        is_metadata_error = True
    metadata['team_abbr'] = metadata['model_abbr'].split('-')[0]
    # Check if every team has only one `team_model_designation` as `primary`
    if 'team_abbr' in metadata.keys():
        # add a designated primary model cache entry to the cache if not present
        if DESIGNATED_MODEL_CACHE_KEY not in cache:
            cache[DESIGNATED_MODEL_CACHE_KEY] = []

        # if the current model's designation is primary AND the team_name is already present in the cache, then report an error
        if (metadata['team_abbr'] in cache[DESIGNATED_MODEL_CACHE_KEY]
                and metadata['team_model_designation'] == 'primary'):
            is_metadata_error = True
            metadata_error_output.append(
                'METADATA ERROR: %s has more than 1 model designated as \"primary\"'
                % (metadata['team_abbr']))
        # else if the current model designation is "primary", then add it to the cache
        elif metadata['team_model_designation'] == 'primary':
            cache[DESIGNATED_MODEL_CACHE_KEY].append(metadata['team_abbr'])

    # Check if forecast_startdate is date
    if 'forecast_startdate' in metadata.keys():
        forecast_startdate = str(metadata['forecast_startdate'])
        try:
            dateutil.parser.parse(forecast_startdate)
            is_date = True
        except ValueError:
            is_date = False
        if not is_date:
            is_metadata_error = True
            metadata_error_output += [
                "METADATA ERROR: %s forecast_startdate %s must be a date and should be in YYYY-MM-DD format"
                % (filepath, forecast_startdate)
            ]

    # Check if this_model_is_an_ensemble and this_model_is_unconditional are boolean
    boolean_fields = [
        'this_model_is_an_ensemble', 'this_model_is_unconditional',
        'include_in_ensemble_and_visualization'
    ]
    possible_booleans = ['true', 'false']
    for field in boolean_fields:
        if field in metadata.keys():
            if metadata[field] not in possible_booleans:
                is_metadata_error = True
                metadata_error_output += [
                    "METADATA ERROR: %s '%s' field must be lowercase boolean (true, false) not '%s'"
                    % (filepath, field, metadata[field])
                ]

    # Validate team URLS
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)

    # Validate licenses
    license_df = pd.read_csv('./code/validation/accepted-licenses.csv')
    accepted_licenses = list(license_df['license'])
    if 'license' in metadata.keys():
        if metadata['license'] not in accepted_licenses:
            is_metadata_error = True
            metadata_error_output += [
                "\nMETADATA ERROR: %s 'license' field must be in `./code/validations/accepted-licenses.csv` 'license' column '%s'"
                % (filepath, metadata['license'])
            ]
    return is_metadata_error, metadata_error_output
コード例 #48
0
def validate_yaml_schema(
    yaml_file_content: Text, schema_path: Text, show_validation_errors: bool = True
) -> None:
    """
    Validate yaml content.

    Args:
        yaml_file_content: the content of the yaml file to be validated
        schema_path: the schema of the yaml file
        show_validation_errors: if true, validation errors are shown
    """
    from pykwalify.core import Core
    from pykwalify.errors import SchemaError
    from ruamel.yaml import YAMLError
    from ruamel.yaml.constructor import DuplicateKeyError  # caught below on duplicate keys
    import pkg_resources
    import rasa.utils.io
    import logging

    log = logging.getLogger("pykwalify")
    if show_validation_errors:
        log.setLevel(logging.WARN)
    else:
        log.setLevel(logging.CRITICAL)

    try:
        source_data = rasa.utils.io.read_yaml(yaml_file_content)
    except YAMLError:
        raise InvalidYamlFileError(
            "The provided yaml file is invalid. You can use "
            "http://www.yamllint.com/ to validate the yaml syntax "
            "of your file."
        )
    except DuplicateKeyError as e:
        raise InvalidYamlFileError(
            "The provided yaml file contains a duplicated key: '{}'. You can use "
            "http://www.yamllint.com/ to validate the yaml syntax "
            "of your file.".format(str(e))
        )

    try:
        schema_file = pkg_resources.resource_filename(PACKAGE_NAME, schema_path)
        schema_utils_file = pkg_resources.resource_filename(
            PACKAGE_NAME, SCHEMA_UTILS_FILE
        )
        schema_extensions = pkg_resources.resource_filename(
            PACKAGE_NAME, SCHEMA_EXTENSIONS_FILE
        )

        c = Core(
            source_data=source_data,
            schema_files=[schema_file, schema_utils_file],
            extensions=[schema_extensions],
        )
        c.validate(raise_exception=True)
    except SchemaError:
        raise InvalidYamlFileError(
            "Failed to validate yaml file. "
            "Please make sure the file is correct and all "
            "mandatory parameters are specified; to do so, "
            "take a look at the errors logged during "
            "validation previous to this exception."
        )
コード例 #49
0
def parse_data(site):
    base = op.join(op.dirname(__file__), 'games')

    originals = []
    for fn in os.listdir(op.join(base, 'originals')):
        if fn.endswith('.yaml'):
            originals.extend(yaml.load(open(op.join(base, 'originals', fn))))

    def sort_key(game):
        name = game_name(game)
        # Always sort SCUMM first
        if name == 'SCUMM':
            return '0'
        if name.startswith('The '):
            return name[4:]
        return name

    originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
    print(str(len(originals)) + ' games in total')

    try:
        core = Core(source_data=originals,
                    schema_files=['schema_originals.yaml'])
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(originals, core.errors)
        else:
            raise error

    clones = []
    for fn in sorted(os.listdir(op.join(base, 'clones'))):
        if fn.endswith('.yaml'):
            clones.extend(yaml.load(open(op.join(base, 'clones', fn))))
    print(str(len(clones)) + ' clones in total')

    try:
        core = Core(source_data=clones, schema_files=['schema_clones.yaml'])
        core.validate(raise_exception=True)
    except Exception as error:
        if len(core.errors) > 0:
            show_validation_errors(clones, core.errors)
        else:
            raise error

    for item in originals:
        parse_global_tags(site, item.get('meta', {}), 'genre')
        # Recombine originals and clones
        combined = copy.deepcopy(item)
        name = game_name(combined)
        combined_remakes = [
            clone for clone in clones
            if 'remakes' in clone and name in clone['remakes']
        ]
        if len(combined_remakes) > 0:
            combined['remakes'] = combined_remakes
        combined_clones = [
            clone for clone in clones
            if 'clones' in clone and name in clone['clones']
        ]
        if len(combined_clones) > 0:
            combined['clones'] = combined_clones
        parse_items(site, combined, 'remakes')
        parse_items(site, combined, 'clones')
コード例 #50
0
 def test_suite(self):
     c = Core(source_file="wetest/tests/suite_example01.yaml",
             schema_files=["wetest/resources/suite_schema.yaml"])
     c.validate()
コード例 #51
0
 def validate_topology(scheme_file, data_file):
     c = Core(source_file=data_file, schema_files=[scheme_file])
     c.validate(raise_exception=True)
     print('Topology validated')
コード例 #52
0
    def validate_config(self, config_file):
        """Validates configuration file.
        Returns tuple of content, version, where content is validated config dict.
        Else raises ConfigErrors.
        """
        with open(config_file, 'r') as file:
            try:
                config_content = ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader, version=(1, 1))
            except BaseException as exc:
                raise ConfigErrors(config_file, {'at top level': [str(exc)]})


        version = None
        # Validate each worker section against the schema and
        # parse schema to extract types and set up cmd argument parser

        #self._parser = parser = cls.__primary_parser(add_help=True)
        validated_content = OrderedDict()

        errors = OrderedDict()

        for worker, variables in config_content.items():
            # schema_version specifies config version
            if worker == "schema_version":
                version = variables
                continue
            _worker = worker.split("__")[0]

            if worker in self._schemas:
                schema_fn, _ = self._schemas[worker]
            elif _worker in self._schemas:
                schema_fn, _ = self._schemas[worker] = self._schemas[_worker]
            else:
                schema_fn = os.path.join(caracal.pckgdir,"schema", "{0:s}_schema.yml".format(_worker))

                if _worker == "worker" or not os.path.exists(schema_fn):
                    errors[worker] = ["this is not a recognized worker name, or its schema file is missing"]
                    continue

                with open(schema_fn, 'r') as file:
                    full_schema = ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader, version=(1, 1))

                schema = full_schema["mapping"][_worker]
                self._schemas[worker] = self._schemas[_worker] = schema_fn, schema

            # validate worker config
            core = Core(source_data={_worker: variables}, schema_files=[schema_fn])

            validated_content[worker] = core.validate(raise_exception=False)[_worker]

            # check for errors
            if core.validation_errors:
                errs = errors[worker] = []
                for message in core.validation_errors:
                    # crude hack: we're already fooling the schema by using "flag" for the worker name
                    # when the name is e.g. "flag__2", so the message is misleading. Substitute the hack back.
                    message = message.replace("'/{}'".format(_worker), "'/{}'".format(worker))
                    errs.append(message)

        if errors:
            raise ConfigErrors(config_file, errors)

        return validated_content, version
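
The worker.split("__")[0] trick above lets numbered config sections such as flag__2 reuse the flag worker's schema file. In isolation (pckgdir is a stand-in for caracal.pckgdir):

import os

pckgdir = "/opt/caracal"

for worker in ("flag", "flag__2"):
    _worker = worker.split("__")[0]
    print(worker, "->",
          os.path.join(pckgdir, "schema", "{0:s}_schema.yml".format(_worker)))

# flag    -> /opt/caracal/schema/flag_schema.yml
# flag__2 -> /opt/caracal/schema/flag_schema.yml  (numbered sections share one schema)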
コード例 #53
0
def validate(config, schema):
    c = Core(source_file=config, schema_files=[schema])
    c.validate(raise_exception=True)
コード例 #54
0
ファイル: api.py プロジェクト: kpetremann/netprobify
def start(
    targets, module_name, logging_level, config_file="api.yaml", schema_file="api.schema.yaml"
):
    """Start the API.

    Keyword arguments:
    targets -- shared target list
    module_name -- module name set by the main code to separate targets
    logging_level -- level logging set in the global config file
    config_file -- name of the config file for the API
    schema_file -- schema of the config file for the API
    """
    global shared_targets_list
    global global_module_name
    global token
    global max_targets
    global target_lifetime
    global max_target_lifetime

    # initiate list in dict from Manager (shared variable with main code)
    targets[module_name] = []

    # transform config file to absolute path
    entry_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    config_file_abs = os.path.join(entry_path, config_file)

    # load inventory configuration
    if not os.path.exists(config_file_abs):
        log_api.info("No configuration file for api module")
        return

    # getting a reference of this list and module name
    shared_targets_list = targets
    global_module_name = module_name

    # validate yaml config with the schema
    schema = pkg_resources.resource_filename(__name__, schema_file)
    yaml_validator = Core(source_file=config_file_abs, schema_files=[schema])
    yaml_validator.validate(raise_exception=True)

    # if config file exists, we load it and parse it
    with open(config_file_abs, "r") as conf_file:
        try:
            conf = yaml.safe_load(conf_file)
            log_api.debug("network_devices configuration loaded")
        except Exception as error:
            log_api.error("Unable to load config file: {0}".format(error))

    listen_address = conf.get("listen_address", "0.0.0.0")
    listen_port = conf.get("listen_port", 8009)
    groups.extend(conf.get("groups", []))
    token = conf.get("token")
    max_targets = conf.get("max_targets", 10)
    target_lifetime = common.explode_datetime(conf.get("target_lifetime", "1d"))
    max_target_lifetime = common.explode_datetime(conf.get("max_target_lifetime", "30d"))

    # starting the API
    log_api.logging_level = "WARNING"
    app = connexion.FlaskApp(__name__, server="tornado", debug=False)
    app.add_api("api.swagger.yml")
    app.run(host=listen_address, port=listen_port, debug=False)
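
For reference, a hypothetical api.yaml covering every key that start() reads; the values shown are simply the fallbacks used by the conf.get calls above:

import yaml

example_conf = yaml.safe_load("""
listen_address: 0.0.0.0
listen_port: 8009
groups: []
token: null
max_targets: 10
target_lifetime: 1d
max_target_lifetime: 30d
""")

print(example_conf["listen_port"])      # 8009
print(example_conf["target_lifetime"])  # "1d", parsed by common.explode_datetime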
コード例 #55
0
ファイル: test_core.py プロジェクト: isabella232/pykwalify
    def test_core_files(self):
        # These tests should pass with no exception raised
        pass_tests = [
            # Test sequence with only string values
            "1s.yaml",
            # Test sequence where the only valid items is integers
            "2s.yaml",
            # Test sequence with only booleans
            "3s.yaml",
            # Test mapping with different types of data and some extra conditions
            "4s.yaml",
            # Test sequence with mapping with valid mapping
            "5s.yaml",
            # Test mapping with sequence with mapping and valid data
            "6s.yaml",
            # Test most of the implemented functions
            "7s.yaml",
            # This will test the unique constraint
            "8s.yaml",
            #
            "9s.yaml",
            #
            "10s.yaml",
            #
            "11s.yaml",
            # This tests number validation rule
            "12s.yaml",
            # This tests the text validation rule
            "13s.yaml",
            # This tests the any validation rule
            "14s.yaml",
            #
            "15s.yaml",
            #
            # TODO: Currently slightly broken
            # # "16s.yaml",
            # This tests a regex that will compile
            "17s.yaml",
            # Test that type can be set to 'None' and it will validate ok
            "18s.yaml",
            # Test that range validates with map
            "19s.yaml",
            # Test that range validates with seq
            "20s.yaml",
            # Test that 'seq' can use seq instead of 'sequence'
            "21s.yaml",
            # Test that 'map' can be used instead of 'mapping'
            "22s.yaml",
            # Test that 're' can be used instead of 'regex'
            "23s.yaml",
            # Test that 'req' can be used instead of 'required'
            "24s.yaml",
            # Test that there is no need to specify 'type: seq' or 'type: map'
            "25s.yaml",
            # Test that the different types of timestamps can be validated
            "26s.yaml",
            # Test that multiple sequence values are supported
            "27s.yaml",
            # Test that multiple sequence values with matching 'all' are supported
            "28s.yaml",
            # Test that multiple sequence values with matching '*' are supported
            "29s.yaml",
            # Test that multiple sequence values with nested data structures work
            "30s.yaml",
            # Test that multiple sequence values with nested lists work
            "31s.yaml",
            # Test Complex tree with many different structures
            "32s.yaml",
        ]

        _fail_tests = [
            # Test sequence with defined string content type but data only has integers
            ("1f.yaml", SchemaError),
            # Test sequence with defined string content type but data only has booleans
            ("2f.yaml", SchemaError),
            # Test sequence with defined booleans but with one integer
            ("3f.yaml", SchemaError),
            # Test sequence with strings and a length limit on each string
            ("4f.yaml", SchemaError),
            # Test a mapping that does not work
            ("5f.yaml", SchemaError),
            # Test sequence with mapping with missing required key
            ("6f.yaml", SchemaError),
            # Test mapping with sequence with mapping and invalid data
            ("7f.yaml", SchemaError),
            #
            ("8f.yaml", SchemaError),
            # TODO: The reverse unique does not currently work properly # This will test the unique constraint but should fail
            ("9f.yaml", SchemaError),
            # This tests number validation rule with wrong data
            ("10f.yaml", SchemaError),
            # This tests the text validation rule with wrong data
            ("11f.yaml", SchemaError),
            # This tests that typechecking works when a value in a map is None
            ("12f.yaml", SchemaError),
            # Test that range validates on 'map' raise correct error
            ("13f.yaml", SchemaError),
            # Test that range validates on 'seq' raise correct error
            ("14f.yaml", SchemaError),
            # Test timestamps that should throw errors
            ("15f.yaml", SchemaError),
            # Test multiple sequence values with wrong sub type and 'any' matching rule
            ("16f.yaml", SchemaError),
            # Test multiple sequence values with wrong sub type and 'all' matching rule
            ("17f.yaml", SchemaError),
            # Test multiple nested sequence values with error in level 2 with 'any' matching rule
            ("18f.yaml", SchemaError),
        ]

        # Add override magic to make it easier to test a specific file
        if "S" in os.environ:
            pass_tests = [os.environ["S"]]
            _fail_tests = []
        elif "F" in os.environ:
            pass_tests = []
            _fail_tests = [(os.environ["F"], SchemaError)]

        for passing_test_file in pass_tests:
            f = self.f(os.path.join("success", passing_test_file))
            with open(f, "r") as stream:
                yaml_data = yaml.load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]

            try:
                print("Running test files: {}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
                compare(c.validation_errors, [],
                        prefix="No validation errors should exist...")
            except Exception as e:
                print("ERROR RUNNING FILES: {}".format(f))
                raise e

            # This serves as an extra schema validation that tests more complex structures than testrule.py does
            compare(
                c.root_rule._schema_str,
                schema,
                prefix="Parsed rules are not correct, something has changed... files : {}".format(f))

        for failing_test, exception_type in _fail_tests:
            f = self.f(os.path.join("fail", failing_test))
            with open(f, "r") as stream:
                yaml_data = yaml.load(stream)
                data = yaml_data["data"]
                schema = yaml_data["schema"]
                errors = yaml_data["errors"]

            try:
                print("Running test files: {}".format(f))
                c = Core(source_data=data, schema_data=schema)
                c.validate()
            except exception_type:
                pass  # OK
            else:
                raise AssertionError(
                    "Exception {} not raised as expected... FILES: {} : {}".format(
                        exception_type, f, failing_test))

            compare(sorted(c.validation_errors),
                    sorted(errors),
                    prefix="Wrong validation errors when parsing files : {}".
                    format(f))
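
Finally, a sketch of what one of the "fail" files consumed above might contain; the errors list mirrors the older pykwalify message format quoted in the fail_tests of the earlier examples:

import yaml

doc = yaml.safe_load("""
data:
  - 1
  - 2
schema:
  type: seq
  sequence:
    - type: str
errors:
  - "Value: 1 is not of type 'str' : /0"
  - "Value: 2 is not of type 'str' : /1"
""")

# the test harness validates doc["data"] against doc["schema"] and then
# compares the collected validation_errors with doc["errors"]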