Example #1
def _yaml_schema(cls) -> Validator:
    return strictyaml.Map({
        "project-id": strictyaml.Str(),
        "project-slug": strictyaml.Str(),
        strictyaml.Optional("changelog-file"): strictyaml.Str(),
    })
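
For reference, a minimal usage sketch for a schema like this one (the YAML text and values are illustrative, not from the original project):

import strictyaml

schema = strictyaml.Map({
    "project-id": strictyaml.Str(),
    "project-slug": strictyaml.Str(),
    strictyaml.Optional("changelog-file"): strictyaml.Str(),
})

# "changelog-file" may be omitted because it is wrapped in Optional()
data = strictyaml.load("project-id: '1234'\nproject-slug: my-project", schema).data
assert data == {"project-id": "1234", "project-slug": "my-project"}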
Example #2
def _yaml_schema(cls) -> strictyaml.Validator:
    return strictyaml.Map({
        "tags": strictyaml.MapPattern(strictyaml.Str(), strictyaml.Str()),
        "files": strictyaml.Seq(strictyaml.Str()),
    })
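
Pairing MapPattern (arbitrary string keys) with Seq, a document such as the following would validate (contents are made up):

import strictyaml

schema = strictyaml.Map({
    "tags": strictyaml.MapPattern(strictyaml.Str(), strictyaml.Str()),
    "files": strictyaml.Seq(strictyaml.Str()),
})

document = """\
tags:
  author: someone
  license: MIT
files:
- Main.lua
- Util.lua
"""
# MapPattern accepts arbitrary string keys; Seq requires a non-empty list
print(strictyaml.load(document, schema).data)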
Example #3
from os import path

import strictyaml


def load_config():
    schema = strictyaml.Map({
        'email_address': strictyaml.Email(),
        'topic_arn': strictyaml.Str(),
        'meals': strictyaml.Seq(
            strictyaml.MapPattern(strictyaml.Str(), strictyaml.Str())),
    })

    root_dir = path.abspath(path.dirname(__file__))
    with open(path.join(root_dir, 'config.yaml')) as config_file:
        return strictyaml.load(config_file.read(), schema).data
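
A config.yaml satisfying this schema might look like the sketch below (all values are illustrative); Email() additionally checks that the value looks like an email address:

import strictyaml

schema = strictyaml.Map({
    'email_address': strictyaml.Email(),
    'topic_arn': strictyaml.Str(),
    'meals': strictyaml.Seq(
        strictyaml.MapPattern(strictyaml.Str(), strictyaml.Str())),
})

sample = """\
email_address: alerts@example.com
topic_arn: arn:aws:sns:us-east-1:123456789012:meals
meals:
- monday: pasta
- tuesday: tacos
"""
print(strictyaml.load(sample, schema).data)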
Example #4
def _yaml_schema(cls) -> strictyaml.Validator:
    return strictyaml.Map({
        "name": strictyaml.Str(),
        "wow-versions": strictyaml.Seq(strictyaml.Str()),
        strictyaml.Optional("curseforge"): CurseforgeConfig._yaml_schema(),
        "addons": strictyaml.Seq(AddonConfig._yaml_schema()),
        strictyaml.Optional("tags"): strictyaml.MapPattern(strictyaml.Str(),
                                                           strictyaml.Str()),
    })
Example #5
def _ProtoFieldToSchema(field):
  """Convert a Proto Field to strictyaml schema."""
  if field.type == field.TYPE_STRING:
    return syaml.Str()
  if field.type == field.TYPE_BOOL:
    return syaml.Bool()
  if field.type in (field.TYPE_INT32, field.TYPE_UINT32,
                    field.TYPE_INT64, field.TYPE_UINT64):
    return syaml.Int()
  if field.type in (field.TYPE_DOUBLE, field.TYPE_FLOAT):
    return syaml.Decimal()
  if field.type == field.TYPE_MESSAGE:
    return _ProtoDescriptorToSchema(field.message_type)
  if field.type == field.TYPE_ENUM:
    return syaml.Str()
  raise ConfigError('Unknown field type in lab_config_pb2: %r.' % field.type)
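
_ProtoDescriptorToSchema is referenced above but not shown. A plausible sketch of it (an assumption based on the call site, not the original implementation) walks the message descriptor's fields via _ProtoFieldToSchema and wraps repeated fields in Seq:

def _ProtoDescriptorToSchema(descriptor):
  """Hypothetical sketch: build a syaml.Map schema from a message descriptor."""
  schema = {}
  for field in descriptor.fields:
    field_schema = _ProtoFieldToSchema(field)
    if field.label == field.LABEL_REPEATED:
      # repeated proto fields map naturally to YAML sequences
      field_schema = syaml.Seq(field_schema)
    # mark every key optional since proto fields may be unset
    schema[syaml.Optional(field.name)] = field_schema
  return syaml.Map(schema)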
Example #6
def _load_yaml_file(cls, yaml_content):
    import strictyaml
    schema = strictyaml.Map({
        "plugin_name": strictyaml.Str(),
        "plugin_version": strictyaml.Str(),
        "author": strictyaml.Str(),
        "email": strictyaml.Str(),
        "shared_lib": strictyaml.Str(),
    })
    plugin_config_file_content = strictyaml.load(yaml_content, schema).data
    if sys.platform == 'win32':
        plugin_config_file_content['shared_lib_name'] = f"{plugin_config_file_content['shared_lib']}.dll"
    else:
        plugin_config_file_content['shared_lib_name'] = f"lib{plugin_config_file_content['shared_lib']}.so"
    return plugin_config_file_content
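
With a plugin definition such as this hypothetical one, shared_lib_name is derived from the platform rather than read from the file:

plugin_yaml = """\
plugin_name: demo
plugin_version: '0.1'
author: Jane Doe
email: jane@example.com
shared_lib: demo
"""
# On Linux the result would contain shared_lib_name == "libdemo.so",
# on Windows shared_lib_name == "demo.dll".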
Example #7
def _get_match_schema(self):
    return strictyaml.Map(
        {
            "duration": strictyaml.Int(),
            "matches": strictyaml.Seq(
                strictyaml.Map(
                    {
                        "away": strictyaml.Str(),
                        "date": strictyaml.Str(),
                        "newdate": strictyaml.Str(),
                        "our_score": strictyaml.Int(),
                        "opp_score": strictyaml.Int(),
                    }
                )
            ),
        }
    )
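
A document accepted by this schema could look like the following (teams and scores are invented); note that Int() parses the scalars into Python ints:

import strictyaml

schema = strictyaml.Map({
    "duration": strictyaml.Int(),
    "matches": strictyaml.Seq(strictyaml.Map({
        "away": strictyaml.Str(),
        "date": strictyaml.Str(),
        "newdate": strictyaml.Str(),
        "our_score": strictyaml.Int(),
        "opp_score": strictyaml.Int(),
    })),
})

document = """\
duration: 90
matches:
- away: Rivertown
  date: 2021-05-01
  newdate: 2021-05-08
  our_score: 2
  opp_score: 1
"""
data = strictyaml.load(document, schema).data
assert data["duration"] == 90 and data["matches"][0]["our_score"] == 2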
Example #8
def _load_yaml_file(cls, yaml_content):
    import strictyaml

    schema = strictyaml.Map({
        "caption": strictyaml.Str(),
        "version": strictyaml.Str(),
        "author": strictyaml.Str(),
        "email": strictyaml.Str(),
        "id": strictyaml.Str(),
    })
    plugin_config_file_content = strictyaml.load(yaml_content, schema).data
    if sys.platform == "win32":
        plugin_config_file_content["shared_lib_name"] = f"{plugin_config_file_content['id']}.dll"
    else:
        plugin_config_file_content["shared_lib_name"] = f"lib{plugin_config_file_content['id']}.so"
    return plugin_config_file_content
Example #10
  def _ValidateBlock(self, unvalidated_block: str,
                     validation_fn: Callable[[syaml.YAML], None]) -> None:
    """Validates a yaml-formatted string using the provided function.

    Args:
      unvalidated_block: string. A block of unvalidated entities
      validation_fn: a validation function that takes YAML as an argument
    """
    try:
      validated = syaml.load(unvalidated_block,
                             syaml.MapPattern(syaml.Str(), syaml.Any()))
      validation_fn(validated)

    except (ValueError, ruamel.yaml.parser.ParserError,
            ruamel.yaml.scanner.ScannerError,
            syaml.exceptions.YAMLValidationError,
            syaml.exceptions.DuplicateKeysDisallowed,
            syaml.exceptions.InconsistentIndentationDisallowed) as exception:
      print(exception)
      sys.exit(0)
Example #11
_CONFIG_METADATA_PATTERN = re.compile(_CONFIG_METADATA_REGEX)
# Key that marks the mode to parse file in.
_CONFIG_MODE_KEY = 'operation'

# A valid device field must match this
_FIELD_REGEX = u'^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*(?:_[0-9]+)*$'
"""Schema separately parses translation to account for multiple valid formats

github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md
#defining-translations
"""
_TRANSLATION_SCHEMA = syaml.MapPattern(
    syaml.Regex(_FIELD_REGEX),
    # Note: This block is somewhat permissive as the logic was difficult to
    # implement in syaml.  Additional validation occurs in EntityInstance
    syaml.Str() | syaml.Map({
        PRESENT_VALUE_KEY:
            syaml.Str(),
        syaml.Optional(STATES_KEY):
            syaml.MapPattern(
                syaml.Regex(u'^[A-Z][A-Z_]+'),
                syaml.Str() | syaml.Seq(syaml.Str())),
        syaml.Optional(UNITS_KEY):
            syaml.Map({
                UNIT_NAME_KEY: syaml.Str(),
                UNIT_VALUES_KEY: syaml.MapPattern(syaml.Str(), syaml.Str())
            }),
    }))
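
# For illustration, a translation block accepted by _TRANSLATION_SCHEMA could
# look like the YAML below, assuming the *_KEY constants hold the literal
# strings 'present_value', 'states', 'units', 'key' and 'values' (field and
# state names are invented):
#
#   zone_air_temperature_sensor:
#     present_value: points.temperature.present_value
#     units:
#       key: pointset.points.temperature.units
#       values:
#         degrees_celsius: degC
#   run_status:
#     present_value: points.status.present_value
#     states:
#       'ON': active
#       'OFF': inactive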

_METADATA_SCHEMA = syaml.Map({
    syaml.Optional(_CONFIG_MODE_KEY):
Example #13
class PipelineConfig:
    """Pipeline run configuration.

    Attributes:
        SCHEMA: class attribute containing the YAML schema for the run config.
        TEMPLATE_PATH: class attribute containing the path to the default Jinja2 run
            config template file.
        epoch_based: boolean indicating if the original run config inputs were provided
            with user-defined epochs.

    Raises:
        PipelineConfigError: the input YAML config violates the schema.
    """

    # key: config input type, value: boolean indicating if it is required
    _REQUIRED_INPUT_TYPES: Dict[str, bool] = {
        "image": True,
        "selavy": True,
        "noise": True,
        "background": False,
    }
    # Inputs may be optional. Each input will be either a unique list or a mapping
    # (epoch mode and/or glob expressions). These possibilities cannot be validated at
    # once, so the schema accepts Any here and revalidates later.
    _SCHEMA_INPUTS = {
        (k if v else yaml.Optional(k)):
            yaml.MapPattern(yaml.Str(), yaml.Any()) | yaml.UniqueSeq(yaml.Str())
        for k, v in _REQUIRED_INPUT_TYPES.items()
    }
    _SCHEMA_GLOB_INPUTS = {"glob": yaml.Str() | yaml.Seq(yaml.Str())}
    _VALID_ASSOC_METHODS: List[str] = ["basic", "advanced", "deruiter"]
    SCHEMA = yaml.Map({
        "run": yaml.Map({
            "path": yaml.Str(),
            "suppress_astropy_warnings": yaml.Bool(),
        }),
        "inputs": yaml.Map(_SCHEMA_INPUTS),
        "source_monitoring": yaml.Map({
            "monitor": yaml.Bool(),
            "min_sigma": yaml.Float(),
            "edge_buffer_scale": yaml.Float(),
            "cluster_threshold": yaml.Float(),
            "allow_nan": yaml.Bool(),
        }),
        "source_association": yaml.Map({
            "method": yaml.Enum(_VALID_ASSOC_METHODS),
            "radius": yaml.Float(),
            "deruiter_radius": yaml.Float(),
            "deruiter_beamwidth_limit": yaml.Float(),
            "parallel": yaml.Bool(),
            "epoch_duplicate_radius": yaml.Float(),
        }),
        "new_sources": yaml.Map({
            "min_sigma": yaml.Float(),
        }),
        "measurements": yaml.Map({
            "source_finder": yaml.Enum(["selavy"]),
            "flux_fractional_error": yaml.Float(),
            "condon_errors": yaml.Bool(),
            "selavy_local_rms_fill_value": yaml.Float(),
            "write_arrow_files": yaml.Bool(),
            "ra_uncertainty": yaml.Float(),
            "dec_uncertainty": yaml.Float(),
        }),
        "variability": yaml.Map({
            "source_aggregate_pair_metrics_min_abs_vs": yaml.Float(),
        }),
    })
    # path to default run config template
    TEMPLATE_PATH: str = os.path.join(settings.BASE_DIR, "vast_pipeline",
                                      "config_template.yaml.j2")

    def __init__(self, config_yaml: yaml.YAML):
        """Initialises PipelineConfig with parsed (but not necessarily validated) YAML.

        Args:
            config_yaml (yaml.YAML): Input YAML, usually the output of `strictyaml.load`.

        Raises:
            PipelineConfigError: The input YAML config violates the schema.
        """
        self._yaml: yaml.YAML = config_yaml
        # The epoch_based attribute below records whether the user defined their own
        # epochs. If the user has entered plain lists, we don't have access to the dates
        # until the Image instances are created, so we flag the run so that the epochs
        # can be reordered once the date information is available. The flag is also
        # recorded in the database so that there is a record that the run was processed
        # in an epoch-based mode.
        self.epoch_based: bool

        # Determine if epoch-based association should be used based on input files.
        # If inputs have been parsed to dicts, then the user has defined their own epochs.
        # If inputs have been parsed to lists, we must convert to dicts and auto-fill
        # the epochs.

        # ensure the inputs are valid in case .from_file(..., validate=False) was used
        try:
            self._validate_inputs()
        except yaml.YAMLValidationError as e:
            raise PipelineConfigError(e)

        # detect simple list inputs and convert them to epoch-mode inputs
        for input_file_type in self._REQUIRED_INPUT_TYPES:
            # skip missing optional input types, e.g. background
            if (not self._REQUIRED_INPUT_TYPES[input_file_type]
                    and input_file_type not in self["inputs"]):
                continue

            input_files = self["inputs"][input_file_type]

            # resolve glob expressions if present
            if isinstance(input_files, dict):
                # must be either a glob expression, list of glob expressions, or epoch-mode
                if "glob" in input_files:
                    # resolve the glob expressions
                    self.epoch_based = False
                    file_list = self._resolve_glob_expressions(
                        self._yaml["inputs"][input_file_type])
                    self._yaml["inputs"][
                        input_file_type] = self._create_input_epochs(file_list)
                else:
                    # epoch-mode with either a list of files or glob expressions
                    self.epoch_based = True
                    for epoch in input_files:
                        if "glob" in input_files[epoch]:
                            # resolve the glob expressions
                            file_list = self._resolve_glob_expressions(
                                self._yaml["inputs"][input_file_type][epoch])
                            self._yaml["inputs"][input_file_type][
                                epoch] = file_list
            else:
                # Epoch-based association not requested and no globs present. Replace
                # input lists with dicts where each input file has its own epoch.
                self.epoch_based = False
                self._yaml["inputs"][
                    input_file_type] = self._create_input_epochs(input_files)

    def __getitem__(self, name: str):
        """Retrieves the requested YAML chunk as a native Python object."""
        return self._yaml[name].data

    @staticmethod
    def _create_input_epochs(input_files: List[str]) -> Dict[str, List[str]]:
        """Convert a list of input files into a dict where each list element is placed
        into its own list of length 1 and mapped to by a unique key, a string that is a
        0-padded integer. For example, ["A", "B", "C", ..., "Z"] would be converted to
        {
            "01": ["A"],
            "02": ["B"],
            "03": ["C"],
            ...
            "26": ["Z"],
        }
        The keys are 0-padded to ensure the strings are sortable regardless of the
        length of `input_files`.
        This conversion is required for run configs that are not defined in "epoch mode"
        as after config validation, the pipeline assumes that there will be defined
        epochs.

        Args:
            input_files (List[str]): the list of input file paths.

        Returns:
            Dict[str, List[str]]: the input file paths mapped to by unique epoch keys.
        """
        pad_width = len(str(len(input_files)))
        input_files_dict = {
            f"{i + 1:0{pad_width}}": [val]
            for i, val in enumerate(input_files)
        }
        return input_files_dict

    @classmethod
    def from_file(
        cls,
        yaml_path: str,
        label: str = "run config",
        validate: bool = True,
        add_defaults: bool = True,
    ) -> "PipelineConfig":
        """Create a PipelineConfig object from a run configuration YAML file.

        Args:
            yaml_path: Path to the run config YAML file.
            label: A label for the config object that will be used in error messages.
                Default is "run config".
            validate: Perform config schema validation immediately after loading
                the config file. If set to False, the full schema validation
                will not be performed until PipelineConfig.validate() is
                explicitly called. The inputs are always validated regardless.
                Defaults to True.
            add_defaults: Add missing configuration parameters using configured
                defaults. The defaults are read from the Django settings file.
                Defaults to True.

        Raises:
            PipelineConfigError: The run config YAML file fails schema validation.

        """
        schema = PipelineConfig.SCHEMA if validate else yaml.Any()
        with open(yaml_path) as fh:
            config_str = fh.read()
        try:
            config_yaml = yaml.load(config_str, schema=schema, label=label)
        except yaml.YAMLValidationError as e:
            raise PipelineConfigError(e)

        if add_defaults:
            # make a template config based on defaults
            config_defaults_str = make_config_template(
                cls.TEMPLATE_PATH,
                **settings.PIPE_RUN_CONFIG_DEFAULTS,
            )
            config_defaults_dict: Dict[str, Any] = yaml.load(
                config_defaults_str).data

            # merge configs
            config_dict = dict_merge(config_defaults_dict, config_yaml.data)
            config_yaml = yaml.as_document(config_dict,
                                           schema=schema,
                                           label=label)
        return cls(config_yaml)

    @staticmethod
    def _resolve_glob_expressions(input_files: yaml.YAML) -> List[str]:
        """Resolve glob expressions in a YAML chunk, returning a list of sorted file
        paths.

        Args:
            input_files (yaml.YAML): A validated YAML chunk of input files that is a
                mapping of "glob" to either a single glob expression or a sequence of
                glob expressions. e.g.
                ---
                glob: /foo/*.fits
                ---
                or
                ---
                glob:
                - /foo/A/*.fits
                - /foo/B/*.fits
                ---

        Returns:
            List[str]: The resolved file paths in lexicographical order.
        """
        file_list: List[str] = []
        if input_files["glob"].is_sequence():
            for glob_expr in input_files["glob"]:
                file_list.extend(sorted(list(glob(glob_expr.data))))
        else:
            file_list.extend(sorted(list(glob(input_files["glob"].data))))
        return file_list

    def _validate_inputs(self):
        """Validate the input files. Each input type (i.e. image, selavy, noise,
        background) may be given as one of the following:
            1. A list of files.
            2. A glob expression.
            3. A list of glob expressions.
            4. A mapping of epochs to any of the above.
        Each input type is validated individually. Extra input validation steps, e.g. to
        ensure each input type has the same number of files, are performed in
        `validate()`.

        Raises:
            PipelineConfigError: The run config inputs fail schema validation.
        """
        try:
            # first pass validation
            self._yaml["inputs"].revalidate(yaml.Map(self._SCHEMA_INPUTS))

            for input_type in self._yaml["inputs"]:
                input_yaml = self._yaml["inputs"][input_type]
                if input_yaml.is_mapping():
                    # inputs are either epoch-mode, glob expressions, or both
                    if "glob" in input_yaml:
                        # validate globs
                        input_yaml.revalidate(
                            yaml.Map(self._SCHEMA_GLOB_INPUTS))
                    else:
                        # validate epoch mode which may also contain glob expressions
                        input_yaml.revalidate(
                            yaml.MapPattern(
                                yaml.Str(),
                                yaml.UniqueSeq(yaml.Str())
                                | yaml.Map(self._SCHEMA_GLOB_INPUTS),
                            ))
        except yaml.YAMLValidationError as e:
            raise PipelineConfigError(e)

    def validate(self, user: User = None):
        """Perform extra validation steps not covered by the default schema validation.
        The following checks are performed in order. If a check fails, an exception is
        raised and no further checks are performed.

        1. All input files have the same number of epochs and the same number of files
            per epoch.
        2. The number of input files does not exceed the configured pipeline maximum.
            This is only enforced if a regular user (not staff/admin) created the run.
        3. There are at least two input images.
        4. Background input images are required if source monitoring is turned on.
        5. All input files exist.

        Args:
            user: Optional. The User of the request if made through the UI. Defaults to
                None.

        Raises:
            PipelineConfigError: a validation check failed.
        """
        # run standard base schema validation
        try:
            self._yaml.revalidate(self.SCHEMA)
        except yaml.YAMLValidationError as e:
            raise PipelineConfigError(e)

        # epochs defined for images only, used as the reference list of epochs
        epochs_image = self["inputs"]["image"].keys()
        # map input type to a set of epochs
        epochs_by_input_type = {
            input_type: set(self["inputs"][input_type].keys())
            for input_type in self["inputs"].keys()
        }
        # map input type to total number of files from all epochs
        n_files_by_input_type = {}
        for input_type, epochs_set in epochs_by_input_type.items():
            n_files_by_input_type[input_type] = 0
            for epoch in epochs_set:
                n_files_by_input_type[input_type] += len(
                    self["inputs"][input_type][epoch])
        n_files = 0  # total number of input files
        # map input type to a mapping of epoch to file count
        epoch_n_files: Dict[str, Dict[str, int]] = {}
        for input_type in self["inputs"].keys():
            epoch_n_files[input_type] = {}
            for epoch in self["inputs"][input_type].keys():
                n = len(self["inputs"][input_type][epoch])
                epoch_n_files[input_type][epoch] = n
                n_files += n

        # Note by this point the input files have been converted to a mapping regardless
        # of the user's input format.
        # Ensure all input file types have the same epochs.
        try:
            for input_type in self["inputs"].keys():
                self._yaml["inputs"][input_type].revalidate(
                    yaml.Map({
                        epoch: yaml.Seq(yaml.Str())
                        for epoch in epochs_image
                    }))
        except yaml.YAMLValidationError:
            # number of epochs could be different or the name of the epochs may not match
            # find out which by counting the number of unique epochs per input type
            n_epochs_per_input_type = [
                len(epochs_set)
                for epochs_set in epochs_by_input_type.values()
            ]
            if len(set(n_epochs_per_input_type)) > 1:
                if self.epoch_based:
                    error_msg = "The number of epochs must match for all input types.\n"
                else:
                    error_msg = "The number of files must match for all input types.\n"
            else:
                error_msg = "The name of the epochs must match for all input types.\n"
            counts_str = ""
            if self.epoch_based:
                for input_type in epoch_n_files.keys():
                    n = len(epoch_n_files[input_type])
                    counts_str += (
                        f"{input_type} has {n} epoch{'s' if n > 1 else ''}:"
                        f" {', '.join(epoch_n_files[input_type].keys())}\n")
            else:
                for input_type, n in n_files_by_input_type.items():
                    counts_str += f"{input_type} has {n} file{'s' if n > 1 else ''}\n"

            counts_str = counts_str[:-1]
            raise PipelineConfigError(error_msg + counts_str)

        # Ensure all input file type epochs have the same number of files per epoch.
        # This could be combined with the number of epochs validation above, but we want
        # to give specific feedback to the user on failure.
        try:
            for input_type in self["inputs"].keys():
                self._yaml["inputs"][input_type].revalidate(
                    yaml.Map({
                        epoch: yaml.FixedSeq([
                            yaml.Str()
                            for _ in range(epoch_n_files["image"][epoch])
                        ])
                        for epoch in epochs_image
                    }))
        except yaml.YAMLValidationError:
            # map input type to a mapping of epoch to file count
            file_counts_str = ""
            for input_type in self["inputs"].keys():
                file_counts_str += f"{input_type}:\n"
                for epoch in sorted(self["inputs"][input_type].keys()):
                    file_counts_str += (
                        f"  {epoch}: {len(self['inputs'][input_type][epoch])}\n"
                    )
            file_counts_str = file_counts_str[:-1]
            raise PipelineConfigError(
                "The number of files per epoch does not match between input types.\n"
                + file_counts_str)

        # ensure the number of input files is less than the user limit
        if user and n_files > settings.MAX_PIPERUN_IMAGES:
            if user.is_staff:
                logger.warning(
                    "Maximum number of images"
                    f" ({settings.MAX_PIPERUN_IMAGES}) rule bypassed with"
                    " admin status.")
            else:
                raise PipelineConfigError(
                    f"The number of images entered ({n_files})"
                    " exceeds the maximum number of images currently"
                    f" allowed ({settings.MAX_PIPERUN_IMAGES}). Please ask"
                    " an administrator for advice on processing your run.")

        # ensure at least two inputs are provided
        check = [
            n_files_by_input_type[input_type] < 2
            for input_type in self["inputs"].keys()
        ]
        if any(check):
            raise PipelineConfigError(
                "Number of image files must to be larger than 1")

        # ensure background files are provided if source monitoring is requested
        if self["source_monitoring"]["monitor"]:
            inputs_schema = yaml.Map({
                k: yaml.UniqueSeq(yaml.Str())
                | yaml.MapPattern(yaml.Str(), yaml.UniqueSeq(yaml.Str()))
                for k in self._REQUIRED_INPUT_TYPES
            })
            try:
                self._yaml["inputs"].revalidate(inputs_schema)
            except yaml.YAMLValidationError:
                raise PipelineConfigError(
                    "Background files must be provided if source monitoring is enabled."
                )

        # ensure the input files all exist
        for input_type in self["inputs"].keys():
            for epoch, file_list in self["inputs"][input_type].items():
                for file in file_list:
                    if not os.path.exists(file):
                        raise PipelineConfigError(f"{file} does not exist.")

    def check_prev_config_diff(self) -> bool:
        """
        Checks whether the previous config file differs from the current config file in
        a way that is incompatible with add mode. Returns False only if the images have
        changed while the other general settings are unchanged (the requirement for add
        mode); in every other case, including identical config files, True is returned.

        Returns:
            False if the images differ but the general settings are the same; True
            otherwise.
        """
        prev_config = PipelineConfig.from_file(
            os.path.join(self["run"]["path"], "config_prev.yaml"),
            label="previous run config",
        )
        if self._yaml == prev_config._yaml:
            return True

        # are the input image files different?
        images_changed = self["inputs"]["image"] != prev_config["inputs"][
            "image"]

        # are all the non-input file configs the same?
        config_dict = self._yaml.data
        prev_config_dict = prev_config._yaml.data
        _ = config_dict.pop("inputs")
        _ = prev_config_dict.pop("inputs")
        settings_check = config_dict == prev_config_dict

        if images_changed and settings_check:
            return False
        return True
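
Typical usage of the class above might look like this sketch (the path and label are hypothetical):

config = PipelineConfig.from_file("my-run/config.yaml", label="my run config")
config.validate()  # raises PipelineConfigError if any check fails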
Example #14
_ENTITY_INSTANCE_REGEX = '^[A-Z][A-Z0-9\\-]+:'
_ENTITY_INSTANCE_PATTERN = re.compile(_ENTITY_INSTANCE_REGEX)

_IGNORE_PATTERN = re.compile(r'^(\W)*#|\n')
# number of entities to validate per batch
_ENTITIES_PER_BATCH = 1
_COMPLIANT_REGEX = u'^COMPLIANT$'
_TRANSLATION = 'translation'
_FIELD_REGEX = u'^[a-z]+[a-z0-9]*(?:_[a-z]+[a-z0-9]*)*(?:_[0-9]+)*$'

"""Schema separately parses translation to account for multiple valid formats
github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md
#defining-translations"""
_TRANSLATION_SCHEMA = syaml.Regex(_COMPLIANT_REGEX) | syaml.MapPattern(
    syaml.Regex(_FIELD_REGEX),
    syaml.Str() | syaml.Map({'present_value': syaml.Str(),
                             syaml.Optional('states'): syaml.MapPattern(
                                 syaml.Regex(u'^[A-Z][A-Z_]+'), syaml.Str()),
                             syaml.Optional('units'): syaml.Map(
                                 {'key': syaml.Str(),
                                  'values': syaml.MapPattern(syaml.Str(),
                                                             syaml.Str())
                                  }),
                             syaml.Optional('unit_values'): syaml.MapPattern(
                                 syaml.Str(), syaml.Str())
                             }))
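
# For illustration, both of the following translation values would satisfy
# _TRANSLATION_SCHEMA (field names are invented):
#
#   translation: COMPLIANT        # matches the Regex(_COMPLIANT_REGEX) branch
#
#   translation:
#     supply_air_temperature_sensor:
#       present_value: points.sat.present_value
#       units:
#         key: pointset.points.sat.units
#         values:
#           degrees_fahrenheit: degF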

"""strictyaml schema parses a YAML instance from its first level of keys
github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md
#config-format"""
_SCHEMA = syaml.MapPattern(syaml.Str(),
Example #15
"""Parses and validates YAML instance files for syntax"""

import strictyaml as syaml

_COMPLIANT = 'COMPLIANT'
_TRANSLATION = 'translation'

"""Schema separately parses translation to account for multiple valid formats
github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md#defining-translations"""
_TRANSLATION_SCHEMA = syaml.Str() | syaml.Any()

# TODO check valid ontological content in next validation steps
"""strictyaml schema parses a YAML instance from its first level of keys
github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md#config-format"""
_SCHEMA = syaml.MapPattern(syaml.Str(),
                           syaml.Map({
                               'type': syaml.Str(),
                               'id': syaml.Str(),
                               syaml.Optional('connections'):
                               syaml.MapPattern(syaml.Str(),
                                                syaml.Str()) |
                               syaml.Seq(
                                   syaml.MapPattern(syaml.Str(),
                                                    syaml.Str())),
                               syaml.Optional('links'): syaml.MapPattern(
Example #16
from pathlib import Path
from operator import itemgetter

from pelican import signals
import strictyaml as yaml

SCHEMA = yaml.Seq(
    yaml.Map({
        'date':
        yaml.Datetime(),
        'title':
        yaml.Str(),
        'event':
        yaml.Str(),
        yaml.Optional('lang', default='cs'):
        yaml.Enum([
            'cs',
            'en',
        ]),
        yaml.Optional('type', default='talk'):
        yaml.Enum([
            'talk',
            'workshop',
            'interview',
            'text',
        ]),
        yaml.Optional('url'):
        yaml.Url(),
        yaml.Optional('resources_type', default='slides'):
        yaml.Enum([
            'slides',
Example #17
class HierarchicalCategorization(Categorization):
    """In a hierarchical categorization, descendants and ancestors (parents and
    children) are defined for each category.

    Attributes
    ----------
    total_sum : bool
        If the sum of the values of children equals the value of the parent for
        extensive quantities. For example, a Categorization containing the Countries in
        the EU and the EU could set `total_sum = True`, because the emissions of all
        parts of the EU must equal the emissions of the EU. On the contrary, a
        categorization of Industries with categories `Power:Fossil Fuels` and
        `Power:Gas` which are both children of `Power` must set `total_sum = False`
        to avoid double counting of fossil gas.
    canonical_top_level_category : HierarchicalCategory
        The level of a category is calculated with respect to the canonical top level
        category. Commonly, this will be the world total or a similar category. If the
        canonical top level category is not set (i.e. is ``None``), levels are not
        defined for categories.
    """

    hierarchical = True

    _strictyaml_schema = sy.Map({
        "name": sy.Str(),
        "title": sy.Str(),
        "comment": sy.Str(),
        "references": sy.Str(),
        "institution": sy.Str(),
        "last_update": sy.Str(),
        "hierarchical": sy.Bool(),
        sy.Optional("version"): sy.Str(),
        "total_sum": sy.Bool(),
        sy.Optional("canonical_top_level_category"): sy.Str(),
        "categories": sy.MapPattern(sy.Str(),
                                    HierarchicalCategory._strictyaml_schema),
    })
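
    # For reference, a minimal spec accepted by this schema might look like the
    # YAML below (values invented; the per-category keys are governed by
    # HierarchicalCategory._strictyaml_schema, which is not shown here):
    #
    #   name: EXAMPLE
    #   title: An example categorization
    #   comment: Invented for illustration
    #   references: none
    #   institution: none
    #   last_update: '2021-01-01'
    #   hierarchical: yes
    #   total_sum: yes
    #   categories:
    #     '1':
    #       title: Energy
    #       children:
    #       - ['1.A', '1.B']
    #     '1.A':
    #       title: Fuel combustion
    #     '1.B':
    #       title: Fugitive emissions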

    def _add_categories(self, categories: typing.Dict[str, typing.Dict]):
        for code, spec in categories.items():
            cat = HierarchicalCategory.from_spec(code=code,
                                                 spec=spec,
                                                 categorization=self)

            self._primary_code_map[code] = cat
            self._graph.add_node(cat)
            for icode in cat.codes:
                self._all_codes_map[icode] = cat

        for code, spec in categories.items():
            if "children" in spec:
                parent = self._all_codes_map[code]
                for i, child_set in enumerate(spec["children"]):
                    for child_code in child_set:
                        self._graph.add_edge(parent,
                                             self._all_codes_map[child_code],
                                             set=i)

    def __init__(
        self,
        *,
        categories: typing.Dict[str, typing.Dict],
        name: str,
        title: str,
        comment: str,
        references: str,
        institution: str,
        last_update: datetime.date,
        version: typing.Optional[str] = None,
        total_sum: bool,
        canonical_top_level_category: typing.Optional[str] = None,
    ):
        self._graph = nx.MultiDiGraph()
        Categorization.__init__(
            self,
            categories=categories,
            name=name,
            title=title,
            comment=comment,
            references=references,
            institution=institution,
            last_update=last_update,
            version=version,
        )
        self.total_sum = total_sum
        if canonical_top_level_category is None:
            self.canonical_top_level_category: typing.Optional[
                HierarchicalCategory] = None
        else:
            self.canonical_top_level_category = self._all_codes_map[
                canonical_top_level_category]

    def __getitem__(self, code: str) -> HierarchicalCategory:
        """Get the category for a code."""
        return self._all_codes_map[code]

    def values(self) -> typing.ValuesView[HierarchicalCategory]:
        """Iterate over the categories."""
        return self._primary_code_map.values()

    def items(self) -> typing.ItemsView[str, HierarchicalCategory]:
        """Iterate over (primary code, category) pairs."""
        return self._primary_code_map.items()

    @classmethod
    def from_spec(
            cls,
            spec: typing.Dict[str,
                              typing.Any]) -> "HierarchicalCategorization":
        """Create Categorization from a Dictionary specification."""
        if spec["hierarchical"] != cls.hierarchical:
            raise ValueError(
                "Specification is for a non-hierarchical categorization, use"
                "Categorization.from_spec.")
        last_update = datetime.date.fromisoformat(spec["last_update"])
        return cls(
            categories=spec["categories"],
            name=spec["name"],
            title=spec["title"],
            comment=spec["comment"],
            references=spec["references"],
            institution=spec["institution"],
            last_update=last_update,
            version=spec.get("version", None),
            total_sum=spec["total_sum"],
            canonical_top_level_category=spec.get(
                "canonical_top_level_category", None),
        )

    def to_spec(self) -> typing.Dict[str, typing.Any]:
        """Turn this categorization into a specification dictionary ready to be written
        to a yaml file.

        Returns
        -------
        spec: dict
            Specification dictionary understood by `from_spec`.
        """
        # we can't call Categorization.to_spec here because we need to control ordering
        # in the returned dict so that we get nicely ordered yaml files.
        spec = {
            "name": self.name,
            "title": self.title,
            "comment": self.comment,
            "references": self.references,
            "institution": self.institution,
            "hierarchical": self.hierarchical,
            "last_update": self.last_update.isoformat(),
        }
        if self.version is not None:
            spec["version"] = self.version
        spec["total_sum"] = self.total_sum
        if self.canonical_top_level_category is not None:
            spec["canonical_top_level_category"] = (
                self.canonical_top_level_category.codes[0])

        spec["categories"] = {}
        for cat in self.values():
            code, cat_spec = cat.to_spec()
            spec["categories"][code] = cat_spec

        return spec

    @property
    def _canonical_subgraph(self) -> nx.DiGraph:
        # TODO: from python 3.8 on, there is functools.cached_property to
        # automatically cache this - as soon as we drop python 3.7 support, we can
        # easily add it.
        return nx.DiGraph(
            self._graph.edge_subgraph(
                ((u, v, 0) for (u, v, s) in self._graph.edges(data="set")
                 if s == 0)))

    def _show_subtree_children(
        self,
        children: typing.Iterable[HierarchicalCategory],
        format_func: typing.Callable,
        prefix: str,
        maxdepth: typing.Optional[int],
    ) -> str:
        children_sorted = natsort.natsorted(children, key=format_func)
        r = "".join(
            self._show_subtree(
                node=child,
                prefix=prefix + "│",
                format_func=format_func,
                maxdepth=maxdepth,
            ) for child in children_sorted[:-1])
        # Last child needs to be called slightly differently
        r += self._show_subtree(
            node=children_sorted[-1],
            prefix=prefix + " ",
            last=True,
            format_func=format_func,
            maxdepth=maxdepth,
        )
        return r

    @staticmethod
    def _render_node(
        node: HierarchicalCategory,
        last: bool,
        prefix: str,
        format_func: typing.Callable[[HierarchicalCategory], str],
    ):
        formatted = format_func(node)
        if prefix:
            if last:
                return f"{prefix[:-1]}╰{formatted}\n"
            else:
                return f"{prefix[:-1]}├{formatted}\n"
        else:
            return f"{formatted}\n"

    def _show_subtree(
        self,
        *,
        node: HierarchicalCategory,
        prefix="",
        last=False,
        format_func: typing.Callable[[HierarchicalCategory], str] = str,
        maxdepth: typing.Optional[int],
    ) -> str:
        """Recursively-called function to show a subtree starting at the given node."""

        r = self._render_node(node,
                              last=last,
                              prefix=prefix,
                              format_func=format_func)

        if maxdepth is not None:
            maxdepth -= 1
            if maxdepth == 0:  # maxdepth reached, nothing more to do
                return r

        child_sets = node.children
        if len(child_sets) == 1:
            children = child_sets[0]
            if children:
                r += self._show_subtree_children(
                    children=children,
                    format_func=format_func,
                    maxdepth=maxdepth,
                    prefix=prefix,
                )
        elif len(child_sets) > 1:
            prefix += "║"
            i = 1
            for children in child_sets:
                if children:
                    if i == 1:
                        r += (
                            f"{prefix[:-1]}╠╤══ ('{format_func(node)}'s children,"
                            f" option 1)\n")
                    else:
                        r += (
                            f"{prefix[:-1]}╠╕ ('{format_func(node)}'s children,"
                            f" option {i})\n")

                    r += self._show_subtree_children(
                        children=children,
                        format_func=format_func,
                        maxdepth=maxdepth,
                        prefix=prefix,
                    )
                    i += 1

            r += f"{prefix[:-1]}╚═══\n"

        return r

    def show_as_tree(
        self,
        *,
        format_func: typing.Callable[[HierarchicalCategory], str] = str,
        maxdepth: typing.Optional[int] = None,
        root: typing.Optional[typing.Union[HierarchicalCategory, str]] = None,
    ) -> str:
        """Format the hierarchy as a tree.

        Starting from the given root, or - if no root is given - the top-level
        categories (i.e. categories without parents), the tree of categories that are
        transitive children of the root is shown, with children connected to their
        parents using lines. If a parent category has one set of children, the children
        are connected to each other and the parent with a simple line. If a parent
        category has multiple sets of children, the sets are connected to the parent with
        double lines and the children in a set are connected to each other with simple
        lines.

        Parameters
        ----------
        format_func: callable, optional
            Function to call to format categories for display. Each category is
            formatted for display using format_func(category), so format_func should
            return a string without line breaks, otherwise the tree will look weird.
            By default, str() is used, so that the first code and the title of the
            category are used.
        maxdepth: int, optional
            Maximum depth to show in the tree. By default, goes to arbitrary depth.
        root: HierarchicalCategory or str, optional
            HierarchicalCategory object or code to use as the top-most category.
            If not given, the whole tree is shown, starting from all categories without
            parents.

        Returns
        -------
        tree_str: str
            Representation of the hierarchy as formatted string. print() it for optimal
            viewing.
        """
        if root is None:
            top_level_nodes = (node for node in self.values()
                               if not node.parents)
        else:
            if not isinstance(root, HierarchicalCategory):
                root = self[root]
            top_level_nodes = [root]
        return "\n".join((self._show_subtree(
            node=top_level_node, format_func=format_func, maxdepth=maxdepth))
                         for top_level_node in top_level_nodes)

    def extend(
        self,
        *,
        categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
        alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
        children: typing.Optional[typing.List[tuple]] = None,
        name: str,
        title: typing.Optional[str] = None,
        comment: typing.Optional[str] = None,
        last_update: typing.Optional[datetime.date] = None,
    ) -> "HierarchicalCategorization":
        """Extend the categorization with additional categories and relationships,
        yielding a new categorization.

        Metadata: the ``name``, ``title``, ``comment``, and ``last_update`` are updated
        automatically (see below), the ``institution`` and ``references`` are deleted
        and the values for ``version``, ``hierarchical``, ``total_sum``, and
        ``canonical_top_level_category`` are kept.
        You can set more accurate metadata (for example, your institution) on the
        returned object if needed.

        Parameters
        ----------
        categories: dict, optional
           Map of new category codes to their specification. The specification is a
           dictionary with the keys "title", optionally "comment", and optionally
           "alternative_codes".
        alternative_codes: dict, optional
           Map of new alternative codes. A dictionary with the new alternative code
           as key and existing code as value.
        children: list, optional
           List of ``(parent, (child1, child2, …))`` pairs. The given relationships will
           be inserted in the extended categorization.
        name : str
           The name of your extension. The returned Categorization will have a name
           of "{old_name}_{name}", indicating that it is an extension of the underlying
           Categorization.
        title : str, optional
           A string to add to the original title. If not provided, " + {name}" will be
           used.
        comment : str, optional
           A string to add to the original comment. If not provided,
           " extended by {name}" will be used.
        last_update : datetime.date, optional
           The date of the last update to this extension. Today will be used if not
           provided.

        Returns
        -------
        Extended categorization : HierarchicalCategorization
        """
        spec = self._extend_prepare(
            name=name,
            categories=categories,
            title=title,
            comment=comment,
            last_update=last_update,
            alternative_codes=alternative_codes,
        )

        if children is not None:
            for parent, child_set in children:
                if "children" not in spec["categories"][parent]:
                    spec["categories"][parent]["children"] = []
                spec["categories"][parent]["children"].append(child_set)

        return HierarchicalCategorization.from_spec(spec)

    @property
    def df(self) -> "pandas.DataFrame":
        """All category codes as a pandas dataframe."""
        titles = []
        comments = []
        alternative_codes = []
        children = []
        for cat in self.values():
            titles.append(cat.title)
            comments.append(cat.comment)
            alternative_codes.append(cat.codes[1:])
            children.append(
                tuple(
                    tuple(sorted(c.codes[0] for c in cs))
                    for cs in cat.children))
        return pandas.DataFrame(
            index=self.keys(),
            data={
                "title": titles,
                "comment": comments,
                "alternative_codes": alternative_codes,
                "children": children,
            },
        )

    def level(self, cat: typing.Union[str, HierarchicalCategory]) -> int:
        """The level of the given category.

        The canonical top-level category has level 1 and its children have level 2 etc.

        To calculate the level, first only the first ("canonical") set of children is
        considered. Only if no path from the canonical top-level category to the
        given category can be found are all other sets of children considered to
        calculate the level.
        """
        if not isinstance(cat, HierarchicalCategory):
            return self.level(self[cat])
        if not isinstance(self.canonical_top_level_category,
                          HierarchicalCategory):
            raise ValueError(
                "Can not calculate the level without a canonical_top_level_category."
            )

        # first use the canonical subgraph for shortest paths
        csg = self._canonical_subgraph
        try:
            sp = nx.shortest_path_length(csg,
                                         self.canonical_top_level_category,
                                         cat)
        except (nx.NetworkXNoPath, nx.NodeNotFound):
            try:
                sp = nx.shortest_path_length(self._graph,
                                             self.canonical_top_level_category,
                                             cat)
            except (nx.NetworkXNoPath, nx.NodeNotFound):
                raise ValueError(
                    f"{cat.codes[0]!r} is not a transitive child of the "
                    f"canonical top level "
                    f"{self.canonical_top_level_category.codes[0]!r}.")

        return sp + 1

    def parents(
        self, cat: typing.Union[str, HierarchicalCategory]
    ) -> typing.Set[HierarchicalCategory]:
        """The direct parents of the given category."""
        if not isinstance(cat, HierarchicalCategory):
            return self.parents(self._all_codes_map[cat])

        return set(self._graph.predecessors(cat))

    def ancestors(
        self, cat: typing.Union[str, HierarchicalCategory]
    ) -> typing.Set[HierarchicalCategory]:
        """All ancestors of the given category, i.e. the direct parents and their
        parents, etc."""
        if not isinstance(cat, HierarchicalCategory):
            return self.ancestors(self._all_codes_map[cat])

        return set(nx.ancestors(self._graph, cat))

    def children(
        self, cat: typing.Union[str, HierarchicalCategory]
    ) -> typing.List[typing.Set[HierarchicalCategory]]:
        """The list of sets of direct children of the given category."""
        if not isinstance(cat, HierarchicalCategory):
            return self.children(self._all_codes_map[cat])

        children_dict = {}
        for _, child, setno in self._graph.edges(cat, data="set"):
            if setno not in children_dict:
                children_dict[setno] = []
            children_dict[setno].append(child)

        return [set(children_dict[x]) for x in sorted(children_dict.keys())]

    def descendants(self, cat: typing.Union[str, HierarchicalCategory]):
        """All descendants of the given category, i.e. the direct children and their
        children, etc."""
        if not isinstance(cat, HierarchicalCategory):
            return self.descendants(self._all_codes_map[cat])

        return set(nx.descendants(self._graph, cat))
Example #18
0
_ENTITY_MODE_KEY = 'operation'
_ENTITY_TYPE_KEY = 'type'

# Value for a UDMI-compliant translation
_COMPLIANT_REGEX = u'^COMPLIANT$'

# A valid device field must match this
_FIELD_REGEX = u'^[a-z]+[a-z0-9]*(?:_[a-z]+[a-z0-9]*)*(?:_[0-9]+)*$'
"""Schema separately parses translation to account for multiple valid formats

github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md
#defining-translations
"""
_TRANSLATION_SCHEMA = syaml.Regex(_COMPLIANT_REGEX) | syaml.MapPattern(
    syaml.Regex(_FIELD_REGEX),
    syaml.Str() | syaml.Map({
        'present_value':
            syaml.Str(),
        syaml.Optional('states'):
            syaml.MapPattern(syaml.Regex(u'^[A-Z][A-Z_]+'), syaml.Str()),
        syaml.Optional('units'):
            syaml.Map({
                'key': syaml.Str(),
                'values': syaml.MapPattern(syaml.Str(), syaml.Str())
            }),
        syaml.Optional('unit_values'):
            syaml.MapPattern(syaml.Str(), syaml.Str())
    }))

_METADATA_SCHEMA = syaml.Map(
    {syaml.Optional(_CONFIG_MODE_KEY): EnumToRegex(ConfigMode)})
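
# A minimal usage sketch: the field name and point path below are illustrative
# assumptions, and ``syaml`` is assumed to be ``strictyaml`` imported above the
# excerpt shown here.
_EXAMPLE_TRANSLATION = """\
supply_air_flowrate_sensor:
  present_value: points.supply_flow.present_value
"""
# ``syaml.load(_EXAMPLE_TRANSLATION, _TRANSLATION_SCHEMA)`` parses via the
# MapPattern branch; the literal scalar COMPLIANT would match the Regex branch.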
Example #19
0
import codecs
import os

from functools import lru_cache

import strictyaml as sy

ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(ROOT_PATH, 'data')
LABS_FILENAME = 'labs.yaml'
PROJECTS_FILENAME = 'projects.yaml'

LABS_SCHEMA = sy.Map({
    "labs":
    sy.MapPattern(
        sy.Str(),
        sy.Map({
            "name":
            sy.Str(),
            "prof":
            sy.Map({
                "name": sy.Seq(sy.Str()),
                "email": sy.Email(),
            }),
            "url":
            sy.Url(),
            "contacts":
            sy.EmptyList() | sy.Seq(
                sy.Map({
                    "name": sy.Str(),
                    sy.Optional("email"): sy.Email(),
Example #20
0
class HierarchicalCategory(Category):
    """A single category from a HierarchicalCategorization."""

    _strictyaml_schema = sy.Map({
        "title":
        sy.Str(),
        sy.Optional("comment"):
        sy.Str(),
        sy.Optional("alternative_codes"):
        sy.Seq(sy.Str()),
        sy.Optional("info"):
        sy.MapPattern(sy.Str(), sy.Any()),
        sy.Optional("children"):
        sy.Seq(sy.Seq(sy.Str())),
    })

    def __init__(
        self,
        codes: typing.Tuple[str, ...],
        categorization: "HierarchicalCategorization",
        title: str,
        comment: typing.Optional[str] = None,
        info: typing.Optional[dict] = None,
    ):
        Category.__init__(self, codes, categorization, title, comment, info)
        self.categorization = categorization

    def to_spec(
        self
    ) -> typing.Tuple[str, typing.Dict[str, typing.Union[str, dict, list]]]:
        """Turn this category into a specification ready to be written to a yaml file.

        Returns
        -------
        (code: str, spec: dict)
            Primary code and specification dict
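
        Examples
        --------
        A sketch; the code, title, and children shown are illustrative:

        >>> category.to_spec()  # doctest: +SKIP
        ('1', {'title': 'Energy', 'children': [['1.A', '1.B']]})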
        """
        code, spec = Category.to_spec(self)
        children = []
        for child_set in self.children:
            children.append(sorted(c.codes[0] for c in child_set))
        if children:
            spec["children"] = children
        return code, spec

    @property
    def children(self) -> typing.List[typing.Set["HierarchicalCategory"]]:
        """The sets of subcategories comprising this category.

        The first set is canonical; the other sets are alternative.
        Only the canonical sets are used to calculate the level of a category."""
        return self.categorization.children(self)

    @property
    def parents(self) -> typing.Set["HierarchicalCategory"]:
        """The super-categories where this category is a member of any set of children.

        Note that all possible parents are returned, not only "canonical" parents.
        """
        return self.categorization.parents(self)

    @property
    def ancestors(self) -> typing.Set["HierarchicalCategory"]:
        """The super-categories where this category or any of its parents is a member
        of any set of children, transitively.

        Note that all possible ancestors are returned, not only "canonical" ones.
        """
        return self.categorization.ancestors(self)

    @property
    def descendants(self) -> typing.Set["HierarchicalCategory"]:
        """The sets of subcategories comprising this category directly or indirectly.

        Note that all possible descendants are returned, not only "canonical" ones."""
        return self.categorization.descendants(self)

    @property
    def level(self) -> int:
        """The level of the category.

        The canonical top-level category has level 1 and its children have level 2 etc.

        To calculate the level, only the first ("canonical") set of children is
        considered for intermediate categories.
        """
        return self.categorization.level(self)
Example #21
0
from pathlib import Path
from operator import itemgetter

from pelican import signals
import strictyaml as yaml


SCHEMA = yaml.Seq(
    yaml.Map({
        'date': yaml.Datetime(),
        'title': yaml.Str(),
        'event': yaml.Str(),
        yaml.Optional('lang', default='cs'): yaml.Enum([
            'cs',
            'en',
        ]),
        yaml.Optional('type', default='talk'): yaml.Enum([
            'talk',
            'workshop',
            'interview',
        ]),
        yaml.Optional('url'): yaml.Url(),
        yaml.Optional('resources_type', default='slides'): yaml.Enum([
            'slides',
            'text',
        ]),
        yaml.Optional('resources_url'): yaml.Url(),
    }),
)
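
# A usage sketch; the document below is an illustrative assumption:
_EXAMPLE_TALKS = """\
- date: 2021-05-01 10:00
  title: An example talk
  event: An example meetup
"""
# ``yaml.load(_EXAMPLE_TALKS, SCHEMA).data`` would apply the defaults
# lang='cs', type='talk', and resources_type='slides'.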

Example #22
0
import time
import itertools
import logging
from datetime import datetime, timezone
from pathlib import Path
from operator import itemgetter

from pelican import signals
import feedparser
import strictyaml as yaml

logger = logging.getLogger(__name__)

SCHEMA = yaml.Seq(
    yaml.Map({
        'title': yaml.Str(),
        'lang': yaml.Enum(['en', 'cs']),
        'link': yaml.Url(),
        'href': yaml.Url(),
    }), )


def register():
    signals.article_generator_finalized.connect(load_feeds)


def load_feeds(article_generator):
    settings = article_generator.settings
    articles = article_generator.articles

    # prepare blog articles
Example #23
0
import logging
from datetime import datetime, timezone
from pathlib import Path
from operator import attrgetter

from pelican import signals, generators
import feedparser
import strictyaml as yaml


logger = logging.getLogger(__name__)


SCHEMA = yaml.Seq(
    yaml.Map({
        'href': yaml.Url(),
        yaml.Optional('if'): yaml.Str(),
    }),
)


def register():
    signals.get_generators.connect(get_generators)


def get_generators(pelican_object):
    return PocketFeedGenerator


class Item():
    def __init__(self, entry):
        self.title = entry.title
Example #24
0
_CONFIG_METADATA_PATTERN = re.compile(_CONFIG_METADATA_REGEX)
# Key that marks the mode to parse file in.
_CONFIG_MODE_KEY = 'operation'

# A valid device field must match this
_FIELD_REGEX = u'^[a-z][a-z0-9]*(?:_[a-z][a-z0-9]*)*(?:_[0-9]+)*$'
"""Schema separately parses translation to account for multiple valid formats

github.com/google/digitalbuildings/blob/master/ontology/docs/building_config.md
#defining-translations
"""
_TRANSLATION_SCHEMA = syaml.MapPattern(
    syaml.Regex(_FIELD_REGEX),
    # Note: This block is somewhat permissive, as the logic was difficult to
    # implement in syaml. Additional validation occurs in EntityInstance.
    syaml.Str() | syaml.Map({
        PRESENT_VALUE_KEY:
        syaml.Str(),
        syaml.Optional(STATES_KEY):
        syaml.MapPattern(syaml.Regex(u'^[A-Z][A-Z_]+'),
                         syaml.Str() | syaml.Seq(syaml.Str())),
        syaml.Optional(UNITS_KEY):
        syaml.Map({
            UNIT_NAME_KEY: syaml.Str(),
            UNIT_VALUES_KEY: syaml.MapPattern(syaml.Str(), syaml.Str())
        }),
    }))

_METADATA_SCHEMA = syaml.Map({
    syaml.Optional(_CONFIG_MODE_KEY):
    EnumToRegex(ConfigMode, [ConfigMode.EXPORT])
Example #25
0
INTERCHANGE_FORMAT_COLUMN_ORDER = [
    "source",
    "scenario",
    "provenance",
    "model",
    "area",
    "entity",
    "unit",
    "category",
    "orig_cat_name",
    "cat_name_translation",
]

INTERCHANGE_FORMAT_STRICTYAML_SCHEMA = sy.Map({
    sy.Optional("data_file"):
    sy.Str(),
    "attrs":
    sy.Map({
        "area": sy.Str(),
        sy.Optional("cat"): sy.Str(),
        sy.Optional("sec_cats"): sy.Seq(sy.Str()),
        sy.Optional("scen"): sy.Str(),
        sy.Optional("references"): sy.Str(),
        sy.Optional("rights"): sy.Str(),
        sy.Optional("contact"): sy.Str(),
        sy.Optional("title"): sy.Str(),
        sy.Optional("comment"): sy.Str(),
        sy.Optional("institution"): sy.Str(),
        sy.Optional("history"): sy.Str(),
        sy.Optional("entity_terminology"): sy.Str(),
        sy.Optional("publication_date"): sy.Datetime(),
Example #26
0
 def _yaml_schema(cls) -> strictyaml.Validator:
     return strictyaml.Map({
         "path": strictyaml.Str(),
         "toc": TocConfig._yaml_schema(),
     })
Example #27
0
class Category:
    """A single category."""

    _strictyaml_schema = sy.Map({
        "title":
        sy.Str(),
        sy.Optional("comment"):
        sy.Str(),
        sy.Optional("alternative_codes"):
        sy.Seq(sy.Str()),
        sy.Optional("info"):
        sy.MapPattern(sy.Str(), sy.Any()),
    })

    def __init__(
        self,
        codes: typing.Tuple[str, ...],
        categorization: "Categorization",
        title: str,
        comment: typing.Optional[str] = None,
        info: typing.Optional[dict] = None,
    ):
        self.codes = codes
        self.title = title
        self.comment = comment
        self.categorization = categorization
        if info is None:
            self.info = {}
        else:
            self.info = info
        self._hash = None

    @classmethod
    def from_spec(cls, code: str, spec: typing.Dict,
                  categorization: "Categorization"):
        codes = [code]
        if "alternative_codes" in spec:
            codes += spec["alternative_codes"]
            del spec["alternative_codes"]
        return cls(
            codes=tuple(codes),
            categorization=categorization,
            title=spec["title"],
            comment=spec.get("comment", None),
            info=spec.get("info", None),
        )

    def to_spec(
        self
    ) -> typing.Tuple[str, typing.Dict[str, typing.Union[str, dict, list]]]:
        """Turn this category into a specification ready to be written to a yaml file.

        Returns
        -------
        (code: str, spec: dict)
            Primary code and specification dict
        """
        code = self.codes[0]
        spec = {"title": self.title}
        if self.comment is not None:
            spec["comment"] = self.comment
        if len(self.codes) > 1:
            spec["alternative_codes"] = list(self.codes[1:])
        if self.info:
            spec["info"] = self.info
        return code, spec

    def __str__(self) -> str:
        return f"{self.codes[0]} {self.title}"

    def __eq__(self, other: "Category"):
        if not isinstance(other, Category):
            return False
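        # Equal iff the categories share at least one code and the
        # categorizations are identical, have the same name, or one is an
        # extension of the other (extensions are named "{base}_{extension}").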
        return any(x in other.codes for x in self.codes) and (
            self.categorization is other.categorization
            or self.categorization.name.startswith(
                f"{other.categorization.name}_")
            or other.categorization.name.startswith(
                f"{self.categorization.name}_")
            or self.categorization.name == other.categorization.name)

    def __repr__(self) -> str:
        return f"<{self.categorization.name}: {self.codes[0]!r}>"

    def __hash__(self):
        if self._hash is None:
            self._hash = hash(self.categorization.name + self.codes[0])
        return self._hash

    def __lt__(self, other):
        s = natsort.natsorted((self.codes[0], other.codes[0]))
        return s[0] == self.codes[0] and self != other
Example #28
0
import copy
import json
import os
import re
import sys

import strictyaml

SCHEMA_MANIFEST = strictyaml.Seq(
    strictyaml.Map({
        "name": strictyaml.Str(),
        "description": strictyaml.Str(),
        "src": strictyaml.Str(),
    }))
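
# A manifest this schema accepts (a sketch; names and paths are illustrative):
_EXAMPLE_MANIFEST = """\
- name: base
  description: default rule set
  src: rules/base.yaml
"""
# ``strictyaml.load(_EXAMPLE_MANIFEST, SCHEMA_MANIFEST).data`` returns a list
# with one dict per manifest entry.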

SCHEMA_RULE = strictyaml.Seq(
    strictyaml.Map({
        "process":
        strictyaml.Str(),
        strictyaml.Optional("action"):
        strictyaml.Enum(["allow", "deny", "ask"]),
        strictyaml.Optional("codeSignature"):
        strictyaml.Enum(["ignore"]),
        strictyaml.Optional("direction"):
        strictyaml.Enum(["incoming", "outgoing"]),
        strictyaml.Optional("disabled"):
        strictyaml.Bool(),
        strictyaml.Optional("notes"):
        strictyaml.Str(),
        strictyaml.Optional("ports"):
        strictyaml.Regex("^(any|\d+((\s+)?\-(\s+)?\d+)?)$"),
Example #29
0
def get_testid(testcase):
    """Convert a testcase filename into a test case identifier."""
    name = os.path.splitext(os.path.basename(testcase))[0]

    return name


def get_testcases(path):
    for f in sorted(os.listdir(path)):
        if f.endswith(testext):
            yield os.path.join(path, f)


options_schema = strictyaml.Map({
    strictyaml.Optional('directive'):
    strictyaml.Str(),
    strictyaml.Optional('directive-arguments'):
    strictyaml.Seq(strictyaml.Str()),
    strictyaml.Optional('directive-options'):
    strictyaml.Map({
        strictyaml.Optional('clang'):
        strictyaml.Seq(strictyaml.Str()),
        strictyaml.Optional('compat'):
        strictyaml.Str(),
        strictyaml.Optional('file'):
        strictyaml.Str(),
        strictyaml.Optional('members'):
        strictyaml.Seq(strictyaml.Str()) | strictyaml.EmptyList(),
        strictyaml.Optional('transform'):
        strictyaml.Str(),
    }),
Example #30
0
class Categorization:
    """A single categorization system.

    A categorization system comprises a set of categories and their relationships,
    as well as metadata describing the categorization system itself.

    Use the categorization object like a dictionary, where codes can be translated
    to their meaning using ``cat[code]`` and all codes are available using
    ``cat.keys()``. Metadata about the categorization is provided in attributes.
    If `pandas` is available, you can access a `pandas.DataFrame` with all
    category codes and their meanings at ``cat.df``.

    Attributes
    ----------
    name : str
        The unique name/code
    references : str
        Citable reference(s)
    title : str
        A short, descriptive title for humans
    comment : str
        Notes and explanations for humans
    institution : str
        Where the categorization originates
    last_update : datetime.date
        The date of the last change
    version : str, optional
        The version of the Categorization, if there are multiple versions
    hierarchical : bool
        True if descendants and ancestors are defined
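
    Examples
    --------
    A usage sketch; the file name and the category code "1" are illustrative
    assumptions:

    >>> cat = Categorization.from_yaml("example.yaml")  # doctest: +SKIP
    >>> cat["1"].title  # doctest: +SKIP
    'An example category'
    >>> "1" in cat  # doctest: +SKIP
    True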
    """

    hierarchical: bool = False

    _strictyaml_schema = sy.Map({
        "name":
        sy.Str(),
        "title":
        sy.Str(),
        "comment":
        sy.Str(),
        "references":
        sy.Str(),
        "institution":
        sy.Str(),
        "last_update":
        sy.Str(),
        "hierarchical":
        sy.Bool(),
        sy.Optional("version"):
        sy.Str(),
        "categories":
        sy.MapPattern(sy.Str(), Category._strictyaml_schema),
    })

    def _add_categories(self, categories: typing.Dict[str, typing.Dict]):
        for code, spec in categories.items():
            cat = Category.from_spec(code=code, spec=spec, categorization=self)

            self._primary_code_map[code] = cat
            for icode in cat.codes:
                self._all_codes_map[icode] = cat

    def __init__(
        self,
        *,
        categories: typing.Dict[str, typing.Dict],
        name: str,
        title: str,
        comment: str,
        references: str,
        institution: str,
        last_update: datetime.date,
        version: typing.Optional[str] = None,
    ):
        self._primary_code_map = {}
        self._all_codes_map = {}
        self.name = name
        self.references = references
        self.title = title
        self.comment = comment
        self.institution = institution
        self.last_update = last_update
        self.version = version

        self._add_categories(categories)

        # is filled in __init__.py to contain all categorizations
        self._cats: typing.Dict[str, "Categorization"] = {}

    def __hash__(self):
        return hash(self.name)

    @classmethod
    def from_yaml(
        cls, filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
    ) -> "Categorization":
        """Read Categorization from a StrictYaml file."""
        try:
            yaml = sy.load(filepath.read(), schema=cls._strictyaml_schema)
        except AttributeError:
            with open(filepath) as fd:
                yaml = sy.load(fd.read(), schema=cls._strictyaml_schema)
        return cls.from_spec(yaml.data)

    @classmethod
    def from_spec(cls, spec: typing.Dict[str, typing.Any]) -> "Categorization":
        """Create Categorization from a Dictionary specification."""
        if spec["hierarchical"] != cls.hierarchical:
            raise ValueError(
                "Specification is for a hierarchical categorization, use"
                "HierarchicalCategorization.from_spec.")
        last_update = datetime.date.fromisoformat(spec["last_update"])
        return cls(
            categories=spec["categories"],
            name=spec["name"],
            title=spec["title"],
            comment=spec["comment"],
            references=spec["references"],
            institution=spec["institution"],
            last_update=last_update,
            version=spec.get("version", None),
        )

    @staticmethod
    def from_pickle(
        filepath: typing.Union[str, pathlib.Path, typing.IO[bytes]]
    ) -> "Categorization":
        """De-serialize Categorization from a file written by to_pickle.

        Note that this uses the pickle module, which can execute arbitrary code
        contained in the provided file. Only load pickle files that you trust."""
        return from_pickle(filepath)

    def to_spec(self) -> typing.Dict[str, typing.Any]:
        """Turn this categorization into a specification dictionary ready to be written
        to a yaml file.

        Returns
        -------
        spec: dict
            Specification dictionary understood by `from_spec`.
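
        Examples
        --------
        A round-trip sketch; ``cat`` is an illustrative instance:

        >>> Categorization.from_spec(cat.to_spec()) == cat  # doctest: +SKIP
        True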
        """
        spec = {
            "name": self.name,
            "title": self.title,
            "comment": self.comment,
            "references": self.references,
            "institution": self.institution,
            "hierarchical": self.hierarchical,
            "last_update": self.last_update.isoformat(),
        }
        if self.version is not None:
            spec["version"] = self.version
        spec["categories"] = {}
        for cat in self.values():
            code, cat_spec = cat.to_spec()
            spec["categories"][code] = cat_spec

        return spec

    def to_yaml(self, filepath: typing.Union[str, pathlib.Path]) -> None:
        """Write to a YAML file."""
        spec = self.to_spec()
        yaml = YAML()
        yaml.default_flow_style = False
        with open(filepath, "w") as fd:
            yaml.dump(spec, fd)

    def to_pickle(self, filepath: typing.Union[str, pathlib.Path]) -> None:
        """Serialize to a file using python's pickle."""
        spec = self.to_spec()
        with open(filepath, "wb") as fd:
            pickle.dump(spec, fd, protocol=4)

    def keys(self) -> typing.KeysView[str]:
        """Iterate over the codes for all categories."""
        return self._primary_code_map.keys()

    def values(self) -> typing.ValuesView[Category]:
        """Iterate over the categories."""
        return self._primary_code_map.values()

    def items(self) -> typing.ItemsView[str, Category]:
        """Iterate over (primary code, category) pairs."""
        return self._primary_code_map.items()

    def all_keys(self) -> typing.KeysView[str]:
        """Iterate over all codes for all categories."""
        return self._all_codes_map.keys()

    def __iter__(self) -> typing.Iterable[str]:
        return iter(self._primary_code_map)

    def __getitem__(self, code: str) -> Category:
        """Get the category for a code."""
        return self._all_codes_map[code]

    def __contains__(self, code: str) -> bool:
        """Can the code be mapped to a category?"""
        return code in self._all_codes_map

    def __len__(self) -> int:
        return len(self._primary_code_map)

    def __repr__(self) -> str:
        return (
            f"<Categorization {self.name} {self.title!r} with {len(self)} categories>"
        )

    def __str__(self) -> str:
        return self.name

    @property
    def df(self) -> "pandas.DataFrame":
        """All category codes as a pandas dataframe."""
        titles = []
        comments = []
        alternative_codes = []
        for cat in self.values():
            titles.append(cat.title)
            comments.append(cat.comment)
            alternative_codes.append(cat.codes[1:])
        return pandas.DataFrame(
            index=self.keys(),
            data={
                "title": titles,
                "comment": comments,
                "alternative_codes": alternative_codes,
            },
        )

    def _extend_prepare(
        self,
        *,
        categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
        alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
        name: str,
        title: typing.Optional[str] = None,
        comment: typing.Optional[str] = None,
        last_update: typing.Optional[datetime.date] = None,
    ) -> typing.Dict[str, typing.Any]:
        spec = self.to_spec()

        spec["name"] = f"{self.name}_{name}"
        spec["references"] = ""
        spec["institution"] = ""

        if title is None:
            spec["title"] = f"{self.title} + {name}"
        else:
            spec["title"] = self.title + title

        if comment is None:
            spec["comment"] = f"{self.comment} extended by {name}"
        else:
            spec["comment"] = self.comment + comment

        if last_update is None:
            spec["last_update"] = datetime.date.today().isoformat()
        else:
            spec["last_update"] = last_update.isoformat()

        if categories is not None:
            spec["categories"].update(categories)

        if alternative_codes is not None:
            for alias, primary in alternative_codes.items():
                if "alternative_codes" not in spec["categories"][primary]:
                    spec["categories"][primary]["alternative_codes"] = []

                spec["categories"][primary]["alternative_codes"].append(alias)

        return spec

    def extend(
        self,
        *,
        categories: typing.Optional[typing.Dict[str, typing.Dict]] = None,
        alternative_codes: typing.Optional[typing.Dict[str, str]] = None,
        name: str,
        title: typing.Optional[str] = None,
        comment: typing.Optional[str] = None,
        last_update: typing.Optional[datetime.date] = None,
    ) -> "Categorization":
        """Extend the categorization with additional categories, yielding a new
        categorization.

        Metadata: the ``name``, ``title``, ``comment``, and ``last_update`` are updated
        automatically (see below), the ``institution`` and ``references`` are cleared,
        and the values for ``version`` and ``hierarchical`` are kept.
        You can set more accurate metadata (for example, your institution) on the
        returned object if needed.

        Parameters
        ----------
        categories: dict, optional
           Map of new category codes to their specification. The specification is a
           dictionary with the keys "title", optionally "comment", and optionally
           "alternative_codes".
        alternative_codes: dict, optional
           Map of new alternative codes. A dictionary with the new alternative code
           as key and existing code as value.
        name : str
           The name of your extension. The returned Categorization will have a name
           of "{old_name}_{name}", indicating that it is an extension of the underlying
           Categorization.
        title : str, optional
           A string to add to the original title. If not provided, " + {name}" will be
           used.
        comment : str, optional
           A string to add to the original comment. If not provided,
           " extended by {name}" will be used.
        last_update : datetime.date, optional
           The date of the last update to this extension. Today will be used if not
           provided.

        Returns
        -------
        Extended categorization : Categorization
        """
        spec = self._extend_prepare(
            name=name,
            categories=categories,
            title=title,
            comment=comment,
            last_update=last_update,
            alternative_codes=alternative_codes,
        )

        return Categorization.from_spec(spec)

    def __eq__(self, other):
        if not isinstance(other, Categorization):
            return False
        if self.name != other.name:
            return False
        return self._primary_code_map == other._primary_code_map

    def conversion_to(
            self, other: typing.Union["Categorization", str]) -> Conversion:
        """Get conversion to other categorization.

        If conversion rules for this conversion are not included, raises
        NotImplementedError."""
        if isinstance(other, str):
            other_name = other
        else:
            other_name = other.name

        forward_csv_name = f"conversion.{self.name}.{other_name}.csv"
        if importlib.resources.is_resource(data, forward_csv_name):
            fd = importlib.resources.open_text(data, forward_csv_name)
            return ConversionSpec.from_csv(fd).hydrate(cats=self._cats)
        reversed_csv_name = f"conversion.{other_name}.{self.name}.csv"
        if importlib.resources.is_resource(data, reversed_csv_name):
            fd = importlib.resources.open_text(data, reversed_csv_name)
            return ConversionSpec.from_csv(fd).hydrate(
                cats=self._cats).reversed()

        raise NotImplementedError(
            f"Conversion between {self.name} and {other_name} not yet included."
        )
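
    # A usage sketch for ``conversion_to``; the target name is illustrative,
    # and a matching conversion CSV must be shipped in ``data`` for the
    # lookup to succeed:
    #
    #   conv = cat.conversion_to("OTHER_CATEGORIZATION")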