Example #1
def barChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
             output: typing.BinaryIO) -> None:
    d = data['x']
    ind = np.arange(len(d))
    ys = data['y']

    width = 0.60
    fig: Figure = Figure(figsize=(size[0], size[1]),
                         dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas

    axis = fig.add_subplot(111)
    axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)

    bottom = np.zeros(len(ys[0]['data']))
    for y in ys:
        axis.bar(ind, y['data'], width, bottom=bottom, label=y.get('label'))
        bottom += np.array(y['data'])

    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])

    if data.get('allTicks', True) is True:
        axis.set_xticks(ind)

    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])

    axis.legend()

    fig.savefig(output, format='png', transparent=True)
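
A minimal usage sketch for barChart; the data layout below is inferred from the lookups above, and the module-level imports (numpy as np, matplotlib's Figure and FigureCanvas) are assumed to match the other charting examples.

# Hypothetical usage: two stacked series over three categories.
data = {
    'title': 'Tickets per queue',
    'xlabel': 'Queue',
    'ylabel': 'Tickets',
    'x': ['new', 'open', 'closed'],
    'y': [
        {'label': 'normal', 'data': [3, 5, 2]},
        {'label': 'urgent', 'data': [1, 0, 4]},  # stacked on top of the previous series
    ],
}
with open('tickets.png', 'wb') as output:
    barChart((8.0, 6.0, 100), data, output)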
Example #2
    def __init__(self, sim_spec: typing.Mapping, *, backend: str):
        """Construct from a simulation specification and the backend fixture."""
        self.domain = sim_spec.get("domain", analytical.DOMAIN)
        self.time_step = 1e-3
        self.max_time = 1e-2
        self.shape = sim_spec.get("shape", (16, 16, 16))
        self.backend_name = backend
        self.tolerance = sim_spec["tolerance"]
        dspace = numpy.array(analytical.DOMAIN,
                             dtype=numpy.float64) / numpy.array(
                                 self.shape, dtype=numpy.float64)
        stencil_args = {
            "backend": self.backend_name,
            "shape": self.shape,
            "dspace": dspace,
            "time_step": self.time_step,
        }
        stencil_args.update(sim_spec.get("extra-args", {}))
        self.extra_args = sim_spec.get("extra-args", {})
        self.stencil = sim_spec["stencil"](**stencil_args)
        self.reference = sim_spec["reference"]
        storage_b = self.stencil.storage_builder().default_origin(
            self.stencil.min_origin())

        self.data = storage_b.from_array(
            numpy.fromfunction(self.get_reference, shape=self.shape))
        self.data1 = copy.deepcopy(self.data)
        self._initial_state = copy.deepcopy(self.data)
        self._expected = numpy.fromfunction(functools.partial(
            self.get_reference, time=self.max_time),
                                            shape=self.shape)
Example #3
def compare_all_but(dict_a: t.Mapping,
                    dict_b: t.Mapping,
                    keys_to_ignore: t.Optional[t.Iterable] = None) -> bool:
    """
    Compare two dictionaries, with the possibility to ignore some fields.

    :arg dict_a: First dictionary to compare
    :arg dict_b: Second dictionary to compare
    :kwarg keys_to_ignore: An iterable of keys whose values in the dictionaries will not be
        compared.
    :returns: True if the dictionaries have matching values for all of the keys which were not
        ignored.  False otherwise.
    """
    if keys_to_ignore is None:
        return dict_a == dict_b

    if not isinstance(keys_to_ignore, Set):
        keys_to_ignore = frozenset(keys_to_ignore)

    length_a = len(frozenset(dict_a.keys()) - keys_to_ignore)
    length_b = len(frozenset(dict_b.keys()) - keys_to_ignore)

    if length_a != length_b:
        return False

    sentinel = object()

    for key, value in ((k, v) for k, v in dict_a.items()
                       if k not in keys_to_ignore):
        if value != dict_b.get(key, sentinel):
            return False

    return True
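
A quick illustration of the ignore behaviour (hypothetical data; Set is assumed to come from collections.abc, as the isinstance check above implies).

# Hypothetical usage: the dicts differ only in 'updated'.
a = {'name': 'x', 'updated': 1}
b = {'name': 'x', 'updated': 2}
compare_all_but(a, b)                              # False -- every key is compared
compare_all_but(a, b, keys_to_ignore=['updated'])  # True -- 'updated' is skipped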
Example #4
def lineChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
              output: typing.BinaryIO) -> None:
    x = data['x']
    y = data['y']

    fig: Figure = Figure(figsize=(size[0], size[1]),
                         dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas

    axis = fig.add_subplot(111)
    axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)

    for i in y:
        yy = i['data']
        axis.plot(x, yy, label=i.get('label'), marker='.', color='orange')
        axis.fill_between(x, yy, 0)

    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])

    if data.get('allTicks', True) is True:
        axis.set_xticks(x)

    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])

    axis.legend()

    fig.savefig(output, format='png', transparent=True)
Example #5
def replace_qubits(
    circuit: cirq.Circuit,
    qubit_map: typing.Mapping
):
    old_qubits = set(qubit_map.keys())
    new_qubits = set(qubit_map.values())
    if len(old_qubits) != len(new_qubits):
        raise ValueError(
            "`qubit_map` must be a bijective mapping."
        )

    qubits_in_circuit = circuit.all_qubits()
    unknown_old_qubits = old_qubits - qubits_in_circuit
    if unknown_old_qubits:
        raise ValueError(
            "Some qubits in `old_qubits` do not exist in the original circuit: "
            f"{unknown_old_qubits}"
        )

    new_circuit = cirq.Circuit()
    for moment in circuit:
        new_moment = cirq.Moment()
        for operation in moment:
            new_operation = operation.gate.on(
                *(qubit_map[q] for q in operation.qubits)
            )
            new_moment += new_operation
        new_circuit += new_moment

    return new_circuit
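
A short usage sketch (hypothetical circuit; assumes the standard cirq API for LineQubit, H and CNOT).

# Hypothetical usage: move a two-qubit circuit onto a different pair of qubits.
q0, q1, q2, q3 = cirq.LineQubit.range(4)
circuit = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])
remapped = replace_qubits(circuit, {q0: q2, q1: q3})
# `remapped` contains the same gates, now acting on q2 and q3.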
Example #6
    def extract_value(
        self,
        track: typing.Mapping,
        context: typing.MutableMapping,
    ) -> typing.Optional[T]:
        """Extract the property value from a given track."""
        for name in self.names:
            names = name.split('.')
            value = track.get(names[0], {}).get(
                names[1]) if len(names) == 2 else track.get(name)
            if value is None:
                if self.default is None:
                    continue

                value = self.default

            if isinstance(value, bytes):
                value = value.decode()

            if isinstance(value, str):
                value = value.translate(_visible_chars_table).strip()
                if _is_unknown(value):
                    continue
                value = self._deduplicate(value)

            result = self.handle(value, context)
            if result is not None and not _is_unknown(result):
                return result

        return None
Example #7
    def oauth2client_from_k8s(cls, obj: typing.Mapping) -> api_pb2.Client:
        keep_keys = set(cls.OAuth2Client.DESCRIPTOR.fields_by_name.keys())
        for key in list(obj.keys()):
            if key not in keep_keys:
                obj.pop(key)

        return cls.OAuth2Client(**obj)
Example #8
    def update(self, headers: typing.Mapping) -> None:
        """ Update current rate limits. """

        self.limit = headers.get(self.HEADER_LIMIT)
        self.left = headers.get(self.HEADER_REMAINING)
        timestamp = headers.get(self.HEADER_RESET)
        self.reset = arrow.get(
            int(timestamp)) if timestamp is not None else None
Example #9
def surfaceChart(size: typing.Tuple[float, float, int], data: typing.Mapping,
                 output: typing.BinaryIO) -> None:
    x = data['x']
    y = data['y']
    z = data['z']

    logger.debug('X: %s', x)
    logger.debug('Y: %s', y)
    logger.debug('Z: %s', z)

    x, y = np.meshgrid(x, y)
    z = np.array(z)

    logger.debug('X\': %s', x)
    logger.debug('Y\': %s', y)
    logger.debug('Z\': %s', z)

    fig: Figure = Figure(figsize=(size[0], size[1]),
                         dpi=size[2])  # type: ignore
    FigureCanvas(fig)  # Stores canvas on fig.canvas

    axis = fig.add_subplot(111, projection='3d')
    # axis.grid(color='r', linestyle='dotted', linewidth=0.1, alpha=0.5)

    if data.get('wireframe', False) is True:
        axis.plot_wireframe(
            x,
            y,
            z,
            rstride=1,
            cstride=1,
            cmap=cm.coolwarm  # type: ignore
        )
    else:
        axis.plot_surface(
            x,
            y,
            z,
            rstride=1,
            cstride=1,
            cmap=cm.coolwarm  # type: ignore
        )

    axis.set_title(data.get('title', ''))
    axis.set_xlabel(data['xlabel'])
    axis.set_ylabel(data['ylabel'])
    axis.set_zlabel(data['zlabel'])

    if data.get('allTicks', True) is True:
        axis.set_xticks(data['x'])
        axis.set_yticks(data['y'])

    if 'xtickFnc' in data:
        axis.set_xticklabels([data['xtickFnc'](v) for v in axis.get_xticks()])
    if 'ytickFnc' in data:
        axis.set_yticklabels([data['ytickFnc'](v) for v in axis.get_yticks()])

    fig.savefig(output, format='png', transparent=True)
Example #10
    def _iter_fields(
        self, data: typing.Mapping, files: typing.Mapping
    ) -> typing.Iterator[typing.Union["FileField", "DataField"]]:
        for name, value in data.items():
            if isinstance(value, list):
                for item in value:
                    yield self.DataField(name=name, value=item)
            else:
                yield self.DataField(name=name, value=value)

        for name, value in files.items():
            yield self.FileField(name=name, value=value)
Example #11
    def __init__(
        self,
        context: PyTorchSAClientContext,
        layers_config: typing.List[typing.Mapping],
        optimizer_config: types.SimpleNamespace,
        loss_config: typing.Mapping,
    ):
        super().__init__()
        self.save_hyperparameters()
        self.context = context

        # model
        layers = []
        for layer_config in layers_config:
            layer_name = layer_config["layer"]
            layer_kwargs = {k: v for k, v in layer_config.items() if k != "layer"}
            layers.append(get_layer_fn(layer_name, layer_kwargs))
        self.model = nn.Sequential(*layers)

        # loss
        loss_name = loss_config["loss"]
        loss_kwargs = {k: v for k, v in loss_config.items() if k != "loss"}
        self.loss_fn, self.expected_label_type = get_loss_fn(loss_name, loss_kwargs)

        # optimizer
        self._optimizer_name = optimizer_config.optimizer
        self._optimizer_kwargs = optimizer_config.kwargs

        self._num_data_consumed = 0
        self._all_consumed_data_aggregated = True

        self._should_early_stop = False
Example #12
def _map_allotypes(df: pd.DataFrame, df_name: str,
                   mapping: t.Mapping) -> pd.DataFrame:
    """
    Map allotype names in `df` (which must have an `allotype` field) to accessions.
    A logging warning reports any allotypes that could not be mapped.
    :param df: DataFrame with `allotype` field present.
    :param df_name: Name of the df (used for logging)
    :param mapping: Mapping between allotype names and accessions.
    :return: DataFrame with a new `accession` field.
    """
    # Prevent SettingWithCopyWarning
    df = df.copy()
    # Map allotypes accessions
    df['accession'] = df['allotype'].map(mapping)
    # Warn about unmapped allotypes
    unmapped_allotypes_loc = ~df['accession'].isin(set(mapping.values()))
    if unmapped_allotypes_loc.any():
        df.loc[unmapped_allotypes_loc, 'accession'] = np.nan
        unmapped_allotypes = df.loc[unmapped_allotypes_loc,
                                    "allotype"].sort_values().unique()
        logging.warning(
            f'{df_name} -- could not map {len(unmapped_allotypes)} '
            f'allotypes {unmapped_allotypes} corresponding to '
            f'{unmapped_allotypes_loc.sum()} records')
    # Filter out unmapped allotypes
    df = df[~df['accession'].isna()]
    logging.info(
        f'{df_name} -- filtered out unmapped allotypes; records: {len(df)}')
    return df
Example #13
def map_effects(mapping: t.Mapping, aw: t.Awaitable):
    mapping = {_map_kind(k): v for (k, v) in mapping.items()}

    def mapping_handler(eff: Effect):
        handle = mapping.get(eff.kind, None)

        if handle is None:
            return handle

        is_callable = isinstance(handle, t.Callable)
        is_mapping = isinstance(handle, t.Mapping)

        if is_mapping and is_callable:
            raise ValueError(
                "Expected either mapping or callable but "
                "got value implementing both", handle)

        if is_mapping:
            return lambda *a, **kw: map_effects(handle,
                                                eff.default_handle(*a, **kw))

        if is_callable:
            return handle

        raise ValueError(f"Handler mapping should contain either "
                         f"another mapping or callable, got {handle}")

    return resolve_effects(mapping_handler, aw)
Example #14
def dict2items(
    value: typing.Mapping,
    key_name: str = 'key',
    value_name: str = 'value',
) -> typing.List[typing.Dict[str, typing.Any]]:
    """Convert a mapping to a list of its items.

    For example, converts

    .. code-block:: yaml

        milk: 1
        eggs: 10
        bread: 2

    into

    .. code-block:: yaml

        - key: milk
          value: 1
        - key: eggs
          value: 10
        - key: bread
          value: 2

    .. versionadded:: 1.1

    :param value: Any mapping.
    :param key_name: Key name for input keys.
    :param value_name: Key name for input values.
    :returns: A list of dicts.
    """
    return [{key_name: key, value_name: value} for key, value in value.items()]
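
The same conversion expressed as plain Python calls (hypothetical values mirroring the YAML above).

# Hypothetical usage matching the YAML example in the docstring:
dict2items({'milk': 1, 'eggs': 10, 'bread': 2})
# -> [{'key': 'milk', 'value': 1}, {'key': 'eggs', 'value': 10}, {'key': 'bread', 'value': 2}]

# Custom key names:
dict2items({'milk': 1}, key_name='item', value_name='qty')
# -> [{'item': 'milk', 'qty': 1}]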
Example #15
    def saturate_mapping(self, value: t.Mapping) -> None:
        try:
            assert isinstance(value, t.Mapping), "must be a mapping"
            assert value, "must not be empty"
            assert dict_depth(value) == 1, "must be a mapping of depth 1"
            assert all(isinstance(_, int) for _ in value.values()), "must have int values"
        except AssertionError as e:
            raise LayerMisconfigured("saturate_mapping {}, got {}".format(*e.args, value))

        if any(isinstance(_, str) for _ in value.keys()):
            _ = {}
            for k, v in value.items():
                _[int(k)] = v
            value = _

        self._saturate_mapping = dict(sorted(value.items(), key=lambda i: i[0]))
Example #16
    def _build_values(self, settings: typing.Mapping) -> None:
        """Build up self._values from the values in the given dict."""
        errors = []
        for name, yaml_values in settings.items():
            if not isinstance(yaml_values, dict):
                errors.append(configexc.ConfigErrorDesc(
                    "While parsing {!r}".format(name), "value is not a dict"))
                continue

            values = configutils.Values(configdata.DATA[name])
            if 'global' in yaml_values:
                values.add(yaml_values.pop('global'))

            for pattern, value in yaml_values.items():
                if not isinstance(pattern, str):
                    errors.append(configexc.ConfigErrorDesc(
                        "While parsing {!r}".format(name),
                        "pattern is not of type string"))
                    continue
                try:
                    urlpattern = urlmatch.UrlPattern(pattern)
                except urlmatch.ParseError as e:
                    errors.append(configexc.ConfigErrorDesc(
                        "While parsing pattern {!r} for {!r}"
                        .format(pattern, name), e))
                    continue
                values.add(value, urlpattern)

            self._values[name] = values

        if errors:
            raise configexc.ConfigFileErrors('autoconfig.yml', errors)
Example #17
    def signal(self, value: t.Mapping) -> None:
        """Set the signal mapping

        Args:
            value (t.Mapping): a mapping with symbols and a 'carrier'

        Raises:
            LayerMisconfigured: If incompatible value is provided
        """
        try:
            assert isinstance(value, t.Mapping), "must be a mapping"
            assert "carrier" in value, "must have a 'carrier' key"
            mapping = dict(filter(lambda x: isinstance(x[0], int), value.items()))
            _0, *_1 = mapping.values()
            assert all(len(_) == len(_0) for _ in _1), "must have same-length mappings"

            self._mapping = mapping
            self._carrier = value["carrier"]
            self._signal = value
            self._subsymbol_count = len(_0)

            if self._subsymbol_count < 2:
                raise LayerMisconfigured(
                    "generic line mapping works only on signals with at " "least 2 subsymbols"
                )

        except AssertionError as e:
            raise LayerMisconfigured("signal {}, got {}".format(*e.args, value))
Example #18
    class Foo(SimpleBase):
        bar: Tuple[Mapping, ...]

        __coerce__ = {
            "bar": (List[Dict[str, Any]],
                    lambda val: tuple(Mapping(**values) for values in val))
        }
Example #19
def _sub_chars(string: str, probability: float,
               mapping: typing.Mapping) -> str:
    """Replace substrings with a given probability.

    Given a mapping, search the string for each key in turn and replace it with
    the corresponding value, with some probability. If the keys are not mutually
    exclusive (e.g. some of them overlap), the order in which they appear in the
    mapping becomes important.

    Args:
        string: text
        probability: probability of replacing a group of characters
        mapping: map of substring -> replacement

    Returns:
        enriched text
    """
    for pattern, sub in mapping.items():
        index = 0
        while 0 <= index < len(string):
            index = string.lower().find(pattern, index)
            if index < 0:
                break
            elif random.binomial(1, probability):
                string = string[:index] + sub + string[index + len(pattern):]
                index += len(sub)
            else:
                index += len(pattern)
    return string
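
A deterministic usage sketch (hypothetical mapping; `random` is assumed to be numpy.random, which provides the binomial call used above).

# Hypothetical usage: with probability=1.0 every occurrence is replaced.
leet = {'e': '3', 'a': '4'}
_sub_chars('banana bread', probability=1.0, mapping=leet)
# -> 'b4n4n4 br34d'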
Example #21
def onedict_value(d: t.Mapping) -> t.Any:
    """
    Get the value of a single-entry dictionary.

    Parameters
    ----------
    d : mapping
        A single-entry mapping.

    Returns
    -------
    object
        Unwrapped value.

    Raises
    ------
    ValueError
        If ``d`` has more than a single element.

    Notes
    -----
    This function is basically ``next(iter(d.values()))`` with a safeguard.

    Examples
    --------
    >>> onedict_value({"foo": "bar"})
    "bar"
    """

    if len(d) != 1:
        raise ValueError(f"dictionary has wrong length (expected 1, got {len(d)})")

    return next(iter(d.values()))
Example #23
File: abap.py Project: sahwar/abap
def build_rss(
    directory: pathlib.Path,
    abook: typing.Mapping,
    reverse_url=lambda n, *a: n,
    renderers: typing.Optional[typing.Mapping[str,
                                              typing.Type[XMLRenderer]]] = None
) -> ET.Element:
    renderers = renderers or load_renderers()

    extensions = collections.OrderedDict([(n, cls(reverse_url))
                                          for n, cls in renderers.items()])

    for ext_name, ext in extensions.items():
        LOG.debug(f'Registering XML namespaces for renderer: {ext_name}')
        for ns in ext.namespaces:
            ET.register_namespace(ns.prefix, ns.uri)

    rss = ET.Element('rss', attrib={'version': RSS_VERSION})
    channel = ET.SubElement(rss, 'channel')

    for ext_name, ext in extensions.items():
        LOG.debug(f'Rendering channel elements with renderer: {ext_name}')
        for el in ext.render_channel(abook):
            channel.append(el)

    for idx, item in enumerate(abook.get('items', []), start=1):
        item_elem = ET.SubElement(channel, 'item')
        for ext_name, ext in extensions.items():
            LOG.debug(
                f'Rendering item #{idx} elements with renderer: {ext_name}')
            for elem in ext.render_item(abook, item, sequence=idx):
                item_elem.append(elem)

    return rss
Example #24
    def from_dict(cls, d: ty.Mapping) -> _Renderer:
        """Instantiate a new renderer from a dictionary of instructions."""
        # raise error if invalid
        _validate_renderer(d)

        pkg_manager = d["pkg_manager"]
        users = d.get("existing_users", None)

        # create new renderer object
        renderer = cls(pkg_manager=pkg_manager, users=users)

        for mapping in d["instructions"]:
            method_or_template = mapping["name"]
            kwds = mapping["kwds"]
            this_instance_method = getattr(renderer, method_or_template, None)
            # Method exists and is something like 'copy', 'env', 'run', etc.
            if this_instance_method is not None:
                try:
                    this_instance_method(**kwds)
                except Exception as e:
                    raise RendererError(
                        f"Error on step '{method_or_template}'. Please see the"
                        " traceback above for details.") from e
            # This is actually a template.
            else:
                try:
                    renderer.add_registered_template(method_or_template,
                                                     **kwds)
                except TemplateError as e:
                    raise RendererError(
                        f"Error on template '{method_or_template}'. Please see above"
                        " for more information.") from e
        return renderer
Example #25
def parse_params(prefix: str, params: typing.Mapping) -> typing.Mapping:
    result = {}
    for key, value in params.items():
        if key.startswith(prefix):
            name = key.strip(prefix)[1:-1]
            result[name] = value
    return result
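
A usage sketch with hypothetical query-style keys of the form prefix[field]. Note that key.strip(prefix) removes any leading/trailing characters drawn from prefix rather than the literal prefix; the brackets stop the stripping, so the pattern works for this key shape.

# Hypothetical usage:
parse_params('filter', {'filter[name]': 'abc', 'filter[status]': 'open', 'limit': 10})
# -> {'name': 'abc', 'status': 'open'}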
Example #26
File: konch.py Project: brl0/konch
    def update(self, d: typing.Mapping) -> None:  # type: ignore
        for key in d.keys():
            # Shallow-merge context
            if key == "context":
                self["context"].update(Config.transform_val(d["context"]))
            else:
                self[key] = d[key]
Example #27
def create_contexts(args: t.Optional[argparse.Namespace] = None,
                    cfg: t.Mapping = ImmutableDict(),
                    use_extra: bool = True) -> ContextReturn:
    """
    Create new contexts appropriate for setting the app and lib context.

    This function takes values from the application arguments and configuration and sets them on
    the context.  It validates, normalizes, and sets defaults for the contexts based on what is
    available in the arguments and configuration.

    :kwarg args: An :python:obj:`argparse.Namespace` holding the program's command line
        arguments.  See the warning below about working with :python:mod:`argparse`.
    :kwarg cfg: A dictionary holding the program's configuration.
    :kwarg use_extra: When True, the default, all extra arguments and config values will be set as
        fields in ``app_ctx.extra``.  When False, the extra arguments and config values will be
        returned as part of the ContextReturn.
    :returns: A ContextReturn NamedTuple.

    .. warning::
        We cannot tell whether a user set a value via the command line if :python:mod:`argparse`
        sets the field to a default value.  That means when you specify the field in the
        :obj:`AppContext` or :obj:`LibContext` models, you must tell :python:mod:`argparse` not to
        set the field to a default like this::

            parser.add_argument('--breadcrumbs', default=argparse.SUPPRESS)

        If the field is only used via the :attr:`AppContext.extra` mechanism (not explicitly set),
        then you should ignore this section and use :python:mod:`argparse`'s default mechanism.
    """
    lib_values = _extract_lib_context_values(args, cfg)
    app_values = _extract_app_context_values(args, cfg)

    #
    # Save the unused values
    #
    known_fields = _FIELDS_IN_APP_CTX.union(_FIELDS_IN_LIB_CTX)

    unused_cfg = {}
    if cfg:
        unused_cfg = {k: v for k, v in cfg.items() if k not in known_fields}

    unused_args = {}
    if args:
        unused_args = {k: v for k, v in vars(args).items() if k not in known_fields}

    # Unused values are saved in app_ctx.extra when use_extra is set
    if use_extra:
        unused_cfg.update(unused_args)
        app_values['extra'] = unused_cfg
        unused_cfg = {}
        unused_args = {}

    unused_args = argparse.Namespace(**unused_args)

    # create new app and lib ctxt from the application's arguments and config.
    app_ctx = AppContext(**app_values)
    lib_ctx = LibContext(**lib_values)

    return ContextReturn(app_ctx=app_ctx, lib_ctx=lib_ctx, args=unused_args, cfg=unused_cfg)
Example #28
def associate_data(root_map: typing.Mapping,
                   *args: typing.Mapping) -> typing.List[typing.List]:
    """
    Convert a number of key->value maps to a list of lists:
    [[key, map1[key], map2[key], map3[key], ...], ...]

    The outer list is sorted in key order.
    Within each inner list, values appear in the same order as the maps were
    passed as arguments.

    The first map passed is considered the reference point for the list of keys.
    :param root_map: The first map to associate
    :param args: Additional maps to associate to the first one
    :return:
    """
    if len(args) <= 0:
        # Nothing to associate, flatten the root map and return
        return sorted([k, v] for k, v in root_map.items())
    root_keys = set(root_map.keys())
    all_same = True
    # First, check if all the maps have the same list of keys
    for other_map in args:
        if set(other_map.keys()) != root_keys:
            all_same = False
            break
    if all_same:
        # All the maps have the same set of keys, just flatten them
        return sorted([key, root_map[key]] +
                      [other_map[key] for other_map in args]
                      for key in root_keys)
    else:
        # We need to associate the maps, the timestamps are a little out
        rekeyed_maps = []
        for other_map in args:
            matches = ass.associate(root_map,
                                    other_map,
                                    offset=0,
                                    max_difference=3)
            rekeyed_map = {
                root_key: other_map[other_key]
                for root_key, other_key in matches
            }
            root_keys &= set(rekeyed_map.keys())
            rekeyed_maps.append(rekeyed_map)
        return sorted([key, root_map[key]] +
                      [rekeyed_map[key] for rekeyed_map in rekeyed_maps]
                      for key in root_keys)
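
A usage sketch of the simple path, where all maps share the same keys (hypothetical data; the mismatched-key branch additionally relies on the ass.associate helper).

# Hypothetical usage: both maps share the same keys, so they are simply flattened.
root = {2: 'b', 1: 'a'}
other = {1: 'x', 2: 'y'}
associate_data(root, other)
# -> [[1, 'a', 'x'], [2, 'b', 'y']]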
Example #29
    def update(self, other: _Mapping, **kwargs):
        """It is important to pass all values through formatters
        """
        for k, v in other.items():
            self[k] = v

        for k, v in kwargs.items():
            self[k] = v
Example #30
def _get_url_open_arg(name: str, args: typing.List, kwargs: typing.Mapping):
    arg_idx = _URL_OPEN_ARG_TO_INDEX_MAPPING.get(name)
    if arg_idx is not None:
        try:
            return args[arg_idx]
        except IndexError:
            pass
    return kwargs.get(name)
Example #31
def override(target_dict: typing.MutableMapping,
             override_dict: typing.Mapping):
    """Apply the updates in override_dict to the dict target_dict. This is like
  dict.update, but recursive. i.e. if the existing element is a dict, then
  override elements of the sub-dict rather than wholesale replacing.

  One special case is added. If a key within override dict starts with '!' then
  it is interpreted as follows:
     - if the associated value is "REMOVE", the key is removed from the parent
       dict
     - use !! for keys that actually start with ! and shouldn't be removed.

  e.g.
  override(
    {
      'outer': { 'inner': { 'key': 'oldValue', 'existingKey': True } }
    },
    {
      'outer': { 'inner': { 'key': 'newValue' } },
      'newKey': { 'newDict': True },
    }
  )
  yields:
    {
      'outer': {
        'inner': {
           'key': 'newValue',
           'existingKey': True
        }
      },
      'newKey': { newDict: True }
    }
  """

    for key, value in override_dict.items():
        #
        # Handle special ! syntax:
        #   "!keyname" : "REMOVE",   --> remove the key 'keyname' from target_dict
        #
        if key[0:1] == "!" and key[1:2] != "!":
            key = key[1:]
            if value == "REMOVE":
                target_dict.pop(key, None)
                continue

        current_value = target_dict.get(key)
        if not isinstance(current_value, Mapping):
            # Thing or Mapping overrides Thing or None
            target_dict[key] = value
        elif isinstance(value, Mapping):
            # Mapping overrides mapping, recurse
            target_dict[key] = override(current_value, value)
        else:
            # Thing overrides Mapping
            target_dict[key] = value

    return target_dict
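
A sketch of the '!'/REMOVE syntax described in the docstring (hypothetical dicts; Mapping is assumed to be imported from collections.abc at module level, as the isinstance checks above require).

# Hypothetical usage: recursive override combined with a key removal.
base = {'outer': {'inner': {'key': 'oldValue'}}, 'obsolete': 1}
override(base, {'!obsolete': 'REMOVE', 'outer': {'inner': {'key': 'newValue'}}})
# -> {'outer': {'inner': {'key': 'newValue'}}}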
Example #32
def create_contexts(args: t.Optional[argparse.Namespace] = None,
                    cfg: t.Mapping = ImmutableDict(),
                    use_extra: bool = True) -> ContextReturn:
    """
    Create new contexts appropriate for setting the app and lib context.

    This function takes values from the application arguments and configuration and sets them on
    the context.  It validates, normalizes, and sets defaults for the contexts based on what is
    available in the arguments and configuration.

    :kwarg args: An :python:obj:`argparse.Namespace` holding the program's command line arguments.
        Note argparse's ability to add default values should not be used with fields which are fully
        expressed in the :obj:`AppContext` or :obj:`LibContext` models.  Instead, set a default in
        the context model.  You can use argparse defaults with fields that get set in
        :attr:`AppContext.extra`.
    :kwarg cfg: A dictionary holding the program's configuration.
    :kwarg use_extra: When True, the default, all extra arguments and config values will be set as
        fields in ``app_ctx.extra``.  When False, the extra arguments and config values will be
        returned as part of the ContextReturn.
    :returns: A ContextReturn NamedTuple.
    """
    lib_values = _extract_lib_context_values(args, cfg)
    app_values = _extract_app_context_values(args, cfg)

    #
    # Save the unused values
    #
    known_fields = _FIELDS_IN_APP_CTX.union(_FIELDS_IN_LIB_CTX)

    unused_cfg = {}
    if cfg:
        unused_cfg = {k: v for k, v in cfg.items() if k not in known_fields}

    unused_args = {}
    if args:
        unused_args = {
            k: v
            for k, v in vars(args).items() if k not in known_fields
        }

    # Unused values are saved in app_ctx.extra when use_extra is set
    if use_extra:
        unused_cfg.update(unused_args)
        app_values['extra'] = unused_cfg
        unused_cfg = {}
        unused_args = {}

    unused_args = argparse.Namespace(**unused_args)

    # create new app and lib ctxt from the application's arguments and config.
    app_ctx = AppContext(**app_values)
    lib_ctx = LibContext(**lib_values)

    return ContextReturn(app_ctx=app_ctx,
                         lib_ctx=lib_ctx,
                         args=unused_args,
                         cfg=unused_cfg)
Example #33
    def validate(self, data: typing.Mapping):
        # validate projects (access check)
        for project in data.get("projects", []):
            if self.context[
                    "request"].user.iaso_profile.account != project.account:
                raise serializers.ValidationError(
                    {"project_ids": "Invalid project ids"})

        return data
Example #34
    def fetch_raw_metadata(cls, samples: typing.Mapping, libraries: typing.Mapping,
                           studies: typing.Mapping) -> SeqscapeRawMetadata:
        """

        :param samples: a dict containing: key = name of the identifier type, value = set of identifier values
        :param libraries: same
        :param studies: same
        :return:
        """
        raw_meta = SeqscapeRawMetadata()
        ss_connection = cls._get_connection(config.SEQSC_HOST, config.SEQSC_PORT, config.SEQSC_DB_NAME,
                                            config.SEQSC_USER)
        if samples:
            samples_fetched_by_names, samples_fetched_by_ids, samples_fetched_by_accession_nrs = \
                cls._fetch_samples(ss_connection, samples.get('name'), samples.get('internal_id'), samples.get('accession_number'))
            raw_meta.add_fetched_entities(samples_fetched_by_names)
            raw_meta.add_fetched_entities(samples_fetched_by_accession_nrs)
            raw_meta.add_fetched_entities(samples_fetched_by_ids)

            samples_set = raw_meta.get_entities_without_duplicates_by_entity_type('sample')
            studies_for_samples = cls._fetch_studies_for_samples(ss_connection, samples_set)
            raw_meta.add_fetched_entities_by_association(studies_for_samples)

        if studies:
            studies_fetched_by_names, studies_fetched_by_ids, studies_fetched_by_accession_nrs = \
                cls._fetch_studies(ss_connection, studies.get('name'), studies.get('internal_id'), studies.get('accession_number'))
            raw_meta.add_fetched_entities(studies_fetched_by_accession_nrs)
            raw_meta.add_fetched_entities(studies_fetched_by_ids)
            raw_meta.add_fetched_entities(studies_fetched_by_names)

            # Getting the sample-study associations:
            studies_set = raw_meta.get_entities_without_duplicates_by_entity_type('study')
            samples_for_study = cls._fetch_samples_for_studies(ss_connection, studies_set)
            raw_meta.add_fetched_entities_by_association(samples_for_study)

        if libraries:
            libraries_fetched_by_names, libraries_fetched_by_ids = \
                cls._fetch_libraries(ss_connection, libraries.get('name'), libraries.get('internal_id'))
            raw_meta.add_fetched_entities(libraries_fetched_by_names)
            raw_meta.add_fetched_entities(libraries_fetched_by_ids)
        return raw_meta
Example #35
def attributes_with_annotations(namespace: typing.Mapping):
    for name, value in namespace.items():
        yield from _if_annotation(name, value)
Example #36
    def fromdict(cls, mapping: t.Mapping, ncols=1):
        iterable = ["{}: {}".format(*i) for i in mapping.items()]
        return cls.new(iterable, ncols)