Example #1
def get_len_safely(value: Any, default: int = 1, return_copy: bool = False):
    # noinspection PyUnresolvedReferences
    """ Количество элементов в последовательности
        (Возможно передать единственное значение, тогда очевидно длина = 1)

        :param value: Единичное значение или последовательность
        :param default: Значение, возвращаемое если последовательность пустая, либо передано <None>
        :param return_copy: Вернуть копию, в случае если был передан генератор

        >>> gen_len, gen_copy = get_len_safely((x for x in range(3)), return_copy=True)
        >>> print("Generator len: {}".format(gen_len), '|', "Generator copy: {}".format(list(gen_copy)))
        Generator len: 3 | Generator copy: [0, 1, 2]

        >>> lst_len = get_len_safely(list(range(3)))
        >>> print("Iterable len: {}".format(lst_len))
        Iterable len: 3

        >>> one_value_len = get_len_safely(3)
        >>> print("One value len: {}".format(one_value_len))
        One value len: 1
    """

    iterable, iter_copy = tee(always_iterable(value)) if (
        is_lazy_sequence(value)
        and return_copy) else [always_iterable(value), None]
    iter_len = ilen(iterable) or default

    result = (iter_len, iter_copy) if iter_copy else iter_len

    return result
Example #2
 def __init__(self,
              base_region,
              data_files,
              overlap_files=None,
              domain_id=-1):
     if overlap_files is None:
         overlap_files = []
     self.field_data = YTFieldData()
     self.field_parameters = {}
     self.data_files = list(always_iterable(data_files))
     self.overlap_files = list(always_iterable(overlap_files))
     self.ds = self.data_files[0].ds
     self._last_mask = None
     self._last_selector_id = None
     self._current_particle_type = "all"
     # self._current_fluid_type = self.ds.default_fluid_type
     if hasattr(base_region, "base_selector"):
         self.base_selector = base_region.base_selector
         self.base_region = base_region.base_region
     else:
         self.base_region = base_region
         self.base_selector = base_region.selector
     self._octree = None
     self._temp_spatial = False
     if isinstance(base_region, ParticleContainer):
         self._temp_spatial = base_region._temp_spatial
         self._octree = base_region._octree
     # To ensure there are no domains if the global octree is not used
     self.domain_id = -1
Example #3
    def _find_all_paths(
            self, starts: Union[Hashable, Sequence[Hashable]],
            ends: Union[Hashable, Sequence[Hashable]]) -> List[List[Hashable]]:
        """[summary]

        Args:
            starts (Union[Hashable, Sequence[Hashable]]): starting points for
                paths through the Graph.
            ends (Union[Hashable, Sequence[Hashable]]): endpoints for paths 
                through the Graph.

        Returns:
            List[List[Hashable]]: list of all paths through the Graph from all
                'starts' to all 'ends'.
            
        """
        all_paths = []
        for start in more_itertools.always_iterable(starts):
            for end in more_itertools.always_iterable(ends):
                paths = self.find_paths(start=start, end=end)
                if paths:
                    if all(isinstance(path, Hashable) for path in paths):
                        all_paths.append(paths)
                    else:
                        all_paths.extend(paths)
        return all_paths
Example #4
    def visitBoot_stanza(
            self, ctx: RefindConfigParser.Boot_stanzaContext) -> BootStanza:
        menu_entry_context = ctx.menu_entry()
        menu_entry = menu_entry_context.accept(MenuEntryVisitor())
        main_options = OptionVisitor.map_to_options_dict(
            checked_cast(list[ParserRuleContext], ctx.main_option()))
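        # main_options.get(...) returns None when an option is absent; wrapping it
        # in always_iterable lets only() return the single stored value (or its
        # default) instead of failing on None.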
        volume = only(always_iterable(main_options.get(RefindOption.VOLUME)))
        loader = only(always_iterable(main_options.get(RefindOption.LOADER)))
        initrd = only(always_iterable(main_options.get(RefindOption.INITRD)))
        icon = only(always_iterable(main_options.get(RefindOption.ICON)))
        os_type = only(always_iterable(main_options.get(RefindOption.OS_TYPE)))
        graphics = only(
            always_iterable(main_options.get(RefindOption.GRAPHICS)))
        boot_options = only(
            always_iterable(main_options.get(RefindOption.BOOT_OPTIONS)))
        firmware_bootnum = only(
            always_iterable(main_options.get(RefindOption.FIRMWARE_BOOTNUM)))
        disabled = only(always_iterable(main_options.get(
            RefindOption.DISABLED)),
                        default=False)
        sub_menus = always_iterable(
            main_options.get(RefindOption.SUB_MENU_ENTRY))

        return BootStanza(
            menu_entry,
            volume,
            loader,
            initrd,
            icon,
            os_type,
            graphics,
            BootOptions(boot_options),
            firmware_bootnum,
            disabled,
        ).with_sub_menus(sub_menus)
Example #5
 def __init__(cls, name, b, d):
     type.__init__(cls, name, b, d)
     if cls.name is None:
         return
     if cls.subparser not in _subparsers:
         try:
             description = _subparsers_description[cls.subparser]
         except KeyError:
             description = cls.subparser
         parent_parser = argparse.ArgumentParser(add_help=False)
         p = subparsers.add_parser(
             cls.subparser,
             help=description,
             description=description,
             parents=[parent_parser],
         )
         _subparsers[cls.subparser] = p.add_subparsers(title=cls.subparser,
                                                       dest=cls.subparser)
     sp = _subparsers[cls.subparser]
     for name in always_iterable(cls.name):
         sc = sp.add_parser(name,
                            description=cls.description,
                            help=cls.description)
         sc.set_defaults(func=cls.run)
         for arg in cls.args:
             _add_arg(sc, arg)
Example #6
    def needify(cls, instance: object) -> Mapping[str, Any]:
        """[summary]

        Args:
            instance (object): [description]

        Raises:
            KeyError: [description]

        Returns:
            Mapping[str, Any]: [description]
            
        """
        kwargs = {}
        for need in more_itertools.always_iterable(cls.needs):
            try:
                kwargs[need] = getattr(instance, need)
            except AttributeError:
                try:
                    kwargs[need] = instance.contents[need]
                except (AttributeError, KeyError):
                    raise KeyError(
                        f'{need} could not be found in order to call a '
                        f'method of {cls.__name__}')
        return kwargs
Example #7
    def needify(cls, instance: object) -> Mapping[str, Any]:
        """Populates keywords from 'instance' based on 'needs'.

        Args:
            instance (object): instance with attributes or items in its 
                'contents' attribute with data to compose arguments to be
                passed to the 'create' classmethod.

        Raises:
            KeyError: if data could not be found for an argument.

        Returns:
            Mapping[str, Any]: keyword parameters and arguments to pass to the
                'create' classmethod.
            
        """
        kwargs = {}
        for need in more_itertools.always_iterable(cls.needs):
            if need in ['self']:
                key = amicus.tools.snakify(instance.__class__.__name__)
                kwargs[key] = instance
            else:
                try:
                    kwargs[need] = getattr(instance, need)
                except AttributeError:
                    try:
                        kwargs[need] = instance.contents[need]
                    except (AttributeError, KeyError):
                        raise KeyError(
                            f'{need} could not be found in order to call a '
                            f'method of {cls.__name__}')
        return kwargs
Example #8
    def inject(self,
               instance: object,
               additional: Union[Sequence[str], str] = None,
               overwrite: bool = False) -> object:
        """Injects appropriate items into 'instance' from 'contents'.

        Args:
            instance (object): sourdough class instance to be modified.
            additional (Union[Sequence[str], str]]): other section(s) in 
                'contents' to inject into 'instance'. Defaults to None.
            overwrite (bool]): whether to overwrite a local attribute in 
                'instance' if there are values stored in that attribute. 
                Defaults to False.

        Returns:
            instance (object): sourdough class instance with modifications made.

        """
        sections = ['general']
        try:
            sections.append(instance.name)
        except AttributeError:
            pass
        if additional:
            sections.extend(more_itertools.always_iterable(additional))
        for section in sections:
            try:
                for key, value in self.contents[section].items():
                    instance = self._inject(instance=instance,
                                            attribute=key,
                                            value=value,
                                            overwrite=overwrite)
            except KeyError:
                pass
        return instance
Example #9
def get_consecutive_segments(
        data: TABLE_DATA_TYPE,
        columns: Union[str, List[str], None] = None) -> pd.Series:
    """ Получить индексы последовательностей с одинаковыми идущими подряд элементами

        >>> df = pd.DataFrame({'A': [1,1,1, 4,4,4,4, 3,3], 'B': [1,1,4, 4,4,4,3, 3,3]})
        >>> df_res = df.reset_index().groupby(['A', 'B'])['index'].apply(np.array)
        >>> print(df_res)
                A  B
        1  1       [0, 1]
           4          [2]
        3  3       [7, 8]
        4  3          [6]
           4    [3, 4, 5]
    """
    if not isinstance(data, pd.DataFrame):
        data = pd.DataFrame(data)

    ### If specific columns were passed for grouping, use them; otherwise use all columns
    if columns is not None:
        columns = data.columns.intersection(always_iterable(columns))
    else:
        columns = data.columns.tolist()

    results = data.reset_index().groupby(columns)['index'].apply(np.array)

    return results
Example #10
    def __init__(self,
                 neural_module,
                 action_space,
                 observation_space,
                 policy_ctor=librl.nn.policy.RepeatedNormal):
        super(IndependentNormalActor, self).__init__()
        self.policy_ctor = policy_ctor

        self.input_dimension = list(
            more_itertools.always_iterable(neural_module.output_dimension))
        self._input_size = functools.reduce(lambda x, y: x * y,
                                            self.input_dimension, 1)
        self.neural_module = neural_module
        self.output_dimension = action_space.shape
        self._output_size = functools.reduce(lambda x, y: x * y,
                                             self.output_dimension, 1)

        # Our output layers are used as the seed for some set of random number generators.
        # These random number generators are used to generate edge pairs.
        self.mu_layer = nn.Linear(self._input_size, self._output_size)
        self.cov_diag = nn.Linear(self._input_size, self._output_size)
        self.make_sane = torch.nn.Softsign()  # type: ignore

        # Initialize NN
        for x in self.parameters():
            if x.dim() > 1:
                nn.init.kaiming_normal_(x)
Example #11
    def __init__(self, neural_module, action_space, observation_space):
        super(BiCategoricalActor, self).__init__()

        self.input_dimension = list(
            more_itertools.always_iterable(neural_module.output_dimension))
        self._input_size = functools.reduce(lambda x, y: x * y,
                                            self.input_dimension, 1)
        self.neural_module = neural_module
        self.output_dimension = 2
        assert len(action_space.shape) == 2 and action_space.shape[-1] == 2
        assert len(
            observation_space.shape
        ) == 2 and observation_space.shape[0] == observation_space.shape[1]

        # Our output layers are used as the seed for some set of random number generators.
        # These random number generators are used to generate edge pairs.
        self.output_layers = {}
        self.output_layers["first"] = nn.Linear(self._input_size,
                                                observation_space.shape[0])
        self.output_layers["second"] = nn.Linear(self._input_size,
                                                 observation_space.shape[0])
        self.output_layers = nn.ModuleDict(self.output_layers)

        # Must pass output layers through softmax in order for them to be a proper PDF.
        self.softmax = nn.Softmax(dim=0)

        # Initialize NN
        for x in self.parameters():
            if x.dim() > 1:
                nn.init.kaiming_normal_(x)
Example #12
def test_detection_func(lines, expected):
    lines = more_itertools.peekable(
        enumerate(more_itertools.always_iterable(lines), start=1)
    )

    actual = doctest.detection_func(lines)
    assert actual == expected
Example #13
def _chain_from_iterable_beside(
        iterable: Iterable,
        ignored_type: type = str,
        is_cached: bool = True,
        is_error_suppress: bool = True) -> CachedIterWrapper:
    """ Раскрывает последовательности вложенные в <iterable> в единую последательность,
            если только они не являются экземплярами <ignored_type> (по-умолчанию строки)
        Важно: Внутренние последовательности должны быть однородными (вывод о содержании <iterable> делается по первому элементу)

        >>> print('Sequence of strings: ', list(_chain_from_iterable_beside(['first', 'second'])))
        Sequence of strings:  ['first', 'second']

        >>> print('Sequence of generators: ', list(_chain_from_iterable_beside([range(3), range(5)])))
        Sequence of generators:  [0, 1, 2, 0, 1, 2, 3, 4]

        >>> print('Sequence of numbers: ', list(_chain_from_iterable_beside([1, 2, 3])))
        Sequence of numbers:  [1, 2, 3]
    """

    head, results = spy(
        always_iterable(iterable)
    )  # not only a sequence but also a single value may be passed

    if head and isinstance(
            head[0], Iterable) and (not ignored_type
                                    or not isinstance(head[0], ignored_type)):
        results = chain.from_iterable(iterable)

    return CachedIterWrapper(results, is_cached, is_error_suppress)
Example #14
def validate_elementary_item(key: Any, value: Any) -> None:
    if not isinstance(key, str):
        raise ValueError(
            f"Invalid schema: received key '{key}' with type '{type(key)}', "
            "expected a str"
        )
    if len(key) == 0:
        raise ValueError("Invalid schema: received an empty str as key")
    if _uses_invalid_chars(key):
        raise ValueError(
            f"Invalid schema: received key {key!r}, "
            "expected only alphanumeric characters or '-'"
        )
    if not key[0].isalpha():
        raise ValueError(
            f"Invalid schema: received key {key!r}, "
            "keys are expected to start with a letter"
        )
    if not isinstance(value, (*SCALAR_TYPES, list)):
        raise ValueError(
            f"Invalid schema: received value with type '{type(value)}', "
            "exepected an int, float, bool, str, or list"
        )
    for ev in always_iterable(value):
        if not isinstance(ev, SCALAR_TYPES):
            raise ValueError(
                f"Invalid schema: reveived value '{value}' with type '{type(value)}', "
                "exepected a int, float, bool or str"
            )
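
A brief usage sketch of the function above (assuming the snippet's module-level SCALAR_TYPES and _uses_invalid_chars helpers are in scope; per the error messages, SCALAR_TYPES presumably covers int, float, bool, and str). always_iterable lets the same loop validate a scalar value and every element of a list value:

validate_elementary_item("resolution", 64)          # scalar value passes
validate_elementary_item("resolution", [64, 128])   # list value, each element checked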
Example #15
    def predicate(self, name, attr, meta):
        """
        Return true if the given attribute should be included in this section.

        Arguments:
            name (str): The name of the attribute.  In most cases, this is 
                identical to :attr:`attr.__name__`.
            attr (object): The attribute object itself.
            meta (dict): Any `:meta:`__ fields present in the attribute's 
                docstring, as parsed by 
                :func:`sphinx.util.docstrings.separate_metadata()`. 

        __ https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#info-field-lists

        See Also:
            `is_method`
            `is_data_attr`
            `is_public`
            `is_private`
            `is_special`
        """
        if does_match(name, self.exclude_pattern):
            return False

        for section_cls in always_iterable(self.exclude_section):
            section = section_cls(self.state, self.cls)
            if section.predicate(name, attr, meta):
                return False

        return True
Example #16
    def drop_infrequently_true(self,
                               columns: Optional[Union[List[str], str]] = None,
                               threshold: Optional[float] = 0) -> None:
        """Drops boolean columns that rarely are True.

        This differs from the sklearn VarianceThreshold class because it is only
        concerned with rare instances of True and not False. This enables
        users to set a different variance threshold for rarely appearing
        information. 'threshold' is defined as the percentage of total rows (and
        not the typical variance formulas used in sklearn).

        Args:
            columns (list or str): columns to check.
            threshold (float): the percentage of True values in a boolean column
                that must exist for the column to be kept.
        """
        if columns is None:
            columns = self.booleans
        infrequents = []
        for column in more_itertools.always_iterable(columns):
            try:
                if self.data[column].mean() < threshold:
                    infrequents.append(column)
            except KeyError:
                raise KeyError(' '.join([column, 'is not in data']))
        self.drop_columns(columns=infrequents)
        return self
Example #17
 def _timeout_embed(self):
     answer = next(iter(always_iterable(self._current_question.answer)))
     return (discord.Embed(description=f'The answer was **{answer}**',
                           colour=0xFF0000).set_author(
                               name='Times up!',
                               icon_url=TIMEOUT_ICON).set_footer(
                                   text='No one got any points :('))
Example #18
    def wait(self, state, interval=0.1, channel=None):
        """Poll for the given state(s) at intervals; publish to channel."""
        states = set(always_iterable(state))

        while self.state not in states:
            time.sleep(interval)
        self.publish(channel)
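
The normalization on the first line of the method, shown in isolation (any always_iterable variant that treats strings as single items, such as the more_itertools default, behaves this way): a single state and a list of states both become a set.

set(always_iterable("STARTED"))               # {'STARTED'}
set(always_iterable(["STARTED", "STOPPED"]))  # {'STARTED', 'STOPPED'}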
Example #19
File: io.py Project: clairekope/yt
def read_amrvac_namelist(parfiles):
    """Read one or more parfiles, and return a unified f90nml.Namelist object.

    This function replicates the patching logic of MPI-AMRVAC where redundant parameters
    only retain last-in-line values, with the exception of `&filelist:base_filename`,
    which is accumulated. When passed a single file, this function acts as a mere
    wrapper of f90nml.read().

    Parameters
    ----------
    parfiles : str, os.Pathlike, byte, or an iterable returning those types
        A file path, or a list of file paths to MPI-AMRVAC configuration parfiles.

    Returns
    -------
    unified_namelist : f90nml.Namelist
        A single namelist object. The class inherits from ordereddict.

    """
    parfiles = (os.path.expanduser(pf) for pf in always_iterable(parfiles))

    # first merge the namelists
    namelists = [f90nml.read(parfile) for parfile in parfiles]
    unified_namelist = f90nml.Namelist()
    for nml in namelists:
        unified_namelist.patch(nml)

    # accumulate `&filelist:base_filename`
    base_filename = "".join(
        nml.get("filelist", {}).get("base_filename", "") for nml in namelists)
    unified_namelist["filelist"]["base_filename"] = base_filename

    return unified_namelist
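
A small merge sketch for the function above, using two throwaway parfiles (hypothetical names and contents; assumes f90nml and the imports used by the snippet are available). A single path and a list of paths are both accepted because of always_iterable:

from pathlib import Path

Path("base.par").write_text("&filelist\n  base_filename = 'run_'\n/\n")
Path("hires.par").write_text("&filelist\n  base_filename = 'hires'\n/\n&meshlist\n  refine_max_level = 3\n/\n")

single = read_amrvac_namelist("base.par")                  # acts as a plain f90nml.read wrapper
merged = read_amrvac_namelist(["base.par", "hires.par"])   # redundant keys: last-in-line wins
print(merged["filelist"]["base_filename"])                 # 'run_hires' -- base_filename is accumulated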
Example #20
    def link_channels(self, table_id, channels=0):
        r"""Link an image channel to a field table.

        Once a field table has been added, it can be linked against a channel (any
        one of the six -- red, green, blue, red absorption, green absorption, blue
        absorption) and then the value calculated for that field table will be
        added to the integration for that channel.  Not all tables must be linked
        against channels.

        Parameters
        ----------
        table_id : int
            The 0-indexed table to link.
        channels : int or list of ints
            The channel or channels to link with this table's calculated value.


        Examples
        --------
        This example shows how to link a new transfer function against field 0, and
        then link that table against all three RGB channels.  Typically an
        absorption (or 'alpha') channel is also linked.

        >>> mv = MultiVariateTransferFunction()
        >>> tf = TransferFunction((-10.0, -5.0))
        >>> tf.add_gaussian(-7.0, 0.01, 1.0)
        >>> mv.add_field_table(tf, 0)
        >>> mv.link_channels(0, [0, 1, 2])
        """
        for c in always_iterable(channels):
            self.field_table_ids[c] = table_id
Example #21
    def extend(self,
               nodes: Sequence[Hashable],
               start: Union[Hashable, Sequence[Hashable]] = None) -> None:
        """Adds 'nodes' to the stored data structure.

        Args:
            nodes (Sequence[Hashable]): names of items to add.
            start (Union[Hashable, Sequence[Hashable]]): where to add the new
                nodes. If there are multiple nodes in 'start', the new nodes will be added
                to each of the starting points. If 'start' is None, 'endpoints'
                will be used. Defaults to None.
                
        """
        if any(isinstance(n, (list, tuple)) for n in nodes):
            nodes = tuple(more_itertools.collapse(nodes))
        if start is None:
            start = self.endpoints
        if start:
            for starting in more_itertools.always_iterable(start):
                self.connect(start=starting, stop=nodes[0])
        else:
            self.add(nodes[0])
        edges = more_itertools.windowed(nodes, 2)
        for edge_pair in edges:
            self.connect(start=edge_pair[0], stop=edge_pair[1])
        return self
Example #22
def always_iterable(item):
    """
    Given an object, always return an iterable. If the item is not
    already iterable, return a tuple containing only the item. If item is
    None, an empty iterable is returned.

    >>> always_iterable([1,2,3])
    <list_iterator...>
    >>> always_iterable('foo')
    <tuple_iterator...>
    >>> always_iterable(None)
    <tuple_iterator...>
    >>> always_iterable(range(10))
    <range_iterator...>
    >>> def _test_func(): yield "I'm iterable"
    >>> print(next(always_iterable(_test_func())))
    I'm iterable

    Although mappings are iterable, treat each like a singleton, as
    it's more like an object than a sequence.

    >>> next(always_iterable(dict(a=1)))
    {'a': 1}
    """
    base_types = str, bytes, collections.abc.Mapping
    return more_itertools.always_iterable(item, base_type=base_types)
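
The wrapper above differs from more_itertools.always_iterable only in its base_type, which additionally treats mappings as single items. A minimal contrast sketch (assuming the function above is in scope as always_iterable):

import more_itertools

list(more_itertools.always_iterable({"a": 1}))  # ['a']       -- with the library default, a dict iterates over its keys
list(always_iterable({"a": 1}))                 # [{'a': 1}]  -- the wrapper yields the mapping itself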
Example #23
    def wait(self, state, interval=0.1, channel=None):
        """Poll for the given state(s) at intervals; publish to channel."""
        states = set(always_iterable(state))

        while self.state not in states:
            time.sleep(interval)
            self.publish(channel)
Example #24
    def with_boot_files_check_result(self, subvolume: Subvolume,
                                     include_sub_menus: bool) -> BootStanza:
        normalized_name = self.normalized_name
        all_boot_file_paths = self.all_boot_file_paths
        logical_path = subvolume.logical_path
        matched_boot_files: list[str] = []
        unmatched_boot_files: list[str] = []
        sources = [BootFilePathSource.BOOT_STANZA]

        if include_sub_menus:
            sources.append(BootFilePathSource.SUB_MENU)

        for source in sources:
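            # .get() returns None for a source with no recorded paths; always_iterable
            # turns that into an empty iteration instead of a TypeError in the loop below.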
            boot_file_paths = always_iterable(all_boot_file_paths.get(source))

            for boot_file_path in boot_file_paths:
                append_func = (matched_boot_files.append
                               if logical_path in boot_file_path else
                               unmatched_boot_files.append)

                append_func(boot_file_path)

        self._boot_files_check_result = BootFilesCheckResult(
            normalized_name, logical_path, matched_boot_files,
            unmatched_boot_files)

        return self
Example #25
def iter_fields(field_or_fields):
    """
    Create an iterator for field names, specified as single strings or tuples(fname,
    ftype) alike.
    This can safely be used in places where we accept a single field or a list as input.

    Parameters
    ----------
    field_or_fields : str, tuple(str, str), or any iterable of the previous types.

    Examples
    --------

    >>> fields = "density"
    >>> for field in iter_fields(fields):
    ...     print(field)
    density

    >>> fields = ("gas", "density")
    >>> for field in iter_fields(fields):
    ...     print(field)
    ('gas', 'density')

    >>> fields = ["density", "temperature", ("index", "dx")]
    >>> for field in iter_fields(fields):
    ...     print(field)
    density
    temperature
    ('index', 'dx')
    """
    return always_iterable(field_or_fields, base_type=(tuple, str, bytes))
Example #26
    def branchify(self,
                  nodes: Sequence[Sequence[Hashable]],
                  start: Union[Hashable, Sequence[Hashable]] = None) -> None:
        """Adds parallel paths to the stored data structure.

        Subclasses should ordinarily provide their own methods.

        Args:
            nodes (Sequence[Sequence[Hashable]]): a list of list of nodes which
                should have a Cartesian product determined and extended to
                the stored data structure.
            start (Union[Hashable, Sequence[Hashable]]): where to add the new
                nodes. If there are multiple nodes in 'start', the new nodes will be added
                to each of the starting points. If 'start' is None, 'endpoints'
                will be used. Defaults to None.
                
        """
        if start is None:
            start = copy.deepcopy(self.endpoints)
        paths = list(map(list, itertools.product(*nodes)))
        for path in paths:
            if start:
                for starting in more_itertools.always_iterable(start):
                    self.add_edge(start=starting, stop=path[0])
            elif path[0] not in self.contents:
                self.add_node(path[0])
            edges = more_itertools.windowed(path, 2)
            for edge_pair in edges:
                self.add_edge(start=edge_pair[0], stop=edge_pair[1])
        return self
Example #27
    def __init__(
        self,
        data_source,
        conditionals,
        ds=None,
        field_parameters=None,
        base_object=None,
        locals=None,
    ):
        if locals is None:
            locals = {}
        validate_object(data_source, YTSelectionContainer)
        validate_sequence(conditionals)
        for condition in conditionals:
            validate_object(condition, str)
        validate_object(ds, Dataset)
        validate_object(field_parameters, dict)
        validate_object(base_object, YTSelectionContainer)

        self.conditionals = list(always_iterable(conditionals))
        if isinstance(data_source, YTCutRegion):
            # If the source is also a cut region, add its conditionals
            # and set the source to be its source.
            # Preserve order of conditionals.
            self.conditionals = data_source.conditionals + self.conditionals
            data_source = data_source.base_object

        super().__init__(
            data_source.center, ds, field_parameters, data_source=data_source
        )
        self.filter_fields = self._check_filter_fields()
        self.base_object = data_source
        self.locals = locals
        self._selector = None
Example #28
 def select(cls, name: Union[str, Sequence[str]]) -> Type[Keystone]:
     """Returns matching subclass from 'subclasses.
     
     Args:
         name (Union[str, Sequence[str]]): name of item in 'subclasses' to
             return
         
     Raises:
         KeyError: if no match is found for 'name' in 'subclasses'.
         
     Returns:
         Type[Keystone]: stored Keystone subclass.
         
     """
     item = None
     for key in more_itertools.always_iterable(name):
         try:
             item = cls.subclasses[key]
             break
         except KeyError:
             pass
     if item is None:
         raise KeyError(f'No matching item for {str(name)} was found')
     else:
         return item
Example #29
def main() -> None:
    args = parse_arguments()
    dfs = prepare_eval(args)
    sorted_metrics = sorted(dfs[0].columns)

    num_metric = len(sorted_metrics)
    args.height *= num_metric
    fig, axes = plt.subplots(num_metric, 1, figsize=(args.width, args.height))

    base = dfs[0]
    dfs = dfs[1:]
    for x in dfs:
        for metric in sorted_metrics:
            x[metric] = x[metric] - base[metric]

    palette = sns.color_palette(args.palette)
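    # plt.subplots returns a bare Axes when there is a single metric and an array
    # of Axes otherwise; always_iterable lets the zip below handle both shapes.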
    for metric, ax in zip(sorted_metrics, always_iterable(axes)):
        df = pd.concat(dfs,
                       names=["Sys"],
                       keys=[f"{x}-{args.names[0]}" for x in args.names[1:]])
        df.index = df.index.set_names(["Sys", "Qid"])
        df = df.reset_index()
        df = df[df[metric] != 0]
        df.loc[:, "Change"] = "Good"
        df.loc[df[metric] < 0, "Change"] = "Bad"
        if args.sort:
            df = df.sort_values(metric, ascending=args.sort == "ascending")
        df.loc[df[metric] < 0, metric] = -df[metric]

        dfgood = df.loc[df["Change"] == "Good"]
        dfgood = (dfgood.sort_values(
            metric, ascending=args.sort == "ascending").reset_index(
                drop=True).reset_index())
        dfbad = df.loc[df["Change"] == "Bad"]
        dfbad = (dfbad.sort_values(
            metric, ascending=args.sort == "ascending").reset_index(
                drop=True).reset_index())
        df = pd.concat([dfgood, dfbad], names=["Change"], keys=["Good", "Bad"])

        sns.lineplot(
            x="index",
            y=metric,
            hue="Change",
            data=df,
            ax=ax,
            palette=args.palette,
            alpha=0.7,
            sort=False,
        )
        ax.fill_between(dfgood["index"], dfgood[metric], alpha=0.5)
        ax.fill_between(dfbad["index"], dfbad[metric], alpha=0.5)
        ax.set_xticks([dfbad["index"].max(), dfgood["index"].max()])
        ax.axhline(0, ls="--", color=palette[-1])
        label_metric = metric.replace("_cut_", "@").upper()
        ax.set_ylabel(fr"$\Delta${label_metric}")

    if isinstance(args.save, str):
        fig.tight_layout()
        fig.savefig(args.save)
Example #30
    def _block_device_partition_table(
        self, block_device: BlockDevice
    ) -> PartitionTable:
        logger = self._logger
        findmnt_columns = [
            FindmntColumn.PART_UUID,
            FindmntColumn.PART_LABEL,
            FindmntColumn.FS_UUID,
            FindmntColumn.DEVICE_NAME,
            FindmntColumn.FS_TYPE,
            FindmntColumn.FS_LABEL,
            FindmntColumn.FS_MOUNT_POINT,
            FindmntColumn.FS_MOUNT_OPTIONS,
        ]
        device_name = block_device.name
        output = constants.COLUMN_SEPARATOR.join(
            [findmnt_column_key.value.upper() for findmnt_column_key in findmnt_columns]
        )
        findmnt_command = f"findmnt --json --mtab --real --nofsroot --output {output}"

        try:
            logger.info(
                f"Initializing the live partition table for device '{device_name}' using findmnt."
            )
            logger.debug(f"Running command '{findmnt_command}'.")

            findmnt_process = subprocess.run(
                findmnt_command.split(), capture_output=True, check=True, text=True
            )
        except CalledProcessError as e:
            stderr = checked_cast(str, e.stderr)

            if is_none_or_whitespace(stderr):
                message = "findmnt execution failed!"
            else:
                message = f"findmnt execution failed: '{stderr.rstrip()}'!"

            logger.exception(message)
            raise PartitionError(
                f"Could not initialize the live partition table for '{device_name}'!"
            ) from e

        findmnt_parsed_output = json.loads(findmnt_process.stdout)
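        # The parsed JSON may lack the "filesystems" key; always_iterable maps the
        # resulting None to an empty iterable so the generator below simply yields nothing.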
        findmnt_partitions = (
            findmnt_partition
            for findmnt_partition in always_iterable(
                findmnt_parsed_output.get(FindmntJsonKey.FILESYSTEMS.value)
            )
            if block_device.is_matched_with(
                default_if_none(
                    findmnt_partition.get(FindmntColumn.DEVICE_NAME.value),
                    constants.EMPTY_STR,
                )
            )
        )

        return PartitionTable(
            constants.EMPTY_HEX_UUID, constants.MTAB_PT_TYPE
        ).with_partitions(FindmntCommand._map_to_partitions(findmnt_partitions))
Example #31
    def __init__(self, neural_modules):
        super(SequentialKernel, self).__init__()
        self._neural_modules_list = list(
            more_itertools.always_iterable(neural_modules))
        self.input_dimensions = self._neural_modules_list[0].input_dimensions
        self.output_dimension = self._neural_modules_list[-1].output_dimension

        self.neural_modules = nn.Sequential(*self._neural_modules_list)
Example #32
def send_response(req, status, headers, body, stream=False):
    # Set response status
    req.status = int(status[:3])

    # Set response headers
    req.content_type = 'text/plain'
    for header, value in headers:
        if header.lower() == 'content-type':
            req.content_type = value
            continue
        req.headers_out.add(header, value)

    if stream:
        # Flush now so the status and headers are sent immediately.
        req.flush()

    # Set response body
    for seg in always_iterable(body):
        req.write(seg)
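
A quick exercise of send_response with a stub request object (a hypothetical stand-in; the real 'req' is a mod_python-style request). It shows that 'body' may be a single string or any iterable of segments, since always_iterable wraps the scalar case:

class _StubHeaders(dict):
    def add(self, key, value):
        self[key] = value

class _StubRequest:
    def __init__(self):
        self.status = None
        self.content_type = None
        self.headers_out = _StubHeaders()
        self.written = []
    def write(self, segment):
        self.written.append(segment)
    def flush(self):
        pass

req = _StubRequest()
send_response(req, "200 OK", [("Content-Length", "5")], "hello")
assert req.written == ["hello"]                     # single string written as one segment

req = _StubRequest()
send_response(req, "200 OK", [], ["part-1", "part-2"])
assert req.written == ["part-1", "part-2"]          # iterable body written per segment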
Example #33
    def __init__(self, urls, status=None, encoding=None):
        self.urls = abs_urls = [
            # Note that urljoin will "do the right thing" whether url is:
            #  1. a complete URL with host (e.g. "http://www.example.com/test")
            #  2. a URL relative to root (e.g. "/dummy")
            #  3. a URL relative to the current path
            # Note that any query string in cherrypy.request is discarded.
            urllib.parse.urljoin(
                cherrypy.url(),
                tonative(url, encoding or self.encoding),
            )
            for url in always_iterable(urls)
        ]

        status = (
            int(status)
            if status is not None
            else self.default_status
        )
        if not 300 <= status <= 399:
            raise ValueError('status must be between 300 and 399.')

        CherryPyException.__init__(self, abs_urls, status)