Example #1
 def getFunctionDict(self) -> Mapping[str, Callable]:
     return MappingProxyType(self.__functions)
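
# Why this pattern: MappingProxyType hands callers a read-only *live view* of
# the internal dict, so the owner can keep mutating it while callers cannot.
# A minimal self-contained sketch (names are illustrative):
from types import MappingProxyType

_functions = {"add": lambda a, b: a + b}
functions_view = MappingProxyType(_functions)

print(functions_view["add"](1, 2))   # 3 -- reads work as usual
_functions["sub"] = lambda a, b: a - b
print("sub" in functions_view)       # True -- the view tracks the dict
try:
    functions_view["mul"] = None     # writes through the proxy are rejected
except TypeError as exc:
    print(exc)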
Example #2
 def __init__(self, domain, service, data=None, call_id=None):
     """Initialize a service call."""
     self.domain = domain.lower()
     self.service = service.lower()
     self.data = MappingProxyType(data or {})
     self.call_id = call_id
Example #3
import logging
import os
import warnings
from collections.abc import Mapping
from email.headerregistry import Address
from functools import partial, reduce
from itertools import chain
from types import MappingProxyType
from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set,
                    Tuple, Type, Union)

from setuptools._deprecation_warning import SetuptoolsDeprecationWarning

if TYPE_CHECKING:
    from setuptools._importlib import metadata  # noqa
    from setuptools.dist import Distribution  # noqa

EMPTY: Mapping = MappingProxyType({})  # Immutable dict-like
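
# Illustrative sketch, not from setuptools: a shared immutable EMPTY is safe as
# a default argument, unlike a mutable ``{}`` that would persist between calls.
def _merged(overrides: Mapping = EMPTY) -> dict:
    out = {"strict": True}
    out.update(overrides)  # reading EMPTY is fine; mutating it would raise
    return out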
_Path = Union[os.PathLike, str]
_DictOrStr = Union[dict, str]
_CorrespFn = Callable[["Distribution", Any, _Path], None]
_Correspondence = Union[str, _CorrespFn]

_logger = logging.getLogger(__name__)


def apply(dist: "Distribution", config: dict,
          filename: _Path) -> "Distribution":
    """Apply configuration dict read with :func:`read_configuration`"""

    if not config:
        return dist  # short-circuit unrelated pyproject.toml file
Example #4
    "URLConfig", "create_url", "KNOWN_SCHEME_PORTS", "DEFAULT_CONNECT_TIMEOUT"
]

if TYPE_CHECKING:
    # We refer to the Config class in a docstring and
    # without this import, Sphinx can't resolve the reference
    # noinspection PyUnresolvedReferences
    from . import Config

DEFAULT_CONNECT_TIMEOUT = timedelta(seconds=30)

# The set of schemes we understand, and the known ports that map to those schemes. The first port
# in the list is the default value for the scheme.
KNOWN_SCHEME_PORTS = MappingProxyType({
    "http": (80, 7575, 8080),
    "https": (443, 8443),
    "grpc": (6865, ),
    "grpcs": ()
})
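
# Illustrative helper, not part of the original module: per the comment above,
# a scheme's default port is the first entry in its tuple (None if unknown,
# as for "grpcs").
def _default_port(scheme: str) -> "Optional[int]":
    ports = KNOWN_SCHEME_PORTS.get(scheme, ())
    return ports[0] if ports else None  # e.g. _default_port("https") -> 443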

# some environment variables that we frequently refer to
DAML_LEDGER_URL = "DAML_LEDGER_URL"
DAML_LEDGER_HOST = "DAML_LEDGER_HOST"
DAML_LEDGER_PORT = "DAML_LEDGER_PORT"
DAML_LEDGER_SCHEME = "DAML_LEDGER_SCHEME"


def create_url(
    *,
    url: Optional[str] = None,
    host: Optional[str] = None,
    port: Optional[int] = None,
Example #5
# This mirrors functools.singledispatch; in the standard library these
# helpers live inside the functools module itself.
from abc import get_cache_token
from functools import _find_impl, update_wrapper
from types import MappingProxyType
from weakref import WeakKeyDictionary


def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.

    """
    registry = {}
    dispatch_cache = WeakKeyDictionary()
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            return lambda f: register(cls, f)
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        if not args:
            raise TypeError(f'{funcname} requires at least '
                            '1 positional argument')

        return dispatch(args[0].__class__)(*args, **kw)

    funcname = getattr(func, '__name__', 'singledispatch function')
    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
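
# Usage sketch for the decorator above (illustrative, not from the original):
if __name__ == "__main__":
    @singledispatch
    def describe(obj):
        return "object"

    @describe.register(int)
    def _(obj):
        return "int"

    print(describe(3))     # "int" -- dispatched on type(args[0])
    print(describe("hi"))  # "object" -- falls back to the default impl
    print(list(describe.registry))  # registry is exposed as a read-only proxy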
Example #6
from types import MappingProxyType
from typing import Callable, Type, Union

from mirumon.api.dependencies.devices.devices_services import (
    get_devices_auth_service,
    get_devices_service,
)
from mirumon.api.dependencies.users.users_services import get_auth_users_service
from mirumon.application.devices.auth_service import DevicesAuthService
from mirumon.application.devices.device_service import DevicesService
from mirumon.application.users.auth_service import AuthUsersService

ServiceTypes = Type[Union[AuthUsersService, DevicesService,
                          DevicesAuthService]]


def get_service(  # type: ignore
    service_type: ServiceTypes,
) -> Callable[..., object]:
    for registered_service_type, factory in SERVICE_FACTORIES.items():
        if issubclass(service_type, registered_service_type):
            return factory  # type: ignore
    raise ValueError(f"{service_type} not found in registered services")


SERVICE_FACTORIES = MappingProxyType({
    AuthUsersService: get_auth_users_service,
    DevicesService: get_devices_service,
    DevicesAuthService: get_devices_auth_service,
})
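
# Illustrative usage, not part of the original module: resolve a dependency
# factory by service type; an unregistered type raises ValueError.
def _demo_lookup() -> None:
    factory = get_service(DevicesService)
    assert factory is get_devices_service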
Example #7
from types import MappingProxyType
from typing import Any, Callable, Iterable, Mapping, Optional, Tuple

import numpy as np
from scipy import linalg


def _gradient_descent(
    objective: Callable[..., Tuple[Optional[float], np.ndarray]],
    p0: np.ndarray,
    it: int,
    n_iter: int,
    objective_error: Optional[Callable[..., float]] = None,
    n_iter_check: int = 1,
    n_iter_without_progress: int = 50,
    momentum: float = 0.5,
    learning_rate: float = 1000.0,
    min_gain: float = 0.01,
    min_grad_norm: float = 1e-7,
    min_error_diff: float = 1e-7,
    verbose: int = 0,
    args: Iterable[Any] = (),
    kwargs: Mapping[str, Any] = MappingProxyType({}),
) -> Tuple[np.ndarray, float, int]:
    """\
    Batch gradient descent with momentum and individual gains.

    Parameters
    ----------
    objective
        Should return a tuple of cost and gradient for a given parameter
        vector. When expensive to compute, the cost can optionally
        be None and can be computed every n_iter_check steps using
        the objective_error function.
    p0
        Initial parameter vector. shape (n_params,)
    it
        Current number of iterations (this function will be called more than
        once during the optimization).
    n_iter
        Maximum number of gradient descent iterations.
    objective_error
        Should return error for a given parameter vector.
    n_iter_check
        Number of iterations before evaluating the global error. If the error
        is sufficiently low, we abort the optimization.
    n_iter_without_progress
        Maximum number of iterations without progress before we abort the
        optimization.
    momentum
        The momentum generates a weight for previous gradients that decays
        exponentially; must lie within (0.0, 1.0).
    learning_rate
        The learning rate should be extremely high for t-SNE! Values in the
        range [100.0, 1000.0] are common.
    min_gain
        Minimum individual gain for each parameter.
    min_grad_norm
        If the gradient norm is below this threshold, the optimization will
        be aborted.
    min_error_diff
        If the absolute difference of two successive cost function values
        is below this threshold, the optimization will be aborted.
    verbose
        Verbosity level.
    args
        Arguments to pass to objective function.
    kwargs
        Keyword arguments to pass to objective function.
    Returns
    -------
    p
        Optimum parameters. shape (n_params,)
    error
        Optimum.
    i
        Last iteration.
    """

    p = p0.copy().ravel()
    update = np.zeros_like(p)
    gains = np.ones_like(p)
    error = np.finfo(np.float64).max
    best_error = np.finfo(np.float64).max
    best_iter = 0

    for i in range(it, n_iter):
        new_error, grad = objective(p, *args, **kwargs)
        grad_norm = linalg.norm(grad)

        inc = update * grad < 0.0
        dec = np.invert(inc)
        gains[inc] += 0.2
        gains[dec] *= 0.8
        np.clip(gains, min_gain, np.inf, out=gains)
        grad *= gains
        update = momentum * update - learning_rate * grad
        p += update

        if (i + 1) % n_iter_check == 0:
            if new_error is None:
                new_error = objective_error(p, *args)
            error_diff = np.abs(new_error - error)
            error = new_error

            if verbose >= 2:
                m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
                print(m % (i + 1, error, grad_norm))

            if error < best_error:
                best_error = error
                best_iter = i
            elif i - best_iter > n_iter_without_progress:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: did not make any progress "
                          "during the last %d episodes. Finished." %
                          (i + 1, n_iter_without_progress))
                break
            if grad_norm <= min_grad_norm:
                if verbose >= 2:
                    print("[t-SNE] Iteration %d: gradient norm %f. Finished." %
                          (i + 1, grad_norm))
                break
            if error_diff <= min_error_diff:
                if verbose >= 2:
                    m = "[t-SNE] Iteration %d: error difference %f. Finished."
                    print(m % (i + 1, error_diff))
                break

        if new_error is not None:
            error = new_error

    return p, error, i
Example #8
 def recipients(self) -> '_typing.Mapping[int, dt_user.User]':
     """
     :return: A mapping of int -> :class:`.User` for the recipients of this private chat.
     """
     return MappingProxyType(self._recipients)
Example #9
 def overwrites(self) -> '_typing.Mapping[int, dt_permissions.Overwrite]':
     """
     :return: A mapping of target_id -> :class:`.Overwrite` for this channel.
     """
     return MappingProxyType(self._overwrites)
Example #10
KNOWN_PLUGINS = MappingProxyType({
    'dlint': 'https://github.com/dlint-py/dlint',
    'flake8-2020': 'https://github.com/asottile/flake8-2020',
    'flake8-alfred': 'https://github.com/datatheorem/flake8-alfred',
    'flake8-annotations-complexity': 'https://github.com/best-doctor/flake8-annotations-complexity',
    'flake8-bandit': 'https://github.com/tylerwince/flake8-bandit',
    'flake8-black': 'https://github.com/peterjc/flake8-black',
    'flake8-broken-line': 'https://github.com/sobolevn/flake8-broken-line',
    'flake8-builtins': 'https://github.com/gforcada/flake8-builtins',
    'flake8-coding': 'https://github.com/tk0miya/flake8-coding',
    'flake8-cognitive-complexity': 'https://github.com/Melevir/flake8-cognitive-complexity',
    'flake8-comprehensions': 'https://github.com/adamchainz/flake8-comprehensions',
    'flake8-debugger': 'https://github.com/JBKahn/flake8-debugger',
    'flake8-docstrings': 'https://gitlab.com/pycqa/flake8-docstrings',  # pydocstyle
    'flake8-eradicate': 'https://github.com/sobolevn/flake8-eradicate',
    'flake8-executable': 'https://github.com/xuhdev/flake8-executable',
    'flake8-expression-complexity': 'https://github.com/best-doctor/flake8-expression-complexity',
    'flake8-fixme': 'https://github.com/tommilligan/flake8-fixme',
    'flake8-functions': 'https://github.com/best-doctor/flake8-functions',
    'flake8-logging-format': 'https://github.com/globality-corp/flake8-logging-format',
    'flake8-mutable': 'https://github.com/ebeweber/flake8-mutable',
    'flake8-mypy': 'https://github.com/ambv/flake8-mypy',
    'flake8-pep3101': 'https://github.com/gforcada/flake8-pep3101',
    'flake8-pie': 'https://github.com/sbdchd/flake8-pie',
    'flake8-print': 'https://github.com/JBKahn/flake8-print',
    'flake8-printf-formatting': 'https://github.com/atugushev/flake8-printf-formatting',
    'flake8-pyi': 'https://github.com/ambv/flake8-pyi',
    'flake8-quotes': 'https://github.com/zheller/flake8-quotes',
    'flake8-requirements': 'https://github.com/Arkq/flake8-requirements',
    'flake8-rst-docstrings': 'https://github.com/peterjc/flake8-rst-docstrings',
    'flake8-spellcheck': 'https://github.com/MichaelAquilina/flake8-spellcheck',
    'flake8-sql': 'https://github.com/pgjones/flake8-sql',
    'flake8-strict': 'https://github.com/smarkets/flake8-strict',
    'flake8-string-format': 'https://github.com/xZise/flake8-string-format',
    'flake8-todo': 'https://github.com/schlamar/flake8-todo',
    'flake8-use-fstring': 'https://github.com/MichaelKim0407/flake8-use-fstring',
    'flake8-variables-names': 'https://github.com/best-doctor/flake8-variables-names',

    # framework-specific
    'flake8-django': 'https://github.com/rocioar/flake8-django',
    'flake8-scrapy': 'https://github.com/stummjr/flake8-scrapy',
    'pandas-vet': 'https://github.com/deppen8/pandas-vet',

    # tests
    'flake8-aaa': 'https://github.com/jamescooke/flake8-aaa',
    'flake8-mock': 'https://github.com/aleGpereira/flake8-mock',
    'flake8-pytest': 'https://github.com/vikingco/flake8-pytest',
    'flake8-pytest-style': 'https://github.com/m-burst/flake8-pytest-style',

    # PyCQA
    'flake8-bugbear': 'https://github.com/PyCQA/flake8-bugbear',
    'flake8-commas': 'https://github.com/PyCQA/flake8-commas',
    'mccabe': 'https://github.com/PyCQA/mccabe',
    'pep8-naming': 'https://github.com/PyCQA/pep8-naming',
    'pylint': 'https://github.com/PyCQA/pylint',

    # imports
    'flake8-future-import': 'https://github.com/xZise/flake8-future-import',
    'flake8-import-order': 'https://github.com/PyCQA/flake8-import-order',
    'flake8-isort': 'https://github.com/gforcada/flake8-isort',
    'flake8-absolute-import': 'https://github.com/bskinn/flake8-absolute-import',
    'flake8-tidy-imports': 'https://github.com/adamchainz/flake8-tidy-imports',

    # built-in in flake8
    'pycodestyle': 'https://github.com/PyCQA/pycodestyle',
    'pyflakes': 'https://github.com/PyCQA/pyflakes',
})
Example #11
CONTEXT = MappingProxyType({
    'ams': 'http://datacatalogus.amsterdam.nl/term/',
    'ckan': 'https://ckan.org/terms/',
    'class': 'ams:class#',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'dcat': 'http://www.w3.org/ns/dcat#',
    'dct': 'http://purl.org/dc/terms/',
    'foaf': 'http://xmlns.com/foaf/0.1/',
    'lang1': 'http://id.loc.gov/vocabulary/iso639-1/',
    'lang2': 'http://id.loc.gov/vocabulary/iso639-2/',
    'org': 'ams:org#',
    # According to dcat-ap-nl this should be '.../term', but that does not
    # work. Sigh... According to various other government documents:
    'overheid': 'http://standaarden.overheid.nl/owms/terms/',
    # Made up by data.overheid.nl; the correct value still needs to be
    # looked up [--PvB]
    'overheidds': 'http://standaarden.overheid.nl/owms/terms/ds#',
    'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
    'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
    'void': 'http://rdfs.org/ns/void#',
    'skos': 'http://www.w3.org/2004/02/skos/core#',
    'theme': 'ams:theme#',
    'time': 'http://www.w3.org/2006/time#',
    'vcard': 'http://www.w3.org/2006/vcard/ns#',
    'ams:purl': {'@type': '@id'},
    'dcat:dataset': {'@container': '@list'},
    'dcat:distribution': {'@container': '@set'},
    'dcat:keyword': {'@container': '@set'},
    'dcat:landingpage': {'@type': '@id'},
    'dcat:theme': {'@container': '@set', '@type': '@id'},
    'dct:issued': {'@type': 'xsd:date'},
    'dct:language': {'@type': '@id'},
    'dct:modified': {'@type': 'xsd:date'},
    'foaf:homepage': {'@type': '@id'},
    'foaf:mbox': {'@type': '@id'},
    'vcard:hasEmail': {'@type': '@id'},
    'vcard:hasURL': {'@type': '@id'},
    'vcard:hasLogo': {'@type': '@id'}
})
Example #12
 def params(self):
     return MappingProxyType(self._params)
Example #13
def build_data(
    graph_sets: Iterable[GraphSet],
    table_defns: Mapping[str, Tuple[Column, ...]],
) -> Mapping[str, List[Tuple[Optional[Any], ...]]]:
    pk_counters: DefaultDict[str, int] = defaultdict(int)
    arns_pks: Dict[str, int] = {}
    table_names_datas: DefaultDict[str, List[Tuple[Optional[Any],
                                                   ...]]] = defaultdict(list)
    for graph_set in graph_sets:
        for resource in graph_set.resources:
            resource_data: List[Optional[Any]] = []
            table_name = normalize_name(resource.type)
            arn = resource.resource_id
            # build a primary key for this resource
            pk = arns_pks.get(arn)
            if pk is None:
                pk_counters[table_name] += 1
                pk = pk_counters[table_name]
                arns_pks[arn] = pk
            # add tag data
            if resource.link_collection.tag_links:
                for link in resource.link_collection.tag_links:
                    tag_key, tag_value = link.pred, link.obj
                    table_names_datas[TAG_TABLE_NAME].append(
                        (pk, tag_key, tag_value))
            # iterate over the columns we expect the corresponding resource for this
            # table and fill them by looking for corresponding values in the resource
            for column in table_defns[table_name]:
                if isinstance(column, PKColumn):
                    if not pk:
                        raise Exception(
                            f"BUG: No pk found for {table_name} : {table_defns[table_name]}"
                        )
                    resource_data.append(pk)
                elif isinstance(column, FKColumn):
                    # strip the leading "_" and the trailing "_id"
                    column_link_name = column.name.lstrip("_")[:-3]
                    fk = None
                    # consider both direct and transient resource links
                    for candidate_fk_link in (
                            tuple(resource.link_collection.resource_links or ()) +
                            tuple(resource.link_collection.transient_resource_links or ())):
                        if candidate_fk_link.pred == column_link_name:
                            f_table_name = normalize_name(
                                candidate_fk_link.pred)
                            f_arn = candidate_fk_link.obj
                            fk = arns_pks.get(f_arn)
                            if fk is None:
                                pk_counters[f_table_name] += 1
                                fk = pk_counters[f_table_name]
                                arns_pks[f_arn] = fk
                            break
                    resource_data.append(fk)
                elif isinstance(column, TextColumn):
                    text_data = None
                    if column.name == "arn":
                        text_data = resource.resource_id
                    else:
                        if resource.link_collection.simple_links:
                            for candidate_simple_link in resource.link_collection.simple_links:
                                if candidate_simple_link.pred == column.name:
                                    text_data = str(candidate_simple_link.obj)
                                    break
                    resource_data.append(text_data)
                elif type(column) in (IntColumn, BoolColumn, TimestampColumn):
                    bool_or_int_or_timestamp_data = None
                    if resource.link_collection.simple_links:
                        for candidate_simple_link in resource.link_collection.simple_links:
                            if candidate_simple_link.pred == column.name:
                                bool_or_int_or_timestamp_data = candidate_simple_link.obj
                                break
                    resource_data.append(bool_or_int_or_timestamp_data)
                else:
                    raise NotImplementedError(
                        f"Column type {type(column)} not implemented")
            table_names_datas[table_name].append(tuple(resource_data))
            # now look for MultiLinks in this resource.  Each of these represent a table
            if resource.link_collection.multi_links:
                for multi_link in resource.link_collection.multi_links:
                    build_multilink_data(
                        pk_counters=pk_counters,
                        arns_pks=arns_pks,
                        table_names_datas=table_names_datas,
                        table_defns=table_defns,
                        multi_link=multi_link,
                        parent_table_name=table_name,
                        parent_pk=pk,
                    )
    return MappingProxyType(table_names_datas)
Example #14
 def __init__(self, mapping):
     super().__init__(mapping)
     self._mappingproxy = MappingProxyType(self._mapping)  # read-only
Example #15
    def __init__(self, waveforms: Sequence[WaveformEntry] = ()):
        self._waveforms_by_name = {wf.name: wf for wf in waveforms}
        self._by_name = MappingProxyType(self._waveforms_by_name)

        self._waveforms_by_data = {wf.waveform: wf for wf in waveforms}
        self._by_data = MappingProxyType(self._waveforms_by_data)
Example #16
def gene_trends(
    adata: AnnData,
    model: _model_type,
    genes: Union[str, Sequence[str]],
    lineages: Optional[Union[str, Sequence[str]]] = None,
    data_key: str = "X",
    final: bool = True,
    start_lineage: Optional[Union[str, Sequence[str]]] = None,
    end_lineage: Optional[Union[str, Sequence[str]]] = None,
    conf_int: bool = True,
    same_plot: bool = False,
    hide_cells: bool = False,
    perc: Optional[Union[Tuple[float, float], Sequence[Tuple[float, float]]]] = None,
    lineage_cmap: Optional[matplotlib.colors.ListedColormap] = None,
    abs_prob_cmap: matplotlib.colors.ListedColormap = cm.viridis,
    cell_color: str = "black",
    color: str = "black",
    cell_alpha: float = 0.6,
    lineage_alpha: float = 0.2,
    size: float = 15,
    lw: float = 2,
    show_cbar: bool = True,
    margins: float = 0.015,
    sharey: bool = False,
    figsize: Optional[Tuple[float, float]] = None,
    dpi: Optional[int] = None,
    ncols: int = 2,
    n_jobs: Optional[int] = 1,
    backend: str = "multiprocessing",
    ext: str = "png",
    suptitle: Optional[str] = None,
    save: Optional[Union[str, Path]] = None,
    dirname: Optional[str] = None,
    plot_kwargs: Mapping = MappingProxyType({}),
    show_progress_bar: bool = True,
    **kwargs,
) -> None:
    """
    Plot gene expression trends along lineages.

    Each lineage is defined via its lineage weights, which we compute using :func:`cellrank.tl.lineages`. This
    function accepts any `scikit-learn` model wrapped in :class:`cellrank.ul.models.SKLearnModel`
    to fit gene expression, where we take the lineage weights into account in the loss function.

    .. image:: https://raw.githubusercontent.com/theislab/cellrank/master/resources/images/gene_trends.png
       :width: 400px
       :align: center

    Params
    ------
    adata : :class:`anndata.AnnData`
        Annotated data object.
    genes
        Genes in :paramref:`adata` `.var_names` to plot.
    model
        Model to fit.

        - If a :class:`dict`, gene- and lineage-specific models can be specified. Use `'*'` to indicate
          all genes or lineages, for example `{'Map2': {'*': ...}, 'Dcx': {'Alpha': ..., '*': ...}}`.
    lineages
        Lineage names for which to show the gene expression.
    data_key
        Key in :paramref:`adata` `.layers` or `'X'` for :paramref:`adata` `.X` where the data is stored.
    final
        Whether to consider cells going to final states or vice versa.
    start_lineage
        Lineage from which to select cells with lowest pseudotime as starting points.
        If specified, the trends start at the earliest pseudotime within that lineage,
        otherwise they start from time `0`.
    end_lineage
        Lineage from which to select cells with highest pseudotime as endpoints.
        If specified, the trends end at the latest pseudotime within that lineage,
        otherwise, it is determined automatically.
    conf_int
        Whether to compute and show confidence intervals.
    same_plot
        Whether to plot all lineages for each gene in the same plot.
    hide_cells
        If `True`, hide all the cells.
    perc
        Percentile for colors. Valid values are in range `[0, 100]`.
        This can improve visualization and can be specified separately for each lineage.
    lineage_cmap
        Colormap to use when coloring in the lineages.
        Only used when :paramref:`same_plot` `=True`.
    abs_prob_cmap
        Colormap to use when visualizing the absorption probabilities for each lineage.
        Only used when :paramref:`same_plot` `=False`.
    cell_color
        Color of the cells when not visualizing absorption probabilities.
        Only used when :paramref:`same_plot` `=True`.
    color
        Color for the lineages, when each lineage is on
        separate plot, otherwise according to :paramref:`lineage_cmap`.
    cell_alpha
        Alpha channel for cells.
    lineage_alpha
        Alpha channel for lineage confidence intervals.
    size
        Size of the points.
    lw
        Line width of the smoothed values.
    show_cbar
        Whether to show colorbar. Always shown when percentiles for lineages differ.
    margins
        Margins around the plot.
    sharey
        Whether to share y-axis.
        Only used when :paramref:`same_plot` `=False`.
    figsize
        Size of the figure.
    dpi
        Dots per inch.
    ncols
        Number of columns of the plot when plotting multiple genes.
        Only used when :paramref:`same_plot` `=True`.
    suptitle
        Suptitle of the figure.
        Only used when :paramref:`same_plot` `=True`.
    n_jobs
        Number of parallel jobs. If `-1`, use all available cores. If `None` or `1`, the execution is sequential.
    backend
        Which backend to use for multiprocessing.
        See :class:`joblib.Parallel` for valid options.
    ext
        Extension to use when saving files, such as `'pdf'`.
        Only used when :paramref:`same_plot` `=False`.
    save
        Filename where to save the plots.
        If `None`, just show the plots.
    dirname
        Directory where to save the plots, one per gene in :paramref:`genes`.
        If `None`, just show the plots.
        Only used when :paramref:`same_plot` `=False`.
        The figures will be saved as ``{dirname}/{gene}.{ext}``.
    plot_kwargs
        Keyword arguments for :meth:`cellrank.ul.models.Model.plot`.
    kwargs
        Keyword arguments for :meth:`cellrank.ul.models.Model.prepare`.

    Returns
    -------
    None
        Nothing, just plots and optionally saves the plots.
    """

    if isinstance(genes, str):
        genes = [genes]
    genes = _make_unique(genes)

    if data_key != "obs":
        check_collection(adata, genes, "var_names")
    else:
        check_collection(adata, genes, "obs")

    nrows = int(np.ceil(len(genes) / ncols))
    fig = None
    axes = [None] * len(genes)

    if same_plot:
        fig, axes = plt.subplots(
            nrows=nrows,
            ncols=ncols,
            sharey=sharey,
            figsize=(15 * ncols, 10 * nrows) if figsize is None else figsize,
        )
        axes = np.ravel(axes)
    elif dirname is not None:
        figdir = sc.settings.figdir
        if figdir is None:
            raise RuntimeError(
                f"Invalid combination: figures directory `cellrank.settings.figdir` is `None`, "
                f"but `dirname={dirname}`.")
        if os.path.isabs(dirname):
            if not os.path.isdir(dirname):
                os.makedirs(dirname, exist_ok=True)
        elif not os.path.isdir(os.path.join(figdir, dirname)):
            os.makedirs(os.path.join(figdir, dirname), exist_ok=True)
    elif save is not None:
        logg.warning(
            "No directory specified for saving. Ignoring `save` argument")

    ln_key = str(LinKey.FORWARD if final else LinKey.BACKWARD)
    if ln_key not in adata.obsm:
        raise KeyError(f"Lineages key `{ln_key!r}` not found in `adata.obsm`.")

    if lineages is None:
        lineages = adata.obsm[ln_key].names
    elif isinstance(lineages, str):
        lineages = [lineages]
    elif all(ln is None for ln in lineages):  # no lineage, all the weights are 1
        lineages = [None]
        show_cbar = False
        logg.debug("DEBUG: All lineages are `None`, setting weights to be `1`")
    lineages = _make_unique(lineages)

    for ln in filter(lambda ln: ln is not None, lineages):
        _ = adata.obsm[ln_key][ln]
    n_lineages = len(lineages)

    if isinstance(start_lineage, (str, type(None))):
        start_lineage = [start_lineage] * n_lineages
    if isinstance(end_lineage, (str, type(None))):
        end_lineage = [end_lineage] * n_lineages

    if len(start_lineage) != n_lineages:
        raise ValueError(
            f"Expected the number of start lineages to be the same as number of lineages "
            f"({n_lineages}), found `{len(start_lineage)}`.")
    if len(end_lineage) != n_lineages:
        raise ValueError(
            f"Expected the number of end lineages to be the same as number of lineages "
            f"({n_lineages}), found `{len(end_lineage)}`.")

    kwargs["models"] = _create_models(model, genes, lineages)
    kwargs["data_key"] = data_key
    kwargs["final"] = final
    kwargs["conf_int"] = conf_int

    plot_kwargs = dict(plot_kwargs)
    if plot_kwargs.get("xlabel", None) is None:
        plot_kwargs["xlabel"] = kwargs.get("time_key", None)

    if _is_any_gam_mgcv(kwargs["models"]):
        logg.debug(
            "DEBUG: Setting backend to multiprocessing because model is `GamMGCV`"
        )
        backend = "multiprocessing"

    n_jobs = _get_n_cores(n_jobs, len(genes))

    start = logg.info(f"Computing trends using `{n_jobs}` core(s)")
    models = parallelize(
        _fit,
        genes,
        unit="gene" if data_key != "obs" else "obs",
        backend=backend,
        n_jobs=n_jobs,
        extractor=lambda modelss: {k: v for m in modelss for k, v in m.items()},
        show_progress_bar=show_progress_bar,
    )(lineages, start_lineage, end_lineage, **kwargs)
    logg.info("    Finish", time=start)

    logg.debug("DEBUG: Plotting trends")
    for gene, ax in zip(genes, axes):
        f = (None if (same_plot or dirname is None) else os.path.join(
            dirname, f"{gene}.{ext}"))
        _trends_helper(
            adata,
            models,
            gene=gene,
            lineage_names=lineages,
            ln_key=ln_key,
            same_plot=same_plot,
            hide_cells=hide_cells,
            perc=perc,
            cmap=lineage_cmap,
            abs_prob_cmap=abs_prob_cmap,
            cell_color=cell_color,
            color=color,
            alpha=cell_alpha,
            lineage_alpha=lineage_alpha,
            size=size,
            lw=lw,
            show_cbar=show_cbar,
            margins=margins,
            sharey=sharey,
            dpi=dpi,
            figsize=figsize,
            fig=fig,
            ax=ax,
            save=f,
            **plot_kwargs,
        )

    if same_plot:
        for j in range(len(genes), len(axes)):
            axes[j].remove()

        fig.suptitle(suptitle)

        if save is not None:
            save_fig(fig, save)
Example #17
# Imports and a minimal test() helper (the original excerpt omitted them;
# test() here is an illustrative stand-in that just names the object's type).
import math
from array import array
from collections import (ChainMap, Counter, OrderedDict, UserDict, UserList,
                         UserString, defaultdict, deque)
from types import MappingProxyType


def test(obj):
    print(type(obj).__name__)


d = {'a': 1, 'b': 2}  # sample dict for the mapping-based cases

test(d.values())
test(set(range(3)))
test([1,2,3])
test(UserList(range(3)))
test((1,2,3))
test(deque())
test(range(3))
test(array('i', range(3)))

# Stuff
g = (i for i in range(3))
test(g)                     # generator
test(g.__iter__)            # method_wrapper (?)
test(iter(range(3)))        # range_iterator
test(lambda x: 2*x)         # function
test(math)                  # module
test(list)                  # type

# Specialized dicts
test(defaultdict(int))      # Provide a default value
test(OrderedDict())         # keep insertion order
test(ChainMap())            # search in multiple dictionaries
test(Counter())             # Count elements
test(UserDict())            # Same as dict, but written in Python, can inherit from it
test(MappingProxyType(d))   # Readonly dictionary

# strings
test('zap')
test(UserString('zap'))
test("""xxx""")
Example #18
    "Content-Type": "application/json",
    "Content-Type": "application/x-www-form-urlencoded",
    "Authorization": f"Bearer {expected_token.decode('utf-8')}",
}

# welcome to the world of mutable/immutable dicts... since there is a bug in
# `cpython` (MappingProxy objects should JSON-serialize just like a dictionary)
# affecting python 3.8 (at least), we are forced to duplicate
# `post_data_user`... or some of our tests will fail when trying to serialize
# data. So, imho, it is easier to create a duplicate of the object than to mess
# with previously written tests. This affects one of our test
# helpers: `get_superuser_token_headers`
#
# See also: https://bugs.python.org/issue34858
post_data_user = {"username": "******", "password": test_password}
post_data_user_in_mpt = MappingProxyType(post_data_user)
to_delete_user = {"username": "******", "password": test_password}
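
# Minimal demonstration of the bug described above (illustrative): json
# serializes the plain dict but rejects the mappingproxy, hence the duplicate.
import json

json.dumps(post_data_user)  # fine: plain dict
try:
    json.dumps(post_data_user_in_mpt)
except TypeError:  # "Object of type mappingproxy is not JSON serializable"
    json.dumps(dict(post_data_user_in_mpt))  # workaround: copy into a dict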

expected_user = {
    "username": "******",
    "id": 1,
    "is_active": False,
    "actions": [{
        "title": "Account created",
        "id": 1,
        "owner_id": 1,
        "timestamp": "2020-05-30 17:35:55",
    }],
Example #19
    def build(
        cls,
        file_descriptors: Sequence[descriptor_pb2.FileDescriptorProto],
        package: str = '',
        opts: Options = Options(),
        prior_protos: Optional[Mapping[str, 'Proto']] = None,
    ) -> 'API':
        """Build the internal API schema based on the request.

        Args:
            file_descriptors (Sequence[~.FileDescriptorProto]): A list of
                :class:`~.FileDescriptorProto` objects describing the
                API.
            package (str): A protocol buffer package, as a string, for which
                code should be explicitly generated (including subpackages).
                Protos with packages outside this list are considered imports
                rather than explicit targets.
            opts (~.options.Options): CLI options passed to the generator.
            prior_protos (~.Proto): Previous, already processed protos.
                These are needed to look up messages in imported protos.
                Primarily used for testing.
        """
        # Save information about the overall naming for this API.
        naming = api_naming.Naming.build(
            *filter(
                lambda fd: fd.package.startswith(package),
                file_descriptors,
            ),
            opts=opts,
        )

        def disambiguate_keyword_fname(full_path: str,
                                       visited_names: Container[str]) -> str:
            path, fname = os.path.split(full_path)
            name, ext = os.path.splitext(fname)
            if name in keyword.kwlist or full_path in visited_names:
                name += "_"
                full_path = os.path.join(path, name + ext)
                if full_path in visited_names:
                    return disambiguate_keyword_fname(full_path, visited_names)

            return full_path

        # Iterate over each FileDescriptorProto and fill out a Proto
        # object describing it, and save these to the instance.
        #
        # The first pass gathers messages and enums but NOT services or methods.
        # This is a workaround for a limitation in protobuf annotations for
        # long running operations: the annotations are strings that reference
        # message types but do not require a proto import.
        # This hack attempts to address a common case where API authors,
        # not wishing to generate an 'unused import' warning,
        # don't import the proto file defining the real response or metadata
        # type into the proto file that defines an LRO.
        # We just load all the APIs types first and then
        # load the services and methods with the full scope of types.
        pre_protos: Dict[str, Proto] = dict(prior_protos or {})
        for fd in file_descriptors:
            fd.name = disambiguate_keyword_fname(fd.name, pre_protos)
            pre_protos[fd.name] = Proto.build(
                file_descriptor=fd,
                file_to_generate=fd.package.startswith(package),
                naming=naming,
                opts=opts,
                prior_protos=pre_protos,
                # Ugly, ugly hack.
                load_services=False,
            )

        # A file descriptor's file-level resources are NOT visible to any importers.
        # The only way to make referenced resources visible is to aggregate them at
        # the API level and then pass that around.
        all_file_resources = collections.ChainMap(
            *(proto.resource_messages for proto in pre_protos.values()))

        # Second pass uses all the messages and enums defined in the entire API.
        # This allows LRO returning methods to see all the types in the API,
        # bypassing the above missing import problem.
        protos: Dict[str, Proto] = {
            name: Proto.build(
                file_descriptor=proto.file_pb2,
                file_to_generate=proto.file_to_generate,
                naming=naming,
                opts=opts,
                prior_protos=pre_protos,
                all_resources=MappingProxyType(all_file_resources),
            )
            for name, proto in pre_protos.items()
        }

        # Done; return the API.
        return cls(naming=naming, all_protos=protos)
Example #20
import logging
import re
from types import MappingProxyType
from typing import Iterable

from aiocarbon.protocol.pickle import PickleClient
from aiocarbon.protocol.tcp import TCPClient
from aiocarbon.protocol.udp import UDPClient
from aiocarbon.storage import TotalStorage  # type: ignore

from aiomisc.periodic import PeriodicCallback
from aiomisc.service import Service


log = logging.getLogger(__name__)


def strip_carbon_ns(string: str) -> str:
    return re.sub(r"[^\w\d\-]+", "_", string).strip("_").lower()


PROTOCOLS = MappingProxyType({
    "udp": UDPClient,
    "tcp": TCPClient,
    "pickle": PickleClient,
})
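
# Illustrative helper, not from the original module: the sender might resolve
# its client class by protocol name, failing fast on unknown names.
def _client_class_for(protocol: str) -> type:
    try:
        return PROTOCOLS[protocol]
    except KeyError:
        raise ValueError(
            f"Unknown protocol {protocol!r}, expected one of {sorted(PROTOCOLS)}"
        )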


class CarbonSender(Service):
    host: str = "127.0.0.1"
    port: int = 2003
    send_interval: int = 5
    protocol: str = "udp"
    namespace: Iterable[str] = ("",)
    storage = TotalStorage
    _handle: PeriodicCallback

    async def start(self) -> None:
        namespace = ".".join(
 def named_resources(self) -> Mapping[str, AbstractResource]:
     return MappingProxyType(self._named_resources)
Example #22
from types import MappingProxyType

PAGINATION_CLASS: str = 'rest_framework.pagination.LimitOffsetPagination'

REST_FRAMEWORK_SETTINGS: MappingProxyType = MappingProxyType({
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'core.users.middleware.ExpiringTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
    'DEFAULT_PERMISSION_CLASSES': (
        # 'rest_framework.permissions.AllowAny',
        # 'rest_framework.permissions.IsAuthenticatedOrReadOnly',
        'rest_framework.permissions.IsAdminUser',
    ),
    'DEFAULT_FILTER_BACKENDS': (),
    'DEFAULT_PAGINATION_CLASS': PAGINATION_CLASS,
    'PAGE_SIZE': 8,
})
Example #23
 def cached_hosts(self):
     """Read-only dict of cached DNS record."""
     return MappingProxyType(self._cached_hosts)
Example #24
 def get_proxy(self):
     return MappingProxyType({
         k: (self._views[k] if k in self._views else self._data[k].raw_data)
         for k in self._data
     })
Example #25
 def data(self) -> InvoiceDataType:
     """Invoice data getter"""
     return MappingProxyType(self._data)
Example #26
# Implementing a read-only (immutable) dict type via a proxy
from types import MappingProxyType

if __name__ == '__main__':
    d = {1: "A"}
    d_proxy = MappingProxyType(d)
    print(d_proxy)
    # d_proxy[2] = 'B'  # TypeError: 'mappingproxy' object does not support item assignment
    d[2] = 'B'

    print(d_proxy[2])
Example #27
 def freeze(self) -> None:
     """Disable all mutators, effectively transforming ``self`` into
     an immutable set.
     """
     if not isinstance(self._dict, MappingProxyType):
         self._dict = MappingProxyType(self._dict)  # type: ignore
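
# A self-contained sketch of the same freeze pattern (illustrative, not the
# original class): once the backing dict is swapped for a mappingproxy, reads
# keep working while every mutation raises TypeError.
class _FreezableMapping:
    def __init__(self):
        self._dict = {}

    def __setitem__(self, key, value):
        self._dict[key] = value  # raises TypeError once frozen

    def freeze(self):
        # Idempotent: a second call sees the proxy and does nothing.
        if not isinstance(self._dict, MappingProxyType):
            self._dict = MappingProxyType(self._dict)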
Example #28
 def members(self) -> 'typing.Mapping[int, WidgetMember]':
     """
     :return: A read-only mapping of member_id -> :class:`~.WidgetMember` for the members
         of this guild.
     """
     return MappingProxyType(self._members)
Example #29
 def named_resources(self):
     return MappingProxyType(self._named_resources)
Example #30
def paga_path(
    adata: AnnData,
    nodes: Sequence[Union[str, int]],
    keys: Sequence[str],
    use_raw: bool = True,
    annotations: Sequence[str] = ('dpt_pseudotime', ),
    color_map: Union[str, Colormap, None] = None,
    color_maps_annotations: Mapping[str, Union[str, Colormap]] = MappingProxyType(
        dict(dpt_pseudotime='Greys')
    ),
    palette_groups: Optional[Sequence[str]] = None,
    n_avg: int = 1,
    groups_key: Optional[str] = None,
    xlim: Tuple[Optional[int], Optional[int]] = (None, None),
    title: Optional[str] = None,
    left_margin=None,
    ytick_fontsize: Optional[int] = None,
    title_fontsize: Optional[int] = None,
    show_node_names: bool = True,
    show_yticks: bool = True,
    show_colorbar: bool = True,
    legend_fontsize: Union[int, float, _FontSize, None] = None,
    legend_fontweight: Union[int, _FontWeight, None] = None,
    normalize_to_zero_one: bool = False,
    as_heatmap: bool = True,
    return_data: bool = False,
    show: Optional[bool] = None,
    save: Union[bool, str, None] = None,
    ax: Optional[Axes] = None,
) -> Optional[Axes]:
    """\
    Gene expression and annotation changes along paths in the abstracted graph.

    Parameters
    ----------
    adata
        An annotated data matrix.
    nodes
        A path through nodes of the abstracted graph, that is, names or indices
        (within `.categories`) of groups that have been used to run PAGA.
    keys
        Either variables in `adata.var_names` or annotations in
        `adata.obs`. They are plotted using `color_map`.
    use_raw
        Use `adata.raw` for retrieving gene expressions if it has been set.
    annotations
        Plot these keys with `color_maps_annotations`. Need to be keys for
        `adata.obs`.
    color_map
        Matplotlib colormap.
    color_maps_annotations
        Color maps for plotting the annotations. Keys of the dictionary must
        appear in `annotations`.
    palette_groups
        Usually, use the same `sc.pl.palettes...` as used for coloring the
        abstracted graph.
    n_avg
        Number of data points to include in computation of running average.
    groups_key
        Key of the grouping used to run PAGA. If `None`, defaults to
        `adata.uns['paga']['groups']`.
    as_heatmap
        Plot the timeseries as heatmap. If not plotting as heatmap,
        `annotations` have no effect.
    show_node_names
        Plot the node names on the nodes bar.
    show_colorbar
        Show the colorbar.
    show_yticks
        Show the y ticks.
    normalize_to_zero_one
        Shift and scale the running average to [0, 1] per gene.
    return_data
        Return the timeseries data in addition to the axes if `True`.
    show
         Show the plot, do not return axis.
    save
        If `True` or a `str`, save the figure.
        A string is appended to the default filename.
        Infer the filetype if ending on \\{`'.pdf'`, `'.png'`, `'.svg'`\\}.
    ax
         A matplotlib axes object.

    Returns
    -------
    A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`.
    If `return_data`, return the timeseries data in addition to an axes.
    """
    ax_was_none = ax is None

    if groups_key is None:
        if 'groups' not in adata.uns['paga']:
            raise KeyError(
                'Pass the key of the grouping with which you ran PAGA, '
                'using the parameter `groups_key`.')
        groups_key = adata.uns['paga']['groups']
    groups_names = adata.obs[groups_key].cat.categories

    if 'dpt_pseudotime' not in adata.obs.keys():
        raise ValueError(
            '`pl.paga_path` requires computation of a pseudotime `tl.dpt` '
            'for ordering at single-cell resolution')

    if palette_groups is None:
        _utils.add_colors_for_categorical_sample_annotation(adata, groups_key)
        palette_groups = adata.uns[f'{groups_key}_colors']

    def moving_average(a):
        return _sc_utils.moving_average(a, n_avg)

    ax = pl.gca() if ax is None else ax
    from matplotlib import transforms
    trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
    X = []
    x_tick_locs = [0]
    x_tick_labels = []
    groups = []
    anno_dict = {anno: [] for anno in annotations}
    if isinstance(nodes[0], str):
        nodes_ints = []
        groups_names_set = set(groups_names)
        for node in nodes:
            if node not in groups_names_set:
                raise ValueError(
                    f'Each node/group needs to be in {groups_names.tolist()} '
                    f'(`groups_key`={groups_key!r}) not {node!r}.')
            nodes_ints.append(groups_names.get_loc(node))
        nodes_strs = nodes
    else:
        nodes_ints = nodes
        nodes_strs = [groups_names[node] for node in nodes]

    adata_X = adata
    if use_raw and adata.raw is not None:
        adata_X = adata.raw

    for ikey, key in enumerate(keys):
        x = []
        for igroup, group in enumerate(nodes_ints):
            idcs = np.arange(adata.n_obs)[adata.obs[groups_key].values ==
                                          nodes_strs[igroup]]
            if len(idcs) == 0:
                raise ValueError(
                    'Did not find data points that match '
                    f'`adata.obs[{groups_key!r}].values == {str(group)!r}`. '
                    f'Check whether `adata.obs[{groups_key!r}]` '
                    'actually contains what you expect.')
            idcs_group = np.argsort(adata.obs['dpt_pseudotime'].values[
                adata.obs[groups_key].values == nodes_strs[igroup]])
            idcs = idcs[idcs_group]
            if key in adata.obs_keys(): x += list(adata.obs[key].values[idcs])
            else: x += list(adata_X[:, key].X[idcs])
            if ikey == 0:
                groups += [group for i in range(len(idcs))]
                x_tick_locs.append(len(x))
                for anno in annotations:
                    series = adata.obs[anno]
                    if is_categorical_dtype(series): series = series.cat.codes
                    anno_dict[anno] += list(series.values[idcs])
        if n_avg > 1:
            old_len_x = len(x)
            x = moving_average(x)
            if ikey == 0:
                for key in annotations:
                    if not isinstance(anno_dict[key][0], str):
                        anno_dict[key] = moving_average(anno_dict[key])
        if normalize_to_zero_one:
            x -= np.min(x)
            x /= np.max(x)
        X.append(x)
        if not as_heatmap:
            ax.plot(x[xlim[0]:xlim[1]], label=key)
        if ikey == 0:
            for igroup, group in enumerate(nodes):
                if len(groups_names) > 0 and group not in groups_names:
                    label = groups_names[group]
                else:
                    label = group
                x_tick_labels.append(label)
    X = np.array(X)
    if as_heatmap:
        img = ax.imshow(X,
                        aspect='auto',
                        interpolation='nearest',
                        cmap=color_map)
        if show_yticks:
            ax.set_yticks(range(len(X)))
            ax.set_yticklabels(keys, fontsize=ytick_fontsize)
        else:
            ax.set_yticks([])
        ax.set_frame_on(False)
        ax.set_xticks([])
        ax.tick_params(axis='both', which='both', length=0)
        ax.grid(False)
        if show_colorbar:
            pl.colorbar(img, ax=ax)
        left_margin = 0.2 if left_margin is None else left_margin
        pl.subplots_adjust(left=left_margin)
    else:
        left_margin = 0.4 if left_margin is None else left_margin
        if len(keys) > 1:
            pl.legend(
                frameon=False,
                loc='center left',
                bbox_to_anchor=(-left_margin, 0.5),
                fontsize=legend_fontsize,
            )
    xlabel = groups_key
    if not as_heatmap:
        ax.set_xlabel(xlabel)
        pl.yticks([])
        if len(keys) == 1: pl.ylabel(keys[0] + ' (a.u.)')
    else:
        import matplotlib.colors
        # groups bar
        ax_bounds = ax.get_position().bounds
        groups_axis = pl.axes((
            ax_bounds[0],
            ax_bounds[1] - ax_bounds[3] / len(keys),
            ax_bounds[2],
            ax_bounds[3] / len(keys),
        ))
        groups = np.array(groups)[None, :]
        groups_axis.imshow(
            groups,
            aspect='auto',
            interpolation="nearest",
            cmap=matplotlib.colors.ListedColormap(
                # the following line doesn't work because of normalization
                # adata.uns['paga_groups_colors'])
                palette_groups[np.min(groups).astype(int):],
                N=int(np.max(groups) + 1 - np.min(groups)),
            ),
        )
        if show_yticks:
            groups_axis.set_yticklabels(['', xlabel, ''],
                                        fontsize=ytick_fontsize)
        else:
            groups_axis.set_yticks([])
        groups_axis.set_frame_on(False)
        if show_node_names:
            ypos = (groups_axis.get_ylim()[1] + groups_axis.get_ylim()[0]) / 2
            x_tick_locs = _sc_utils.moving_average(x_tick_locs, n=2)
            for ilabel, label in enumerate(x_tick_labels):
                groups_axis.text(
                    x_tick_locs[ilabel],
                    ypos,
                    x_tick_labels[ilabel],
                    fontdict=dict(
                        horizontalalignment='center',
                        verticalalignment='center',
                    ),
                )
        groups_axis.set_xticks([])
        groups_axis.grid(False)
        groups_axis.tick_params(axis='both', which='both', length=0)
        # further annotations
        y_shift = ax_bounds[3] / len(keys)
        for ianno, anno in enumerate(annotations):
            if ianno > 0: y_shift = ax_bounds[3] / len(keys) / 2
            anno_axis = pl.axes((
                ax_bounds[0],
                ax_bounds[1] - (ianno + 2) * y_shift,
                ax_bounds[2],
                y_shift,
            ))
            arr = np.array(anno_dict[anno])[None, :]
            if anno not in color_maps_annotations:
                color_map_anno = ('Vega10' if is_categorical_dtype(
                    adata.obs[anno]) else 'Greys')
            else:
                color_map_anno = color_maps_annotations[anno]
            img = anno_axis.imshow(
                arr,
                aspect='auto',
                interpolation='nearest',
                cmap=color_map_anno,
            )
            if show_yticks:
                anno_axis.set_yticklabels(['', anno, ''],
                                          fontsize=ytick_fontsize)
                anno_axis.tick_params(axis='both', which='both', length=0)
            else:
                anno_axis.set_yticks([])
            anno_axis.set_frame_on(False)
            anno_axis.set_xticks([])
            anno_axis.grid(False)
    if title is not None: ax.set_title(title, fontsize=title_fontsize)
    if show is None and not ax_was_none: show = False
    else: show = settings.autoshow if show is None else show
    _utils.savefig_or_show('paga_path', show=show, save=save)
    if return_data:
        df = pd.DataFrame(data=X.T, columns=keys)
        df['groups'] = moving_average(
            groups)  # groups is without moving average, yet
        if 'dpt_pseudotime' in anno_dict:
            df['distance'] = anno_dict['dpt_pseudotime'].T
        return (ax, df) if ax_was_none and not show else df
    else:
        return ax if ax_was_none and not show else None