Example #1
    def selected(self, val):
        skids = utils.eval_skids(val)

        if not isinstance(skids, np.ndarray):
            skids = np.array(skids)

        neurons = self.neurons

        logger.debug('{0} neurons selected ({1} previously)'.format(
            len(skids), len(self.selected)))

        # First un-highlight neurons that are no longer selected
        for s in [s for s in self.__selected if s not in set(skids)]:
            for v in neurons[s]:
                if isinstance(v, vp.scene.visuals.Mesh):
                    v.color = v._stored_color
                else:
                    v.set_data(color=v._stored_color)

        # Highlight new additions
        for s in skids:
            if s not in self.__selected:
                for v in neurons[s]:
                    # Keep track of old colour
                    v.unfreeze()
                    v._stored_color = v.color
                    v.freeze()
                    if isinstance(v, vp.scene.visuals.Mesh):
                        v.color = self.highlight_color
                    else:
                        v.set_data(color=self.highlight_color)

        self.__selected = skids

        self.update_legend()
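
A usage sketch (hypothetical: assumes ``viewer`` is the 3D viewer instance
this property setter belongs to):

>>> # Highlight two neurons; anything previously selected is reverted
>>> viewer.selected = [123456, 7890]
>>> # Clear the selection again
>>> viewer.selected = []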
Example #2
    def hide_neurons(self, n):
        """ Hide given neuron(s). """
        skids = utils.eval_skids(n)

        neurons = self.neurons

        for s in skids:
            for v in neurons[s]:
                if v.visible:
                    v.visible = False

        self.update_legend()
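
For example (same hypothetical ``viewer`` instance; ``eval_skids`` also
accepts annotations):

>>> viewer.hide_neurons([123456, 7890])
>>> viewer.hide_neurons('annotation:PN right')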
Example #3
    def toggle_neurons(self, n):
        """ Toggle neuron(s) visibility. """

        n = utils._make_iterable(n)

        if all(isinstance(u, uuid.UUID) for u in n):
            obj = self._neuron_obj
        else:
            n = utils.eval_skids(n)
            obj = self.neurons

        for s in n:
            for v in obj[s]:
                v.visible = not v.visible

        self.update_legend()
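
Because of the ``uuid.UUID`` check above, this method accepts either
skeleton IDs or UUIDs of individual visuals (``viewer`` again hypothetical):

>>> viewer.toggle_neurons([123456, 7890])
>>> # or toggle individual visuals by their UUID (keys of viewer._neuron_obj)
>>> viewer.toggle_neurons(list(viewer._neuron_obj)[:1])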
Example #4
    def toggle_select(self, n):
        """ Toggle selected of given neuron. """
        skids = utils.eval_skids(n)

        neurons = self.neurons

        for s in skids:
            if self.selected != s:
                self.selected = s
                for v in neurons[s]:
                    self._selected_color = v.color
                    v.set_data(color=self.highlight_color)
            else:
                self.selected = None
                for v in neurons[s]:
                    v.set_data(color=self._selected_color)

        self.update_legend()
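
Usage sketch (hypothetical ``viewer``):

>>> viewer.toggle_select(123456)   # select and highlight
>>> viewer.toggle_select(123456)   # deselect and restore colour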
Example #5
    def unhide_neurons(self, n=None, check_alpha=False):
        """ Unhide given neuron(s). Use ``n`` to unhide specific neurons. """
        if n is not None:
            skids = utils.eval_skids(n)
        else:
            skids = list(self.neurons.keys())

        neurons = self.neurons

        for s in skids:
            for v in neurons[s]:
                if not v.visible:
                    v.visible = True
            if check_alpha:
                c = list(mcl.to_rgba(neurons[s][0].color))
                if c[3] != 1:
                    c[3] = 1
                    self.set_colors({s: c})

        self.update_legend()
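
For example, to unhide everything and also reset faded (semi-transparent)
neurons back to full opacity (hypothetical ``viewer``):

>>> viewer.unhide_neurons(check_alpha=True)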
Example #6
    def select(self, x, *args):
        """ Select given neurons.

        Parameters
        ----------
        x :     list of skeleton IDs | CatmaidNeuron/List | pandas.DataFrame

        Returns
        -------
        :class:`pymaid.b3d.object_list` :  containing requested neurons

        Examples
        --------
        >>> selection = handler.select([123456, 7890])
        >>> # Get only connectors
        >>> cn = selection.connectors
        >>> # Hide everything else
        >>> cn.hide_others()
        >>> # Change color of presynapses
        >>> selection.presynapses.color(0, 1, 0)
        """

        skids = utils.eval_skids(x)

        if not skids:
            logger.error('No skids found.')

        names = []

        for ob in bpy.data.objects:
            ob.select = False
            if 'skeleton_id' in ob:
                if ob['skeleton_id'] in skids:
                    ob.select = True
                    names.append(ob.name)
        return object_list(names, handler=self)
Example #7
def get_time_invested(x, remote_instance=None, minimum_actions=10,
                      treenodes=True, connectors=True, mode='SUM',
                      max_inactive_time=3, start_date=None, end_date=None):
    """ Takes a list of neurons and calculates the time individual users
    have spent working on this set of neurons.

    Parameters
    ----------
    x
                        Which neurons to check. Can be either:

                        1. skeleton IDs (int or str)
                        2. neuron name (str, must be exact match)
                        3. annotation: e.g. 'annotation:PN right'
                        4. CatmaidNeuron or CatmaidNeuronList object

                        If you pass a CatmaidNeuron/List, its data is used to
                        calculate time invested. You can exploit this to get
                        the time invested in a given compartment of a neuron,
                        e.g. by pruning it to a volume.
    remote_instance :   CatmaidInstance, optional
                        Either pass explicitly or define globally.
    minimum_actions :   int, optional
                        Minimum number of actions per minute to be counted as
                        active.
    treenodes :         bool, optional
                        If False, treenodes will not be taken into account
    connectors :        bool, optional
                        If False, connectors will not be taken into account
    mode :              'SUM' | 'OVER_TIME' | 'ACTIONS', optional
                        (1) 'SUM' will return total time invested (in minutes)
                            per user.
                        (2) 'OVER_TIME' will return minutes invested/day over
                            time.
                        (3) 'ACTIONS' will return actions
                            (node/connectors placed/edited) per day.
    max_inactive_time : int, optional
                        Maximal time inactive in minutes.
    start_date :        None | tuple | datetime.date, optional
    end_date :          None | tuple | datetime.date, optional
                        Restricts time invested to window. Applies to creation
                        but not edition time!

    Returns
    -------
    pandas.DataFrame
        If ``mode='SUM'``, values represent minutes invested.

        >>> df
        ...       total  creation  edition  review
        ... user1
        ... user2

        If ``mode='OVER_TIME'`` or ``mode='ACTIONS'``:

        >>> df
        ...       date date date ...
        ... user1
        ... user2

    For `OVER_TIME`, values represent minutes invested on that day. For
        `ACTIONS`, values represent actions (creation, edition, review) on that
        day.


    Important
    ---------
    Creation/Edition/Review times can overlap! This is why total time spent
    is not just creation + edition + review.

    Please note that this currently does not take placement of
    pre-/postsynaptic nodes into account!

    Be aware of the ``minimum_actions`` parameter: at low settings even
    a single action (e.g. connecting a node) will add considerably to time
    invested. To keep total reconstruction time comparable to what Catmaid
    calculates, you should consider about 10 actions/minute (= a click every
    6 seconds) and ``max_inactive_time`` of 3 mins.

    CATMAID gives reconstruction time across all users. Here, we calculate
    the time spent tracing for individuals. This may lead to a discrepancy
    between the sum of time invested over all users from this function and
    CATMAID's reconstruction time.

    Examples
    --------
    Plot pie chart of contributions per user using Plotly. This example
    assumes that you have already imported and set up pymaid.

    >>> import plotly
    >>> stats = pymaid.get_time_invested(skids, remote_instance)
    >>> # Use plotly to generate pie chart
    >>> fig = {"data": [{"values": stats.total.tolist(),
    ...        "labels": stats.user.tolist(), "type" : "pie" }]}
    >>> plotly.offline.plot(fig)

    Plot reconstruction efforts over time:

    >>> stats = pymaid.get_time_invested(skids, mode='OVER_TIME')
    >>> # Plot time invested over time
    >>> stats.T.plot()
    >>> # Plot cumulative time invested over time
    >>> stats.T.cumsum(axis=0).plot()
    >>> # Filter for major contributors
    >>> stats[stats.sum(axis=1) > 20].T.cumsum(axis=0).plot()

    """

    def _extract_timestamps(ts, desc='Calc'):
        grouped = (ts.set_index('timestamp', drop=False)
                     .groupby(['user', pd.Grouper(freq=bin_width)])
                     .count() >= minimum_actions)
        temp_stats = {}
        for u in config.tqdm(set(ts.user.unique()) & set(relevant_users),
                             desc=desc, disable=config.pbar_hide, leave=False):
            temp_stats[u] = sum(grouped.loc[u].values)[0] * interval
        return temp_stats

    if mode not in ['SUM', 'OVER_TIME', 'ACTIONS']:
        raise ValueError('Unknown mode "%s"' % str(mode))

    remote_instance = utils._eval_remote_instance(remote_instance)

    skids = utils.eval_skids(x, remote_instance)

    # Maximal inactive time is simply translated into binning
    # We need this later for pandas.TimeGrouper() anyway
    interval = max_inactive_time
    bin_width = '%iMin' % interval

    # Update minimum_actions to reflect actions/interval instead of actions/minute
    minimum_actions *= interval

    user_list = fetch.get_user_list(remote_instance).set_index('id')

    if not isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
        x = fetch.get_neuron(skids, remote_instance=remote_instance)

    if isinstance(x, core.CatmaidNeuron):
        skdata = core.CatmaidNeuronList(x)
    elif isinstance(x, core.CatmaidNeuronList):
        skdata = x

    if not isinstance(end_date, (datetime.date, type(None))):
        end_date = datetime.date(*end_date)

    if not isinstance(start_date, (datetime.date, type(None))):
        start_date = datetime.date(*start_date)

    # Extract connector and node IDs
    node_ids = []
    connector_ids = []
    for n in skdata.itertuples():
        if treenodes:
            node_ids += n.nodes.treenode_id.tolist()
        if connectors:
            connector_ids += n.connectors.connector_id.tolist()

    # Get node details
    node_details = fetch.get_node_details(
        node_ids + connector_ids, remote_instance=remote_instance)

    # Get details for links
    link_details = fetch.get_connector_links(skdata,
                                             remote_instance=remote_instance)

    # link_details contains all links. We have to subset this to existing
    # connectors in case the input neurons have been pruned
    link_details = link_details[link_details.connector_id.isin(connector_ids)]

    # Remove timestamps outside of date range (if provided)
    if start_date:
        node_details = node_details[node_details.creation_time >= np.datetime64(
            start_date)]
        link_details = link_details[link_details.creation_time >= np.datetime64(
            start_date)]
    if end_date:
        node_details = node_details[node_details.creation_time <= np.datetime64(
            end_date)]
        link_details = link_details[link_details.creation_time <= np.datetime64(
            end_date)]

    # Dataframe for creation (i.e. the actual generation of the nodes)
    creation_timestamps = np.append(node_details[['creator', 'creation_time']].values,
                                    link_details[['creator_id', 'creation_time']].values,
                                    axis=0)
    creation_timestamps = pd.DataFrame(creation_timestamps,
                                       columns=['user', 'timestamp'])

    # Dataframe for edition times - can't use links as there is no editor
    edition_timestamps = node_details[['editor', 'edition_time']].copy()
    edition_timestamps.columns = ['user', 'timestamp']

    # Generate dataframe for reviews
    reviewers = [u for l in node_details.reviewers.values for u in l]
    timestamps = [ts for l in node_details.review_times.values for ts in l]
    review_timestamps = pd.DataFrame([[u, ts] for u, ts in zip(
        reviewers, timestamps)], columns=['user', 'timestamp'])

    # Merge all timestamps
    all_timestamps = pd.concat(
        [creation_timestamps, edition_timestamps, review_timestamps], axis=0)

    all_timestamps.sort_values('timestamp', inplace=True)

    relevant_users = all_timestamps.groupby('user').count()
    relevant_users = relevant_users[relevant_users.timestamp >= minimum_actions].index.values

    if mode == 'SUM':
        stats = {
            'total': {u: 0 for u in relevant_users},
            'creation': {u: 0 for u in relevant_users},
            'edition': {u: 0 for u in relevant_users},
            'review': {u: 0 for u in relevant_users}
        }
        stats['total'].update(_extract_timestamps(all_timestamps,
                                                  desc='Calc total'))
        stats['creation'].update(_extract_timestamps(creation_timestamps,
                                                     desc='Calc creation'))
        stats['edition'].update(_extract_timestamps(edition_timestamps,
                                                    desc='Calc edition'))
        stats['review'].update(_extract_timestamps(review_timestamps,
                                                   desc='Calc review'))

        df = pd.DataFrame([[user_list.loc[u, 'login'],
                            stats['total'][u],
                            stats['creation'][u],
                            stats['edition'][u],
                            stats['review'][u]] for u in relevant_users],
                          columns=['user', 'total', 'creation',
                                   'edition', 'review'])
        return df.sort_values('total',
                              ascending=False).reset_index(drop=True).set_index('user')

    elif mode == 'ACTIONS':
        all_ts = (all_timestamps.set_index('timestamp', drop=False)
                  .timestamp.groupby(pd.Grouper(freq='1d'))
                  .count().to_frame())
        all_ts.columns = ['all_users']
        all_ts = all_ts.T
        # Get total time spent
        for u in config.tqdm(all_timestamps.user.unique(), desc='Calc. total',
                             disable=config.pbar_hide, leave=False):
            this_ts = (all_timestamps[all_timestamps.user == u]
                       .set_index('timestamp', drop=False)
                       .timestamp.groupby(pd.Grouper(freq='1d'))
                       .count().to_frame())
            this_ts.columns = [user_list.loc[u, 'login']]

            all_ts = pd.concat([all_ts, this_ts.T])

        return all_ts.fillna(0)

    elif mode == 'OVER_TIME':
        # First count all bins with the minimum number of actions
        minutes_counting = (all_timestamps.set_index('timestamp', drop=False)
                            .timestamp.groupby(pd.Grouper(freq=bin_width))
                            .count().to_frame() > minimum_actions)
        # Then remove the bins that have fewer than the minimum actions
        minutes_counting = minutes_counting[minutes_counting.timestamp]
        # Now group by day
        all_ts = minutes_counting.groupby(pd.Grouper(freq='1d')).count()
        all_ts.columns = ['all_users']
        all_ts = all_ts.T
        # Get total time spent
        for u in config.tqdm(all_timestamps.user.unique(), desc='Calc. total',
                             disable=config.pbar_hide, leave=False):
            minutes_counting = (all_timestamps[all_timestamps.user == u]
                                .set_index('timestamp', drop=False)
                                .timestamp.groupby(pd.Grouper(freq=bin_width))
                                .count().to_frame() > minimum_actions)
            minutes_counting = minutes_counting[minutes_counting.timestamp]
            this_ts = minutes_counting.groupby(pd.Grouper(freq='1d')).count()

            this_ts.columns = [user_list.loc[u, 'login']]

            all_ts = pd.concat([all_ts, this_ts.T])

        all_ts.fillna(0, inplace=True)

        return all_ts
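
The interval/threshold rescaling near the top of the function means that
with the defaults (``minimum_actions=10``, ``max_inactive_time=3``) activity
is counted in 3-minute bins requiring at least 30 actions each:

>>> max_inactive_time, minimum_actions = 3, 10
>>> bin_width = '%iMin' % max_inactive_time
>>> bin_width
'3Min'
>>> minimum_actions * max_inactive_time  # actions required per bin
30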
Example #8
def get_user_contributions(x, teams=None, remote_instance=None):
    """ Takes a list of neurons and returns nodes and synapses contributed
    by each user.

    Notes
    -----
    This is essentially a wrapper for :func:`pymaid.get_contributor_statistics`
    - if you are also interested in e.g. construction time, review time, etc.
    you may want to consider using :func:`~pymaid.get_contributor_statistics`
    instead.

    Parameters
    ----------
    x
                        Which neurons to check. Can be either:

                        1. skeleton IDs (int or str)
                        2. neuron name (str, must be exact match)
                        3. annotation: e.g. 'annotation:PN right'
                        4. CatmaidNeuron or CatmaidNeuronList object
    teams :             dict, optional
                        Teams to group contributions for. Users must be
                        logins::

                            {'teamA': ['user1', 'user2'], 'teamB': ['user3'], ...}

                        Users not part of any team will be grouped as team
                        ``'others'``.

    remote_instance :   Catmaid Instance, optional
                        Either pass explicitly or define globally.

    Returns
    -------
    pandas.DataFrame
        DataFrame in which each row represents a user

        >>> df
        ...   user  nodes  presynapses  postsynapses  nodes_reviewed
        ... 0
        ... 1

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> # Get contributors for a single neuron
    >>> cont = pymaid.get_user_contributions(2333007)
    >>> # Get top 10 (by node contribution)
    >>> top10 = cont.iloc[:10].set_index('user')
    >>> # Plot as bar chart
    >>> ax = top10.plot(kind='bar')
    >>> plt.show()

    >>> # Plot relative contributions
    >>> cont = pymaid.get_user_contributions(2333007)
    >>> cont = cont.set_index('user')
    >>> # Normalise
    >>> cont_rel = cont / cont.sum(axis=0).values
    >>> # Plot contributors with >5% node contributions
    >>> ax = cont_rel[cont_rel.nodes > .05].plot(kind='bar')
    >>> plt.show()

    See Also
    --------
    :func:`~pymaid.get_contributor_statistics`
                           Gives you more basic info on neurons of interest
                           such as total reconstruction/review time.

    """

    if teams is not None:
        # Prepare teams
        if not isinstance(teams, dict):
            raise TypeError('Expected teams of type dict, got {}'.format(type(teams)))

        for t in teams:
            if not isinstance(teams[t], list):
                raise TypeError('Teams must be lists of user logins, got {}'.format(type(teams[t])))

        # Turn teams into a login -> team dict
        teams = {u: t for t in teams for u in teams[t]}

    remote_instance = utils._eval_remote_instance(remote_instance)

    skids = utils.eval_skids(x, remote_instance)

    cont = fetch.get_contributor_statistics(
        skids, remote_instance, separate=False)

    all_users = set(list(cont.node_contributors.keys())
                    + list(cont.pre_contributors.keys())
                    + list(cont.post_contributors.keys()))

    stats = {
        'nodes': {u: 0 for u in all_users},
        'presynapses': {u: 0 for u in all_users},
        'postsynapses': {u: 0 for u in all_users},
        'nodes_reviewed': {u: 0 for u in all_users}
    }

    for u in cont.node_contributors:
        stats['nodes'][u] = cont.node_contributors[u]
    for u in cont.pre_contributors:
        stats['presynapses'][u] = cont.pre_contributors[u]
    for u in cont.post_contributors:
        stats['postsynapses'][u] = cont.post_contributors[u]
    for u in cont.review_contributors:
        stats['nodes_reviewed'][u] = cont.review_contributors[u]

    stats = pd.DataFrame([[u, stats['nodes'][u],
                             stats['presynapses'][u],
                             stats['postsynapses'][u],
                             stats['nodes_reviewed'][u]] for u in all_users],
                         columns=['user', 'nodes', 'presynapses',
                                  'postsynapses', 'nodes_reviewed']
                        ).sort_values('nodes', ascending=False).reset_index(drop=True)

    if teams is None:
        return stats

    stats['team'] = [teams.get(u, 'others') for u in stats.user.values]
    return stats.groupby('team').sum()
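
A usage sketch for the ``teams`` grouping (logins are made up):

>>> teams = {'teamA': ['user1', 'user2'], 'teamB': ['user3']}
>>> cont = pymaid.get_user_contributions(2333007, teams=teams)
>>> # Rows are now teams; 'others' collects unassigned users
>>> cont.head()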
Example #9
def adjacency_matrix(s,
                     t=None,
                     remote_instance=None,
                     source_grp={},
                     target_grp={},
                     syn_threshold=None,
                     syn_cutoff=None,
                     use_connectors=False):
    """ Generate adjacency matrix for synaptic connections between sets of
    neurons. Directional: sources = rows, targets = columns.

    Parameters
    ----------
    s
                        Source neurons as single or list of either:

                        1. skeleton IDs (int or str)
                        2. neuron name (str, exact match)
                        3. annotation: e.g. 'annotation:PN right'
                        4. CatmaidNeuron or CatmaidNeuronList object
    t
                        Optional. Target neurons as single or list of either:

                        1. skeleton IDs (int or str)
                        2. neuron name (str, exact match)
                        3. annotation: e.g. 'annotation:PN right'
                        4. CatmaidNeuron or CatmaidNeuronList object

                        If not provided, ``source neurons = target neurons``.
    remote_instance :   CATMAID instance, optional
    syn_cutoff :        int, optional
                        If set, will cut off connections above given value.
    syn_threshold :     int, optional
                        If set, will ignore connections with less synapses.
    source_grp :        dict, optional
                        Use to collapse sources into groups. Can be either:
                          1. ``{group1: [neuron1, neuron2, ... ], ..}``
                          2. ``{neuron1: group1, neuron2 : group2, ..}``

                        ``syn_cutoff`` and ``syn_threshold`` are applied
                        BEFORE grouping!
    target_grp :        dict, optional
                        See ``source_grp`` for possible formats.
    use_connectors :    bool, optional
                        If True AND ``s`` or ``t`` are ``CatmaidNeuron/List``,
                        restrict adjacency matrix to their connectors. Use
                        if e.g. you are using pruned neurons. **Important**:
                        This does not work if you have multiple fragments per
                        neuron!

    Returns
    -------
    matrix :          ``pandas.DataFrame``

    See Also
    --------
    :func:`~pymaid.group_matrix`
                More fine-grained control over matrix grouping.
    :func:`~pymaid.adjacency_from_connectors`
                Use this function if you are working with multiple fragments
                per neuron.

    Examples
    --------
    Generate and plot an adjacency matrix:

    >>> import seaborn as sns
    >>> import matplotlib.pyplot as plt
    >>> import pymaid
    >>> import pandas as pd
    >>> rm = pymaid.CatmaidInstance(url, user, pw, token)
    >>> neurons = pymaid.get_neurons('annotation:test')
    >>> mat = pymaid.adjacency_matrix(neurons)
    >>> g = sns.heatmap(mat, square=True)
    >>> g.set_yticklabels(g.get_yticklabels(), rotation = 0, fontsize = 7)
    >>> g.set_xticklabels(g.get_xticklabels(), rotation = 90, fontsize = 7)
    >>> plt.show()

    Cut neurons into axon and dendrite and compare their connectivity:

    >>> # Get a set of neurons
    >>> nl = pymaid.get_neurons('annotation:type_16_candidates')
    >>> # Split into axon and dendrite using a tag
    >>> nl.reroot(nl.soma)
    >>> nl_axon = nl.prune_proximal_to('axon', inplace=False)
    >>> nl_dend = nl.prune_distal_to('axon', inplace=False)
    >>> # Get a list of the downstream partners
    >>> cn_table = pymaid.get_partners(nl)
    >>> ds_partners = cn_table[cn_table.relation == 'downstream']
    >>> # Take the top 10 downstream partners
    >>> top_ds = ds_partners.iloc[:10].skeleton_id.values
    >>> # Generate separate adjacency matrices for axon and dendrites
    >>> adj_axon = pymaid.adjacency_matrix(nl_axon, top_ds, use_connectors=True)
    >>> adj_dend = pymaid.adjacency_matrix(nl_dend, top_ds, use_connectors=True)
    >>> # Rename rows and merge dataframes
    >>> adj_axon.index += '_axon'
    >>> adj_dend.index += '_dendrite'
    >>> adj_merged = pd.concat([adj_axon, adj_dend], axis=0)
    >>> # Plot heatmap using seaborn
    >>> ax = sns.heatmap(adj_merged)
    >>> plt.show()

    """

    remote_instance = utils._eval_remote_instance(remote_instance)

    if t is None:
        t = s

    neuronsA = utils.eval_skids(s, remote_instance=remote_instance)
    neuronsB = utils.eval_skids(t, remote_instance=remote_instance)

    # Make sure neurons are integers
    neurons = list(set([int(n) for n in (neuronsA + neuronsB)]))
    neuronsA = [int(n) for n in neuronsA]
    neuronsB = [int(n) for n in neuronsB]

    # Make sure neurons are unique
    neuronsA = sorted(set(neuronsA), key=neuronsA.index)
    neuronsB = sorted(set(neuronsB), key=neuronsB.index)

    logger.info('Retrieving and filtering connectivity...')

    if use_connectors and (
            isinstance(s, (core.CatmaidNeuron, core.CatmaidNeuronList))
            or isinstance(t, (core.CatmaidNeuron, core.CatmaidNeuronList))):
        edges = _edges_from_connectors(s, t, remote_instance=remote_instance)
    else:
        edges = fetch.get_edges(neurons, remote_instance=remote_instance)

    # Turn into an adjacency matrix
    matrix = edges.pivot(values='weight',
                         columns='target_skid',
                         index='source_skid').fillna(0)

    # Filter to actual sources and targets
    matrix = matrix.reindex(neuronsA, columns=neuronsB, fill_value=0)

    # Apply cutoff and threshold
    matrix = matrix.clip(upper=syn_cutoff)

    if syn_threshold:
        matrix[matrix < syn_threshold] = 0

    matrix.datatype = 'adjacency_matrix'

    if source_grp or target_grp:
        matrix = group_matrix(matrix,
                              source_grp,
                              target_grp,
                              drop_ungrouped=False)

    logger.info('Finished!')

    return matrix
Example #10
def adjacency_from_connectors(source, target=None, remote_instance=None):
    """ Regenerates adjacency matrices from neurons' connectors.

    Notes
    -----
    This function creates an adjacency matrix from scratch using just the
    neurons' connectors. This function is able to deal with non-unique
    skeleton IDs (most other functions are not). Use it e.g. when you
    split neurons into multiple fragments.

    Parameters
    ----------
    source,target : skeleton IDs | CatmaidNeuron | CatmaidNeuronList
                    Neuron(s) for which to generate adjacency matrix.
                    If ``target==None``, will use ``target=source``.

    Returns
    -------
    pandas.DataFrame
            Matrix holding possible synaptic contacts. Sources are rows,
            targets are columns. Labels are skeleton IDs. Order is preserved.

            >>> df
                        target1  target2  target3  ...
                source1    5        1        0
                source2    10       20       5
                source3    4        3        15
                ...

    See Also
    --------
    :func:`~pymaid.adjacency_matrix`
            If you are working with "intact" neurons. Much faster!
    :func:`~pymaid.filter_connectivity`
            Use this function if you have only a single fragment per neuron
            (e.g. just the axon). Also way faster.

    Examples
    --------
    >>> # Fetch some neurons
    >>> x = pymaid.get_neuron('annotation:PD2a1/b1')
    >>> # Split into axon / dendrites
    >>> x.reroot(x.soma)
    >>> split = pymaid.split_axon_dendrite(x)
    >>> # Regenerate all-by-all adjacency matrix
    >>> adj = pymaid.adjacency_from_connectors(split)
    >>> # Skeleton IDs are non-unique but column/row order = input order:
    >>> # in this example, the first occurrence is axon, the second dendrites
    >>> adj.head()

    """

    remote_instance = utils._eval_remote_instance(remote_instance)

    if not isinstance(source, (core.CatmaidNeuron, core.CatmaidNeuronList)):
        skids = utils.eval_skids(source, remote_instance=remote_instance)
        source = fetch.get_neuron(skids, remote_instance=remote_instance)

    if isinstance(target, type(None)):
        target = source
    elif not isinstance(target, (core.CatmaidNeuron, core.CatmaidNeuronList)):
        skids = utils.eval_skids(target, remote_instance=remote_instance)
        target = fetch.get_neuron(skids, remote_instance=remote_instance)

    if isinstance(source, core.CatmaidNeuron):
        source = core.CatmaidNeuronList(source)

    if isinstance(target, core.CatmaidNeuron):
        target = core.CatmaidNeuronList(target)

    # Generate empty adjacency matrix
    adj = np.zeros((len(source), len(target)))

    # Get connector details for all neurons
    all_cn = list(
        set(
            np.append(source.connectors.connector_id.values,
                      target.connectors.connector_id.values)))
    cn_details = fetch.get_connector_details(all_cn,
                                             remote_instance=remote_instance)

    # Now go over all source neurons and process connections
    for i, s in enumerate(
            config.tqdm(source,
                        desc='Processing',
                        disable=config.pbar_hide,
                        leave=config.pbar_leave)):

        # Get all connectors presynaptic for this source
        this_cn = cn_details[
            (cn_details.presynaptic_to == int(s.skeleton_id))
            & (cn_details.connector_id.isin(s.connectors.connector_id))]

        # Go over all target neurons
        for k, t in enumerate(target):
            t_tn = set(t.nodes.treenode_id.values)
            t_post = t.postsynapses.connector_id.values

            # Extract number of connections from source to this target
            this_t = this_cn[this_cn.connector_id.isin(t_post)]

            # Now figure out how many links are between this connector and
            # the target
            n_links = sum([
                len(t_tn & set(r.postsynaptic_to_node))
                for r in this_t.itertuples()
            ])

            adj[i][k] = n_links

    return pd.DataFrame(adj,
                        index=source.skeleton_id,
                        columns=target.skeleton_id)
Example #11
def network2nx(x, remote_instance=None, threshold=1):
    """ Generates NetworkX graph for neuron connectivity.

    Parameters
    ----------
    x
                        Catmaid Neurons as:
                         1. list of skeleton IDs (int or str)
                         2. list of neuron names (str, exact match)
                         3. annotation(s): e.g. 'annotation:PN right'
                         4. CatmaidNeuronList object
                         5. Adjacency matrix (pd.DataFrame, rows=sources,
                            columns=targets)
    remote_instance :   CATMAID instance, optional
                        Either pass directly to function or define globally
                        as 'remote_instance'.
    threshold :         int, optional
                        Connections weaker than this will be excluded.

    Returns
    -------
    networkx.DiGraph
                        NetworkX representation of the network.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import networkx as nx
    >>> import numpy as np
    >>> g = pymaid.network2nx('annotation:large network')
    >>> # Plot with default settings
    >>> nx.draw(g)
    >>> plt.show()
    >>> # Plot with neuron names
    >>> labels = nx.get_node_attributes(g, 'neuron_name')
    >>> nx.draw(g, labels=labels, with_labels=True)
    >>> plt.show()
    >>> # Plot with layout
    >>> layout = nx.circular_layout(g)
    >>> nx.draw(g, pos=layout)
    >>> plt.show()
    >>> # Plot with edge weights
    >>> nx.draw_networkx_nodes(g, pos=layout)
    >>> weight = np.array(list(nx.get_edge_attributes(g, 'weight').values()))
    >>> nx.draw_networkx_edges(g, pos=layout, width=weight/50)
    >>> plt.show()

    """

    remote_instance = utils._eval_remote_instance(remote_instance)

    if isinstance(x, (core.CatmaidNeuronList, list, np.ndarray, str)):
        skids = utils.eval_skids(x, remote_instance=remote_instance)

        # Fetch edges
        edges = fetch.get_edges(skids, remote_instance=remote_instance)
        # Reformat into networkx format
        edges = [[
            str(e.source_skid),
            str(e.target_skid), {
                'weight': e.weight
            }
        ] for e in edges[edges.weight >= threshold].itertuples()]
    elif isinstance(x, pd.DataFrame):
        # We have to account for the fact that some might not be skids
        skids = []
        for s in list(set(x.columns.tolist() + x.index.tolist())):
            try:
                skids.append(int(s))
            except BaseException:
                pass
        # Generate edge list
        edges = [[str(s), str(t), {
            'weight': x.loc[s, t]
        }] for s in x.index.values for t in x.columns.values
                 if x.loc[s, t] >= threshold]
    else:
        raise ValueError('Unable to process data of type "{0}"'.format(
            type(x)))

    # Generate node dictionary
    names = fetch.get_names(skids, remote_instance=remote_instance)
    nodes = [[str(s), {'neuron_name': names.get(s, s)}] for s in skids]

    # Generate graph and assign custom properties
    g = nx.DiGraph()
    g.add_nodes_from(nodes)
    g.add_edges_from(edges)

    return g
Example #12
def network2igraph(x, remote_instance=None, threshold=1):
    """ Generates iGraph graph for neuron connectivity. Requires iGraph to be
    installed.

    Parameters
    ----------
    x
                        Catmaid Neurons as:
                         1. list of skeleton IDs (int or str)
                         2. list of neuron names (str, exact match)
                         3. annotation(s): e.g. 'annotation:PN right'
                         4. CatmaidNeuronList object
                         5. Adjacency matrix (pd.DataFrame, rows=sources,
                            columns=targets)
    remote_instance :   CATMAID instance, optional
                        Either pass directly to function or define globally
                        as 'remote_instance'.
    threshold :         int, optional
                        Connections weaker than this will be excluded.

    Returns
    -------
    igraph.Graph(directed=True)
                        iGraph representation of the network.

    Examples
    --------
    >>> import pymaid
    >>> import igraph
    >>> g = pymaid.network2igraph('annotation:large network', remote_instance=rm)
    >>> # Plot graph
    >>> igraph.plot(g)
    >>> # Plot with edge width
    >>> igraph.plot(g, **{'edge_width': [ w/10 for w in g.es['weight'] ] })
    >>> # Plot with edge label
    >>> igraph.plot(g, **{'edge_label': g.es['weight'] })
    >>> # Save as graphml to import into e.g. Cytoscape
    >>> g.save('graph.graphml')

    """
    if igraph is None:
        raise ImportError('igraph must be installed to use this function.')

    if isinstance(x, (core.CatmaidNeuronList, list, np.ndarray, str)):
        remote_instance = utils._eval_remote_instance(remote_instance)
        skids = utils.eval_skids(x, remote_instance=remote_instance)

        indices = {int(s): i for i, s in enumerate(skids)}

        # Fetch edges
        edges = fetch.get_edges(skids, remote_instance=remote_instance)

        # Reformat into igraph format
        edges_by_index = [[
            indices[e.source_skid], indices[e.target_skid]
        ] for e in edges[edges.weight >= threshold].itertuples()]
        weight = edges[edges.weight >= threshold].weight.tolist()
    elif isinstance(x, pd.DataFrame):
        skids = list(set(x.columns.tolist() + x.index.tolist()))
        # Generate edge list
        edges = [[i, j] for i in x.index.tolist() for j in x.columns.tolist()
                 if x.loc[i, j] >= threshold]
        edges_by_index = [[skids.index(e[0]),
                           skids.index(e[1])] for e in edges]
        weight = [x.loc[i, j] for i in x.index.tolist()
                  for j in x.columns.tolist()
                  if x.loc[i, j] >= threshold]
    else:
        raise ValueError('Unable to process data of type "{0}"'.format(
            type(x)))

    # Generate igraph and assign custom properties
    g = igraph.Graph(directed=True)
    g.add_vertices(len(skids))
    g.add_edges(edges_by_index)

    g.vs['node_id'] = skids
    # g.vs['neuron_name'] = g.vs['label'] = neuron_names
    g.es['weight'] = weight

    return g
Example #13
def take_snapshot(x,
                  skeleton_data=True,
                  cn_table=False,
                  node_details=False,
                  adjacency_matrix=True,
                  remote_instance=None,
                  cn_details=True,
                  annotations=False):
    """ Take a snapshot of CATMAID data associated with a set of neurons.

    Important
    ---------
    If you pass a CatmaidNeuron/List that has been modified (e.g. pruned),
    other data (e.g. connectivity) will be subset as well if applicable.
    If your CatmaidNeuron/List is still naive, you might want to just pass the
    skeleton ID(s) to speed things up.

    Parameters
    ----------
    x :                 skeleton IDs | CatmaidNeuron/List
                        Neurons for which to retrieve data.
    skeleton_data :     bool, optional
                        Include 3D skeleton data.
    cn_table :          bool, optional
                        Include connectivity table. Covers all neurons
                        connected to input neurons.
    node_details :      bool, optional
                        Include treenode and connector details.
    adjacency_matrix :  bool, optional
                        Include adjacency matrix covering the input neurons.
    cn_details :        bool, optional
                        Include connector details.
    annotations :       bool, optional
                        Include neuron annotations.
    remote_instance :   Catmaid Instance, optional
                        Either pass explicitly or define globally. Will
                        obviously not be added to the snapshot!

    Returns
    -------
    pandas Series

    Examples
    --------
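    >>> # A minimal sketch - assumes a CatmaidInstance has been set up and
    >>> # that this function is exposed as ``pymaid.take_snapshot``:
    >>> snap = pymaid.take_snapshot([123456, 7890], cn_table=True)
    >>> snap.index
    >>> # Snapshots are pandas Series and can be pickled:
    >>> snap.to_pickle('snapshot.pkl')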

    See Also
    --------
    :func:`~pymaid.load_snapshot`
            Use to load a snapshot file.
    """

    remote_instance = utils._eval_remote_instance(remote_instance)

    skids = utils.eval_skids(x, remote_instance)

    snapshot = pd.Series()

    # Add Coordinated Universal Time
    snapshot['utc_date'] = datetime.datetime.utcnow()

    # Add pymaid version
    snapshot['pymaid_version'] = init.__version__

    # Add skeleton data
    if skeleton_data:
        if not isinstance(x, (core.CatmaidNeuronList, core.CatmaidNeuron)):
            skdata = fetch.get_neurons(skids, remote_instance=remote_instance)
        else:
            skdata = x

        if isinstance(skdata, core.CatmaidNeuron):
            skdata = core.CatmaidNeuronList(skdata)

        snapshot['skeleton_data'] = skdata.to_dataframe()

    # Add connectivity table
    if cn_table:
        if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
            snapshot['cn_table'] = connectivity.cn_table_from_connectors(
                x, remote_instance=remote_instance)
        else:
            # Add connectivity table
            snapshot['cn_table'] = fetch.get_partners(
                x, remote_instance=remote_instance)

    # Add treenode and connector details (relies on ``skdata``, i.e. requires
    # ``skeleton_data=True``)
    if node_details:
        snapshot['treenode_details'] = fetch.get_node_details(
            skdata.nodes.treenode_id.values, remote_instance=remote_instance)
        snapshot['connector_details'] = fetch.get_node_details(
            skdata.connectors.connector_id.values,
            remote_instance=remote_instance)

    # Add adjacency matrix
    if adjacency_matrix:
        if isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)):
            snapshot[
                'adjacency_matrix'] = connectivity.adjacency_from_connectors(
                    x, remote_instance=remote_instance)
        else:
            # Compute adjacency matrix from server-side connectivity
            snapshot['adjacency_matrix'] = connectivity.adjacency_matrix(
                x, remote_instance=remote_instance)

    # Add annotations
    if annotations:
        snapshot['annotations'] = fetch.get_annotations(
            skids, remote_instance=remote_instance)

    # Add connector details (also relies on ``skdata``)
    if cn_details:
        snapshot['cn_details'] = fetch.get_connector_details(
            skdata, remote_instance=remote_instance)

    return snapshot
Example #14
def watch_network(x,
                  sleep=3,
                  n_circles=1,
                  min_pre=2,
                  min_post=2,
                  layout=None,
                  remote_instance=None,
                  verbose=True):
    """ Loads and **continuously updates** a network into Cytoscape. Use
    CTRL-C to stop.

    Parameters
    ----------
    x :                 skeleton IDs | CatmaidNeuron/List
                        Seed neurons to keep track of.
    sleep :             int | None, optional
                        Time in seconds to sleep after each update.
    n_circles :         int, optional
                        Number of circles around seed neurons to include in
                        the network. See also :func:`pymaid.get_nth_partners`.
                        Set to ``None | 0 | False`` to only update
                        seed nodes.
    min_pre/min_post :  int, optional
                        Synapse threshold to apply to ``n_circles``.
                        Set to -1 to not get any pre-/postsynaptic partners.
                        Please note: as long as there is a single
                        above-threshold connection, a neuron will be included.
                        This does not remove other, sub-threshold connections.
    layout :            str | None, optional
                        Name of a Cytoscape layout. If provided, will update
                        the network's layout on every change.
    remote_instance :   CatmaidInstance, optional
    verbose :           bool, optional
                        If True, will log changes made to the network.

    Returns
    -------
    Nothing

    Examples
    --------
    >>> import pymaid
    >>> import pymaid.cytoscape as cytomaid
    >>> rm = pymaid.CatmaidInstance('server_url', 'http_user',
    ...                             'http_pw', 'auth_token')
    >>> # Don't forget to start Cytoscape!
    >>> cytomaid.watch_network('annotation:glomerulus DA1', min_pre=5,
    ...                         min_post=-1, sleep=5)
    >>> # Use CTRL-C to stop the loop
    """

    cy = get_client()

    remote_instance = utils._eval_remote_instance(remote_instance)

    sleep = 0 if not sleep else sleep

    x = utils.eval_skids(x, remote_instance=remote_instance)

    # Generate the initial network
    if n_circles:
        to_add = fetch.get_nth_partners(
            x,
            n_circles=n_circles,
            min_pre=min_pre,
            min_post=min_post,
            remote_instance=remote_instance).skeleton_id
    else:
        to_add = []
    g = graph.network2nx(np.concatenate([x, to_add]).astype(int),
                         remote_instance=remote_instance)
    network = generate_network(g,
                               clear_session=True,
                               apply_style=False,
                               layout=layout)

    if layout:
        cy.layout.apply(name=layout, network=network)

    logger.info('Watching network. Use CTRL-C to stop.')
    if remote_instance.caching:
        logger.warning('Caching disabled.')
        remote_instance.caching = False
    utils.set_loggers('WARNING')
    while True:
        if n_circles:
            to_add = fetch.get_nth_partners(
                x,
                n_circles=n_circles,
                min_pre=min_pre,
                min_post=min_post,
                remote_instance=remote_instance).skeleton_id
        else:
            to_add = []

        g = graph.network2nx(np.concatenate([x, to_add]).astype(int),
                             remote_instance=remote_instance)

        # Add nodes that came in new
        ntable = network.get_node_table()
        nodes_to_add = [s for s in g.nodes if s not in ntable.id.values]
        if nodes_to_add:
            network.add_nodes(nodes_to_add)

        # Update neuron names
        ntable = network.get_node_table()
        names = ntable.set_index('name').neuron_name.to_dict()
        names.update({s: g.nodes[s]['neuron_name'] for s in g.nodes})
        ntable['id'] = ntable.name
        ntable['neuron_name'] = ntable.name.map(names)
        network.update_node_table(ntable,
                                  data_key_col='name',
                                  network_key_col='name')

        # Remove nodes that do not exist anymore
        ntable = network.get_node_table()
        nodes_to_remove = ntable[~ntable['id'].isin(g.nodes)]
        if not nodes_to_remove.empty:
            for v in nodes_to_remove.SUID.values:
                network.delete_node(v)

        # Remove edges
        etable = network.get_edge_table()
        edges_removed = 0
        for e in etable.itertuples():
            if (e.source, e.target) not in g.edges:
                edges_removed += 1
                network.delete_edge(e.SUID)

        # Add edges
        etable = network.get_edge_table()
        edges = [(s, t)
                 for s, t in zip(etable.source.values, etable.target.values)]
        skid_to_SUID = ntable.set_index('name').SUID.to_dict()
        edges_to_add = []
        for e in set(g.edges) - set(edges):
            edges_to_add.append({
                'source': skid_to_SUID[e[0]],
                'target': skid_to_SUID[e[1]],
                'interaction': None,
                'directed': True
            })
        if edges_to_add:
            network.add_edges(edges_to_add)

        # Fix table and modify weights if applicable
        etable = network.get_edge_table()
        if not etable.loc[etable.source.isnull()].empty:
            etable.loc[etable.source.isnull(), 'source'] = etable.loc[
                etable.source.isnull(),
                'name'].map(lambda x: x[:x.index('(') - 1])
            etable.loc[etable.target.isnull(), 'target'] = etable.loc[
                etable.target.isnull(),
                'name'].map(lambda x: x[x.index(')') + 2:])
        new_weights = [
            g.edges[e]['weight'] for e in etable[['source', 'target']].values
        ]
        weights_modified = [
            new_w for new_w, old_w in zip(new_weights, etable.weight.values)
            if new_w != old_w
        ]
        etable['weight'] = new_weights
        # For some reason there is no official wrapper for this, so we have
        # to get our hands dirty
        network._CyNetwork__update_table('edge',
                                         etable,
                                         network_key_col='SUID',
                                         data_key_col='SUID')

        # If changes were made, give some feedback and/or change layout
        if (nodes_to_add or not nodes_to_remove.empty or edges_to_add
                or edges_removed or weights_modified):
            if verbose:
                logger.info(
                    '{} - nodes added/removed: {}/{}; edges added/removed/modified {}/{}/{}'
                    .format(
                        datetime.datetime.now(),
                        len(nodes_to_add),
                        len(nodes_to_remove),
                        len(edges_to_add),
                        edges_removed,
                        len(weights_modified),
                    ))

            if layout:
                cy.layout.apply(name=layout, network=network)

        # ZzzZzzzZ
        time.sleep(sleep)