def test_fetch_adjacencies(client):
    """Exercise fetch_adjacencies() three ways: NeuronCriteria sources/targets,
    bare bodyId lists (backwards-compatible API), and a non-matching query
    that must return empty-but-well-formed DataFrames."""
    bodies = [
        294792184, 329566174, 329599710, 417199910, 420274150, 424379864,
        425790257, 451982486, 480927537, 481268653
    ]
    neuron_df, roi_conn_df = fetch_adjacencies(NC(bodyId=bodies), NC(bodyId=bodies))

    # Only primary ROIs (plus the 'NotPrimary' catch-all) may appear in the results.
    unexpected_rois = set(roi_conn_df['roi'].unique()) - set(fetch_primary_rois()) - {'NotPrimary'}
    assert not unexpected_rois

    # Backwards compatibility: a plain list of bodyIds is accepted in place of
    # NeuronCriteria and must produce identical results.
    bodies = [
        294792184, 329566174, 329599710, 417199910, 420274150, 424379864,
        425790257, 451982486, 480927537, 481268653
    ]
    neuron_df2, roi_conn_df2 = fetch_adjacencies(bodies, bodies)

    # Same primary-ROI invariant for the list-based call.
    unexpected_rois2 = set(roi_conn_df2['roi'].unique()) - set(fetch_primary_rois()) - {'NotPrimary'}
    assert not unexpected_rois2

    # The two calling conventions must agree cell-for-cell.
    assert (neuron_df.fillna('') == neuron_df2.fillna('')).all().all()
    assert (roi_conn_df == roi_conn_df2).all().all()

    # Empty results: frames have no rows, but neuron columns are still present.
    neuron_df, roi_conn_df = fetch_adjacencies(879442155, 5813027103)
    assert len(neuron_df) == 0
    assert len(roi_conn_df) == 0
    assert neuron_df.columns.tolist() == ['bodyId', 'instance', 'type']
def test_fetch_adjacencies(client):
    """Exercise fetch_adjacencies() with SegmentCriteria sources/targets and
    with bare bodyId lists (backwards-compatible API); both must agree."""
    bodies = [
        294792184, 329566174, 329599710, 417199910, 420274150, 424379864,
        425790257, 451982486, 480927537, 481268653
    ]
    neuron_df, roi_conn_df = fetch_adjacencies(SC(bodyId=bodies), SC(bodyId=bodies))

    # Only primary ROIs (plus the 'NotPrimary' catch-all) may appear.
    unexpected_rois = set(roi_conn_df['roi'].unique()) - set(fetch_primary_rois()) - {'NotPrimary'}
    assert not unexpected_rois

    # Backwards compatibility: a plain list of bodyIds is accepted in place of
    # SegmentCriteria and must produce identical results.
    bodies = [
        294792184, 329566174, 329599710, 417199910, 420274150, 424379864,
        425790257, 451982486, 480927537, 481268653
    ]
    neuron_df2, roi_conn_df2 = fetch_adjacencies(bodies, bodies)

    # Same primary-ROI invariant for the list-based call.
    unexpected_rois2 = set(roi_conn_df2['roi'].unique()) - set(fetch_primary_rois()) - {'NotPrimary'}
    assert not unexpected_rois2

    # The two calling conventions must agree cell-for-cell.
    assert (neuron_df.fillna('') == neuron_df2.fillna('')).all().all()
    assert (roi_conn_df == roi_conn_df2).all().all()
def main():
    """CLI entry point: fetch the traced, non-cropped connectome from a
    neuPrint server, infer a nested block-model hierarchy, and pickle the
    results into a per-dataset export directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--neuprint-server', '-n', default='neuprint.janelia.org')
    parser.add_argument('--dataset', '-d')
    parser.add_argument('--init', '-i', choices=['groundtruth', 'random'])
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--min-weight', '-w', default=10, type=int)
    args = parser.parse_args()

    c = Client(args.neuprint_server, args.dataset)
    export_dir = f"{c.dataset}-w{args.min_weight}-from-{args.init}"
    os.makedirs(export_dir, exist_ok=True)

    # Fetch connectome (and export)
    with Timer("Fetching/exporting connectome", logger):
        criteria = NC(status='Traced', cropped=False, client=c)
        neuron_df, roi_conn_df = fetch_adjacencies(
            criteria, criteria, min_total_weight=args.min_weight,
            export_dir=export_dir, properties=['type', 'instance'], client=c)
        # Collapse per-ROI rows into one total weight per (pre, post) pair.
        conn_df = roi_conn_df.groupby(['bodyId_pre', 'bodyId_post'], as_index=False)['weight'].sum()

    strong_connections_df, g, nbs, partition_df = infer_hierarchy(neuron_df,
                                                                  conn_df,
                                                                  args.min_weight,
                                                                  args.verbose,
                                                                  args.debug)

    with Timer("Exporting inference results", logger):
        # Use context managers so every file handle is closed even if
        # pickling raises (the original open()/dump() pairs leaked handles).
        exports = {
            'graph.pkl': g,
            'nested-block-state.pkl': nbs,
            'partition_df.pkl': partition_df,
            'strong_connections_df.pkl': strong_connections_df,
        }
        for filename, obj in exports.items():
            with open(f'{export_dir}/{filename}', 'wb') as f:
                pickle.dump(obj, f)

    logger.info("DONE")
Exemple #4
0
def pn_kc_connections(properties=None,
                      sum_across_rois=False,
                      checks=True,
                      **kwargs):
    """
    Returns `neuron_df`, `connection_df` as `neuprint.fetch_adjacencies`, but
    only for PN->KC connections.

    Parameters
    ----------
    properties:
        Neuron properties to fetch. Defaults to the module-level `nc` list.
    sum_across_rois:
        If True, collapse per-ROI weights into one total per (pre, post) pair.
    checks:
        If True, run sanity assertions on the returned connection table.

    See also keyword arguments added by the `@fetch_function` decorator.

    Keywords not covered above are passed to `neuprint.fetch_adjacencies`.
    """
    if properties is None:
        # TODO TODO TODO replace hardcoded properties w/ something enumerated
        # using a cypher query
        properties = nc

    # TODO TODO TODO the docs make this sound like it might only return weights
    # for connections shared by ALL neurons matched. is this true? see also the
    # two kwargs dealing with minimum weights
    # TODO consider using rois=[<appropriate str for calyx>] to just match
    # those. compare results to those from manually filtering output not
    # specifying those rois.
    neuron_df, conn_df = nu.fetch_adjacencies(
        # TODO if i end up factoring some of this fn into something to be shared
        # across fns that get connections from one type to another, maybe just
        # test whether input strs have asterisk in them, and set regex=True if
        # so? first check if * is in any of the types that exist in the db!!!
        NC(type='.*PN.*', regex=True),
        NC(type='.*KC.*', regex=True),
        # Default is just ['type','instance']
        properties=properties,
        **kwargs)
    # TODO test the number of things returned above is the same as when doing
    # the equivalent CONTAINS query in the web interface
    # (also check just .* as suffix, not also prefix)

    if checks:
        # There CAN be (pre ID, post ID) duplicates, because weights are
        # reported per ROI, so need to sum across ROIs if we just want
        # the total weight between two neurons, ignoring where the synapses are.
        assert not conn_df.duplicated(
            [c for c in conn_df.columns if c != 'weight']).any()

        # TODO TODO technically only do this check if include_nonprimary=False
        # (so check kwargs) (if it's True, would need to do another check)
        # BUGFIX: use a subset test rather than equality — when every
        # connection happens to lie in a primary ROI, the set difference is
        # empty and strict equality with {'NotPrimary'} would fail spuriously.
        assert (set(conn_df.roi.unique()) -
                set(nu.fetch_primary_rois()) <= {'NotPrimary'})

    if sum_across_rois:
        conn_df = conn_df.groupby(['bodyId_pre',
                                   'bodyId_post']).weight.sum().reset_index()

    # No need to call filter_nontraced_or_cropped, at least as of 2020-05-31,
    # where it had no effect.

    return neuron_df, conn_df
Exemple #5
0
def fetch_adjacency(criteria=None,
                    prefix='noncropped_traced',
                    force_download=False,
                    neuprint=conf.enable_neuprint,
                    adjpath=None,
                    **kwargs):
    '''
    Simple neuprint.fetch_adjacencies wrapper.
    Checks whether datasets were already downloaded and loads them accordingly.
    By default func loads traced and noncropped neurons.

    Parameters
    ----------
    criteria:
        NeuronCriteria for sources/targets; defaults to Traced, non-cropped.
    prefix:
        Directory-name prefix used when composing `adjpath`.
    force_download:
        If True, re-download even when cached CSVs exist (requires neuprint).
    neuprint:
        Whether a neuprint connection is available/allowed.
    adjpath:
        Explicit dataset directory; composed from prefix/rois when None.

    Returns (neurons, adj) DataFrames.
    '''

    assert not force_download or neuprint, 'no neuprint; cannot download dataset'

    # Compose adjpath from the prefix and (optional) ROI list.
    if adjpath is None:
        # BUGFIX: only touch kwargs['rois'] when it was actually supplied —
        # the original accessed it unconditionally and raised KeyError when
        # callers omitted `rois`.
        if 'rois' in kwargs:
            if not isinstance(kwargs['rois'], list):
                kwargs['rois'] = [kwargs['rois']]
            postfix = '_' + '.'.join(kwargs['rois'])
        else:
            postfix = ''

        datadir = conf.datasets_dir
        adjpath = os.path.join(datadir, prefix + postfix)

    roipath = os.path.join(adjpath, conf.roi_connections_file)
    neurpath = os.path.join(adjpath, conf.neurons_file)

    files_exist = os.path.exists(adjpath) and os.path.exists(
        roipath) and os.path.exists(neurpath)
    assert files_exist or neuprint, 'no neuprint; cannot find dataset (and no way of fetching)'

    if neuprint:
        # Lazily build a client unless the caller passed one through kwargs.
        if 'client' not in kwargs:
            kwargs['client'] = Client(conf.neuprint_URL, conf.dataset_version,
                                      conf.api_token)

        if criteria is None:
            criteria = NeuronCriteria(status='Traced',
                                      cropped=False,
                                      client=kwargs['client'])

    print('dataset in adjpath=', adjpath)

    # Reuse the existence check computed above instead of re-testing each path.
    if files_exist and not force_download:
        print('dataset already downloaded')
        adj = pd.read_csv(roipath)
        neurons = pd.read_csv(neurpath)
    else:
        print('downloading dataset')
        print(criteria)
        neurons, adj = fetch_adjacencies(sources=criteria,
                                         targets=criteria,
                                         export_dir=adjpath,
                                         **kwargs)

    return neurons, adj
Exemple #6
0
 def get_adjacencies_neuprint(self,
                              accessions,
                              threshold=1,
                              testmode=False):  # TODO: testmode is unused
     """Fetch neuron-to-neuron connectivity among `accessions` via neuPrint.

     Per-ROI weights are summed into one total per (pre, post) pair, the
     columns are renamed to source/target, and pairs whose total weight is
     not strictly greater than `threshold` are dropped.
     """
     neuron_df, adjacency = fetch_adjacencies(sources=accessions,
                                              targets=accessions)
     # Collapse per-ROI rows into a single whole-brain total per pair.
     totals = adjacency.groupby(['bodyId_pre', 'bodyId_post'],
                                as_index=False)['weight'].sum()
     totals.rename(columns={
         'bodyId_pre': 'source',
         'bodyId_post': 'target'
     },
                   inplace=True)
     # Keep only connections strictly stronger than the threshold.
     totals = totals[totals.weight > threshold]
     return totals
Exemple #7
0
def build_matrix(results):
    """Download the adjacency among the neurons listed in `results` and save
    it as a weighted sparse COO matrix plus a bodyId -> vertex-index mapping.

    Parameters
    ----------
    results:
        DataFrame with a 'bodyId' column selecting the neurons of interest.

    Side effects: writes two .npy files under `root + "structure/"`.
    """
    ntnstatus('Downloading structural information from Janelia')
    bids = list(results['bodyId'].values)  # bodyIds of neurons to consider
    # All synapses between these neurons, with per-ROI weights.
    neuron_df, conn_df = fetch_adjacencies(bids, bids)
    # Combine repeated (pre, post) edges across ROIs by summing their weights.
    conn_df2 = conn_df.groupby(
        ['bodyId_pre', 'bodyId_post'], as_index=False)['weight'].sum()

    # Map each bodyId to a dense vertex index (enumerate, not range(len(...))).
    bodyIdtoId = {bid: i for i, bid in enumerate(bids)}

    # Assemble the weighted adjacency in COO format.
    row = [bodyIdtoId[b] for b in conn_df2['bodyId_pre'].values]
    col = [bodyIdtoId[b] for b in conn_df2['bodyId_post'].values]
    data = list(conn_df2['weight'].values)
    N = len(bids)
    mcoo = sparse.coo_matrix((data, (row, col)), shape=(N, N))
    # NOTE: np.save pickles these non-array objects; consumers must load
    # them with np.load(..., allow_pickle=True).
    np.save(root + "structure/drosophila_weighted.npy", mcoo)
    np.save(root + "structure/drosophila_bid2vertex.npy", bodyIdtoId)
Exemple #8
0
import sys
sys.path.append('..')

import fly

# Ensure the download and processing directories exist before fetching.
os.makedirs('../data', exist_ok=True)
os.makedirs('../processed', exist_ok=True)


# The target ROI is given as the first command-line argument.
roi = sys.argv[1]
print('Downloading', roi)
print()

# Fetch the traced, non-cropped connectome restricted to this ROI and
# export it as CSVs under ../data/noncropped_traced_<roi>/.
criteria = NeuronCriteria(status="Traced", cropped=False)
fetch_adjacencies(criteria, criteria, export_dir='../data/noncropped_traced_{}'.format(roi), rois=[roi])


# Reload the exported per-ROI connection table.
df = pd.read_csv('../data/noncropped_traced_{}/roi-connections.csv'.format(roi))

# Keep only rows inside the requested ROI, then collect every neuron that
# appears as either a pre- or post-synaptic partner.
dfroi = df[df.roi==roi]
neurons = sorted(list(set(dfroi.bodyId_post.unique()).union(set(dfroi.bodyId_pre.unique()))))
N = len(neurons)

# Build a dense index: neuron_ids[i] is the bodyId of vertex i, and
# neuron[bodyId] is the vertex index of that bodyId.
neuron_ids = np.zeros(N, dtype=int)

neuron = dict()
for i, nid in enumerate(neurons):
    neuron[nid] = i
    neuron_ids[i] = nid
Exemple #9
0
def fetch_adj_and_merge(criteria):
    """Fetch adjacencies from `criteria` onto the AB(L)-input test population,
    then annotate the connection table with type/instance columns."""
    targets = NC(inputRois='AB(L)')
    neurons, connections = fetch_adjacencies(criteria, targets)
    annotated = merge_neuron_properties(neurons, connections, ['type', 'instance'])
    return annotated
]])
print(roi_counts_df.query('bodyId==5813128308'))  # synapse counts for ROIs
# print(neuron_df.columns)
# neuron_df, roi_counts_df = fetch_neurons(NC(type='MBON.*', regex=True)) # get mushroom body output neurons

# ------------------------------------------
# Fetch connections
# ------------------------------------------
''' Find synaptic connection strengths between one set of neurons and another using fetch_adjacencies().
The “source” and/or “target” neurons are selected using NeuronCriteria. Additional parameters allow you
to filter by connection strength or ROI. Two DataFrames are returned, for properties of pre/post synaptic
neurons and per-ROI connection strengths (for each pre-post pair)
'''

# Example: Fetch all downstream connections FROM a set of neurons
# (targets=None means "any downstream partner").
neuron_df, conn_df = fetch_adjacencies(
    sources=[387023620, 387364605, 416642425], targets=None)

# Example: Fetch all upstream connections TO a set of neurons
# (sources=None means "any upstream partner").
neuron_df, conn_df = fetch_adjacencies(
    sources=None, targets=[387023620, 387364605, 416642425])

# Example: Fetch all direct connections between a set of upstream neurons and downstream neurons
neuron_df, conn_df = fetch_adjacencies(sources=NC(type='Delta.*', regex=True),
                                       targets=NC(type='PEN.*', regex=True))

# Print connections in descending order of strength
print(conn_df.sort_values('weight', ascending=False))

# ------------------------------------------
# Connection matrix
# ------------------------------------------
Exemple #11
0
def get_all_connections():
    """Fetch every connection whose source matches an empty NeuronCriteria
    (i.e. all neurons) with no target filter, and dump the table to CSV."""
    crit = NC()
    _neurons, connections = fetch_adjacencies(crit, None)
    print('got adjacencies!!!!!!!!!!!!!!!!!!!!!!')
    connections.to_csv('all_connections.csv', index=False)
    print('got csv file!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')