        return catmaid.algorithms.population.distance.near_path_length(
            nd,
            na,
            g1=nd.dendrites,
            g2=na.dendrites,
            distance=distance,
            resample_distance=resample_distance)
    except Exception as e:
        # return the exception instead of raising, so a parallel map can
        # collect failures without aborting the whole run
        return e


n_jobs = -1
output_file = '../../results/scripts/dendrite_near_path_lengths_%i_%i.csv' % (
    int(distance), int(resample_distance))

s = catmaid.get_source('../../data/skeletons')
s._cache = None
pairs = []
if os.path.exists(pairs_fn):
    print("Loading pairs from csv: %s" % pairs_fn)
    with open(pairs_fn, 'r') as f:
        for l in f:
            if l.strip() != '':
                pairs.append(map(int, l.strip().split(',')))
else:
    # NOTE: the bare raise below makes the rest of this branch unreachable;
    # it looks like disabled code for rebuilding the pair list from ids
    raise Exception("pairs file not found: %s" % pairs_fn)
    sids = []
    with open('skels2pull.csv', 'r') as f:
        for l in f:
            if l.strip() != '':
                sids.append(l.strip())
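
# Hedged sketch of how `pairs` might be consumed: `n_jobs = -1` above
# suggests a joblib-style parallel map, and the helper name
# `pair_near_path_length` is an assumption for the truncated function above.
# from joblib import Parallel, delayed
# results = Parallel(n_jobs=n_jobs)(
#     delayed(pair_near_path_length)(sid1, sid2) for sid1, sid2 in pairs)
# with open(output_file, 'w') as f:
#     for (sid1, sid2), r in zip(pairs, results):
#         f.write('%i,%i,%r\n' % (sid1, sid2, r))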
Example #2
    'soma',
    'myelinated',
    'unmyelinated',
    'root_myelinated',
    'root_unmyelinated',
    'ends',
    'damage',
    'axon',
    'projection',
    'backbone',
]

# connect to catmaid and get the source from the connection
# default to using environmental variables
c = catmaid.connect()
src = catmaid.get_source(c, ignore_none_skeletons=True)

# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# create two master lists: one for error messages, one for url links
master_errors = []
master_urls = []

root_errors, root_urls = [], []
leaf_tag_errors, leaf_tag_urls = [], []
loop_errors, loop_urls = [], []
incorrect_tag_errors, incorrect_tag_urls = [], []
max_distance_errors, max_distance_urls = [], []
min_distance_errors, min_distance_urls = [], []
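
# Hedged sketch: once the individual checks (not shown in this snippet)
# have filled the per-check lists, they would presumably be merged into
# the master lists along these lines:
for errs, urls in [(root_errors, root_urls),
                   (leaf_tag_errors, leaf_tag_urls),
                   (loop_errors, loop_urls),
                   (incorrect_tag_errors, incorrect_tag_urls),
                   (max_distance_errors, max_distance_urls),
                   (min_distance_errors, min_distance_urls)]:
    master_errors.extend(errs)
    master_urls.extend(urls)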
Example #3
import catmaid
import numpy
import pickle
from time import strftime
import sys

filesource = '../../data/skeletons/'
# TODO transformfile should be set as environmental variable
#      and an error thrown if not set
transformfile = './physical_section_affine_4x4.p'
resXYZ = numpy.array([18.85, 18.85, 60.])
src = catmaid.get_source(filesource)

try:
    outfile = sys.argv[1]
except IndexError:
    # no output path on the command line; fall back to a timestamped default
    outfile = '../../results/exports/{}_130201zf142_' \
              'ALLNODE_dump_RAW.txt'.format(strftime('%y%m%dT%H%M'))

with open(transformfile, 'rb') as f:  # binary mode for pickled data
    transforms = pickle.load(f)
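
# Hedged sketch of how one of the loaded 4x4 affines could be applied to a
# node coordinate (the keying of `transforms` and the name `section_index`
# are assumptions, not taken from this script):
# A = transforms[section_index]            # hypothetical 4x4 numpy array
# x_t, y_t, z_t, _ = A.dot([x, y, z, 1.])  # homogeneous-coordinate transform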

with open(outfile, 'w') as f:
    for sid in src.skeleton_ids():
        n = src.get_neuron(sid)
        #if any([('blacklist' in anno) for anno in n.annotations]):
        #    continue
        for nid in n.nodes:
            if nid == n.root:
                parent = 'root'
            else:
Example #4
    help="Max number of attempts to fetch skeletons in case error encountered")
parser.add_argument(
    '-n',
    '--ignore_none_skeletons',
    action='store_true',
    help="Option to ignore fetching invalid skeletons with type None, "
    "such as those which have been deleted or merged "
    "since starting fetch.")
opts = parser.parse_args()

# create a skeleton source (which connects to catmaid)
# the server, user and project to use can be set by
# - environment variables (see README)
# - interactive command prompts
# - creating and passing in a connection (see catmaid.connect)
source = catmaid.get_source(ignore_none_skeletons=opts.ignore_none_skeletons)

# source is now a ServerSource that can fetch skeletons from catmaid
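
# Hedged alternative: an explicit connection can also be created and passed
# in, using the Connection signature seen elsewhere in these examples
# (server URL, user, password, project id):
# conn = catmaid.connection.Connection(server, user, password, project_id)
# source = catmaid.get_source(
#     conn, ignore_none_skeletons=opts.ignore_none_skeletons)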

# check if only a subset of ids should be saved
sids = opts.sids
if opts.idfile is not None:
    sids = []  # overwrite any ids provided on the command line
    ext = os.path.splitext(opts.idfile)[1].lower()
    if ext in ('.p', '.pickle', '.pkl'):
        # assume these are pickled iterators
        import cPickle as pickle
        with open(opts.idfile, 'rb') as f:  # binary mode for pickled data
            sids = pickle.load(f)
    elif ext in ('.json', '.js'):
        import json
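        # Hedged completion (the snippet is truncated here): presumably the
        # json file holds a list of skeleton ids, e.g.
        # with open(opts.idfile, 'r') as f:
        #     sids = json.load(f)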
Example #5
def collect_project_data(source, conn=None, skels_list=None, except_anno=None):
    '''Collect per-skeleton and project-wide pathlength statistics,
    skipping any skeleton that carries an annotation in except_anno.'''
    if except_anno is not None:
        print "Excluding all skeletons with annotation(s): {}".format(
            except_anno)
    print "Collecting Project Data..."
    all_data = {}
    numskels = 0
    somareconstructed = 0
    max_somapathlengths = {}
    min_somapathlengths = {}
    mean_somapathlengths = {}
    median_somapathlengths = {}
    max_leafpathlengths = {}
    min_leafpathlengths = {}
    mean_leafpathlengths = {}
    median_leafpathlengths = {}
    hassoma = {}
    totalpaths = {}
    allsomapathlengths, allleafpathlengths = [], []
    s = catmaid.get_source(source)
    if skels_list is not None:
        skels = skels_list
    else:
        skels = s.skeleton_ids()
    for sid in skels:
        # counter used to skip skeletons carrying an excluded annotation
        skip_skel = 0
        if isinstance(sid, list):
            sid = sid[0]
        n = s.get_neuron(sid)
        if except_anno is not None:
            if len(except_anno) > 1:
                for e in except_anno:
                    if any(a[0] == e for a in n.annotations):
                        skip_skel += 1
            else:
                if any(a[0] == except_anno[0] for a in n.annotations):
                    continue
        # Skip this skeleton if it has any excluded annotations
        if skip_skel != 0:
            continue
        totalpaths[sid] = total_pathlength(n)
        if len(n.nodes) > nnodethres:
            numskels += 1
        if 'soma' in n.tags:
            hassoma[sid] = 1
        else:
            hassoma[sid] = 0
        if len(n.nodes) > 1:
            rootpathlengths = [path_length(n, n.root, leaf)
                               for leaf in n.leaves]
            if len(n.leaves) > 1:
                leafpathlengths = [path_length(n, leaf1, leaf2)
                                   for leaf1, leaf2 in combinations(
                                    n.leaves, 2)]
            else:
                leafpathlengths = rootpathlengths

            if hassoma[sid]:
                somareconstructed += 1
                max_somapathlengths[sid] = max(rootpathlengths)
                min_somapathlengths[sid] = min(rootpathlengths)
                mean_somapathlengths[sid] = numpy.mean(rootpathlengths)
                median_somapathlengths[sid] = numpy.median(rootpathlengths)
                allsomapathlengths.extend(rootpathlengths)
            allleafpathlengths.extend(leafpathlengths)
            max_leafpathlengths[sid] = max(leafpathlengths)
            min_leafpathlengths[sid] = min(leafpathlengths)
            mean_leafpathlengths[sid] = numpy.mean(leafpathlengths)
            median_leafpathlengths[sid] = numpy.median(leafpathlengths)
    project_pathlength = sum(totalpaths.values())
    max_pathlength = max(totalpaths.values())
    mean_pathlength = numpy.mean(totalpaths.values())
    min_pathlength = min(totalpaths.values())
    median_pathlength = numpy.median(totalpaths.values())
    project_somata = somareconstructed
    somapercent = float(project_somata)/float(numskels)
    projdata = {'length traced': project_pathlength,
                'number of somata': project_somata,
                'number of reconstructions': numskels,
                'percent with soma': somapercent,
                'max length traced': max_pathlength,
                'min length traced': min_pathlength,
                'mean length traced': mean_pathlength,
                'median length traced': median_pathlength,
                'soma to leaf max': max(allsomapathlengths),
                'soma to leaf min': min(allsomapathlengths),
                'soma to leaf median': numpy.median(allsomapathlengths),
                'soma to leaf mean': numpy.mean(allsomapathlengths),
                'total pathlength of neurons with a soma': sum(allsomapathlengths),
                'leaf to leaf max': max(allleafpathlengths),
                'leaf to leaf min': min(allleafpathlengths),
                'leaf to leaf median': numpy.median(allleafpathlengths),
                'leaf to leaf mean': numpy.mean(allleafpathlengths)}
    all_data['max_somapathlengths'] = max_somapathlengths
    all_data['min_somapathlengths'] = min_somapathlengths
    all_data['mean_somapathlengths'] = mean_somapathlengths
    all_data['median_somapathlengths'] = median_somapathlengths
    all_data['max_leafpathlengths'] = max_leafpathlengths
    all_data['min_leafpathlengths'] = min_leafpathlengths
    all_data['mean_leafpathlengths'] = mean_leafpathlengths
    all_data['median_leafpathlengths'] = median_leafpathlengths
    all_data['hassoma'] = hassoma
    all_data['totalpaths'] = totalpaths
    all_data['allsomapathlengths'] = allsomapathlengths
    all_data['allleafpathlengths'] = allleafpathlengths
    all_data['project_pathlength'] = project_pathlength
    all_data['somapercent'] = somapercent
    all_data['projdata'] = projdata
    print "Project Data Collected"
    return all_data
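
# Hedged usage sketch (the path and annotation name are assumptions):
# all_data = collect_project_data('../../data/skeletons',
#                                 except_anno=['Blacklist'])
# print all_data['projdata']['length traced']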
Example #6
#!/usr/bin/env python

import catmaid

src = catmaid.get_source('../../data/skeletons', cache=False)

allsids = []
PLLnsids = []
Dnmsids = []
Onmsids = []
nucMLFsids = []
Mauthsids = []
Anmsids = []
for sid in src.skeleton_ids():
    neu = src.get_neuron(sid)
    if not any([('Blacklist' in anno) for anno in neu.annotations]):
        allsids.append(neu.skeleton_id)

        # FIXME probably a better way to handle these
        # generate lists of mutually exclusive cell descriptions
        if any([('PLLn' in anno) for anno in neu.annotations]):
            PLLnsids.append(neu.skeleton_id)
        elif any([('Dorsal Neuromast' in anno) for anno in neu.annotations]):
            Dnmsids.append(neu.skeleton_id)
        elif any([('Anterior Neuromast' in anno) for anno in neu.annotations]):
            Anmsids.append(neu.skeleton_id)
        elif any([('Occipital Neuromast' in anno)
                  for anno in neu.annotations]):
            Onmsids.append(neu.skeleton_id)
        elif any([('nucMLF' in anno) for anno in neu.annotations]):
            nucMLFsids.append(neu.skeleton_id)
        elif any([('Mauthner' in anno) for anno in neu.annotations]):
            Mauthsids.append(neu.skeleton_id)
import catmaid
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import csv

csv_w = csv.writer(open('ax.csv', 'wb'))
csv_2 = csv.writer(open('den.csv', 'wb'))
c = catmaid.connection.Connection('http://catmaid.hms.harvard.edu',
                                  'thomas.lo', 'asdfjkl;', 9)
src = catmaid.get_source(c)

n1 = src.get_neuron(325123)
n2 = src.get_neuron(38321)
g1 = catmaid.algorithms.morphology.resample_edges_2(n1, 400., ['axon'])
g2 = catmaid.algorithms.morphology.resample_edges_2(n2, 400., ['dendrite'])
t_s = catmaid.algorithms.population.synapses.skeleton_overlap_v_verbose_ids(
    n1, n2, 1000., 10000., g1, g2)
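
# t_s appears to be a list of (node_id_a, node_id_b, overlap_value) triples,
# judging from how it is unpacked below.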

fig = plt.figure()
ax = fig.gca(projection='3d')
min_r = float(t_s[0][2])
max_r = min_r
sum_r = 0.0
for (_, _, r) in t_s:
    min_r = min(min_r, float(r))
    max_r = max(max_r, float(r))
    sum_r += float(r)
print("With Resampling: SUM:{} MIN:{} MAX:{}".format(sum_r, min_r, max_r))
for (a, b, r) in t_s:
    [x1, y1, z1] = [n1.nodes[a][i] for i in ['x', 'y', 'z']]
Example #8
        myFile.write('<html>')
        myFile.close()


def fetch_and_check(source):
    '''fetch the skeletons to the computer, and check each skeleton'''
    sids = fetch_skeletons_pc(source)
    problemSkels = check_skeleton_list(sids, source)
    return problemSkels


def problem_to_skels(problemSkels):
    '''return a dict, key is a problem type, value is a list of skel_ids'''
    problem_to_skels_dict = {}
    for skel, problems in problemSkels.items():
        for key in problems:
            problem_to_skels_dict.setdefault(key, []).append(skel)
    return problem_to_skels_dict
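
# Hedged usage sketch tying the two helpers together:
# problems = fetch_and_check(src)
# by_type = problem_to_skels(problems)
# for ptype, skels in by_type.items():
#     print "{}: {} skeletons".format(ptype, len(skels))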


if __name__ == '__main__':
    c = connect_to_catmaid()
    src = catmaid.get_source(c, opts.cache)
    problem_viewer(src)
Example #9
import catmaid
#from mpl_toolkits.mplot3d import Axes3D
import csv
import numpy as np

c_a = csv.writer(open('ax.csv', 'wb'))
c_d = csv.writer(open('den.csv', 'wb'))
c = catmaid.connection.Connection('http://catmaid.hms.harvard.edu',
                                  'thomas.lo', 'asdfjkl;', 9)
src = catmaid.get_source(c)

n1 = src.get_neuron(325123)
n2 = src.get_neuron(38321)
g1 = catmaid.algorithms.morphology.resample_edges_2(n1, 400., ['axon'])
g2 = catmaid.algorithms.morphology.resample_edges_2(n2, 400., ['dendrite'])
t_s = catmaid.algorithms.population.synapses.skeleton_overlap_v_verbose_ids(
    n1, n2, 1000., 10000., g1, g2)


min_r = float(t_s[0][2])
max_r = min_r
sum_r = 0.0
for (_, _, r) in t_s:
    min_r = min(min_r, float(r))
    max_r = max(max_r, float(r))
    sum_r += float(r)
print("With Resampling: SUM:{} MIN:{} MAX:{}".format(sum_r, min_r, max_r))

for (u, k, r) in t_s:
    xyz = []
Example #10
import argparse
import catmaid
import catmaid.algorithms.images as IM
import os
import scipy.misc
import urllib2

conn = catmaid.connect()
s = catmaid.get_source(conn)


def gen_images(connection, zrange, center, outdir, imgshape=(1024, 1024)):
    if not isinstance(zrange, tuple):
        raise Exception("Must pass in a tuple for the image range!")
    if not isinstance(center, tuple):
        raise Exception("Must pass in a tuple for the center position")
    broken_slices = [
        int(a) for a in connection.stack_info()[6][u'broken_slices'].keys()
    ]
    for z in range(zrange[0], zrange[1]):
        if z not in broken_slices:
            print "Outputting image for z: {}".format(z)
            fn = '{}_sub.png'.format(str(int(z)).zfill(5))
            try:
                image, no_overlay = IM.img_from_catmaid(connection,
                                                        center[0],
                                                        center[1],
                                                        int(z),
                                                        imgshape=imgshape,
                                                        stack_id=6,
                                                        tiletype=4,
Example #11
    if opts.name is None:
        # default the name to the input filename stem
        opts.name = os.path.splitext(opts.input)[0]
    bn = None
else:  # assuming opts.input is a list of skeleton ids all 1 group
    logging.debug("Loading input from string: %s", opts.input)
    groups = [opts.input.split(',')]
    bn = None
logging.debug("Found %s groups", len(groups))

all_sids = []
for g in groups:
    all_sids += g

# prepare source
logging.debug("Getting source: %s", opts.source)
s = catmaid.get_source(opts.source)
logging.debug("Preparing source: %s", s)
# TODO figure out a way to check the source to see if conversion is needed
tree_dir, conn_fn, fails = catmaid.rendering.render.prepare_source(s, all_sids)
#tree_dir = opts.source + '/trees'
#conn_fn = opts.source + '/trees/conns.p'
#fails = None
logging.debug("Trees are in %s, conn file is %s", tree_dir, conn_fn)
if opts.conns is not None:
    # TODO figure out what to do here
    assert opts.conns == conn_fn

# make general kwargs
kwargs = {
    'conns_fn': conn_fn,
    'attrs_fn': full_path(opts.attrs),
Example #12
                     '--threads',
                     type=int,
                     required=False,
                     help="The number of threads to use for smoothing")
opts = parser.parse_args()
if opts.source:
    indir = opts.source
else:
    indir = None
if opts.dest:
    outdir = opts.dest
else:
    print "Outdir not passed through. Reverting to default"
    # keep the module-level default `outdir` (defined above, not shown)
print "Connecting to source"
s = catmaid.get_source(indir)
if outdir[-1] != '/':
    outdir += '/'
if opts.threads:
    cpu_count = mp.cpu_count()
    if opts.threads > cpu_count:
        # cap a too-large request at half the available cores
        cpu_count = int(cpu_count / 2)
    else:
        cpu_count = opts.threads
else:
    cpu_count = int(mp.cpu_count() / 2)
if cpu_count < 1:  # halving a single core would give 0 processes
    cpu_count = 1
print "Creating pool operator with {} CPUS".format(cpu_count)
pool = mp.Pool(processes=cpu_count)
print "Creating Smoothing Object"
Example #13
import catmaid
import scipy.io
import numpy
from httplib import BadStatusLine

# First, set up a new Source
src = catmaid.get_source(cache=False)

print 'Pulling Adj and Skeletons from source'
adj, skels = src.get_graph()
scipy.io.savemat('adjacency.mat', mdict={'adjacency': adj})
scipy.io.savemat('skeletons.mat', mdict={'skeletons': skels})

print 'Pulling ApicalList and EMidOriRGBSpeed list from file'
fn_a = 'ApSkList'
fn_e = 'EMidOriRGBneuronIDSFTFspeed'
ApicalList = scipy.io.loadmat(fn_a+'.mat')[fn_a].astype(int)
EMidOriSpd = scipy.io.loadmat(fn_e+'.mat')[fn_e]

c = src._skel_source
somalist = []
COMList = []
print 'Creating SomaList and COMList'
for x in skels:
    st = str(x)
    print st
    try:
        urltemp = 'http://catmaid.hms.harvard.edu/9/skeleton/' +\
                  st + '/get-root'
        jsonA = c.fetchJSON(urltemp)
        rid = jsonA['root_id']
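    except BadStatusLine:
        # hedged completion (the snippet is truncated here): the
        # BadStatusLine import above suggests that a dropped HTTP
        # connection was skipped or retried for this skeleton
        continue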
Example #14
    sep = os.sep
    base_dir = "Z:" + sep + "Data" + sep + "simulation" + sep + "tracing"
    skeleton_dir = os.path.join(base_dir, 'skeletons')

    # Make our base directory
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)

    # Make a directory for our skeletons, nested one deep in our base directory.
    # We'll be saving individual skeletons as JSON files here.
    if not os.path.exists(skeleton_dir):
        os.mkdir(skeleton_dir)

    # Get a skeleton source (this connects to the catmaid server)
    thesource = catmaid.get_source()

    # Save out all the annotations for our skeletons
    annotations_dict = thesource._skel_source.fetchJSON(
        'http://catmaid2.hms.harvard.edu/6/neuron/table/query-by-annotations')
    with open(os.path.join(base_dir, 'annotations.json'), 'w') as f:
        json.dump(annotations_dict, f)

    # Pull connectors for all neurons in the project and save them to a json file called connectors
    connectors = catmaid.algorithms.population.network.find_conns(
        thesource.all_neurons_iter())
    with open(os.path.join(base_dir, 'connectors.json'), 'w') as f:
        json.dump(connectors, f)

    # For every skeleton, get a dictionary representation, and save it to a JSON file
    # in our skeletons directory
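    # Hedged completion sketch; `get_skeleton_json` is an assumed accessor
    # name, not confirmed by this snippet:
    # for sid in thesource.skeleton_ids():
    #     skel_dict = thesource._skel_source.get_skeleton_json(sid)
    #     with open(os.path.join(skeleton_dir, '%s.json' % sid), 'w') as f:
    #         json.dump(skel_dict, f)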