Example #1
import argparse
import os

def main_all_human():
    parser = argparse.ArgumentParser()
    parser.add_argument('output_dir', nargs='?', default='.')
    args = parser.parse_args()

    from allensdk.core.cell_types_cache import CellTypesCache
    from allensdk.api.queries.cell_types_api import CellTypesApi
    ctc = CellTypesCache(
        manifest_file=os.path.join(args.output_dir, "ctc", "manifest.json"))

    cells = ctc.get_cells(require_reconstruction=True,
                          species=[CellTypesApi.HUMAN])

    for cell in cells:
        morphology = fetch_aligned_morphology(specimen_id=cell['id'])

        cell_dir = os.path.join(args.output_dir, str(cell['id']))

        if not os.path.exists(cell_dir):
            os.makedirs(cell_dir)

        # swc_file = os.path.join(cell_dir, "recon.swc")
        ply_file = os.path.join(cell_dir, "recon.ply")
        vtk_file = os.path.join(cell_dir, "recon.vtk")

        tube_pd = vtkmorph.generate_mesh(morphology.compartment_index,
                                         morphology.root,
                                         color_by_type,
                                         6,
                                         radius=None)
        vtkmorph.write_ply(tube_pd, ply_file)
        vtkmorph.write_vtk(tube_pd, vtk_file)

        print(ply_file)
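`fetch_aligned_morphology`, `vtkmorph`, and `color_by_type` are helpers defined elsewhere in this project. As a rough sketch of the fetch step alone (skipping the alignment), the reconstruction could be pulled through the same cache; the helper name here is hypothetical:

def fetch_morphology(ctc, specimen_id):
    # Hypothetical stand-in: get_reconstruction downloads and caches the SWC
    # file for this specimen and returns a Morphology object.
    return ctc.get_reconstruction(specimen_id)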
Example #2
class AllenMorphology(Paths):
    def __init__(self, *args, **kwargs):
        if not connected_to_internet():
            raise ConnectionError("You will need to be connected to the internet to use the AllenMorphology class")

        Paths.__init__(self, *args, **kwargs)

        # Create a Cache for the Cell Types Cache API
        self.ctc = CellTypesCache(manifest_file=os.path.join(self.morphology_allen, 'manifest.json'))

        # Get a list of cell metadata for neurons with reconstructions, download if necessary
        self.neurons = pd.DataFrame(self.ctc.get_cells(species=[CellTypesApi.MOUSE],
                                                        require_reconstruction=True))
        self.n_neurons = len(self.neurons)
        if not self.n_neurons:
            raise ValueError("Could not fetch neuron metadata from the Allen database")

        self.downloaded_neurons = self.get_downloaded_neurons()

    def get_downloaded_neurons(self):
        return [os.path.join(self.morphology_allen, f) for f in os.listdir(self.morphology_allen) if f.endswith(".swc")]

    def download_neurons(self, ids):
        if isinstance(ids, np.ndarray):
            ids = list(ids)
        if not isinstance(ids, list):
            ids = [ids]

        for neuron_id in ids:
            neuron_file = os.path.join(self.morphology_allen, "{}.swc".format(neuron_id))
            self.ctc.get_reconstruction(neuron_id, file_name=neuron_file)
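A minimal usage sketch for the class above (assumes the Paths base class supplies the morphology_allen directory and that os, numpy, pandas, and the AllenSDK imports shown in later examples are in place):

am = AllenMorphology()
am.download_neurons(list(am.neurons['id'].values[:3]))  # fetch the first three reconstructions
print(am.get_downloaded_neurons())                      # paths of the cached .swc files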
Example #3
import os
import pickle
from functools import partial

import pandas as pd
from ipyparallel import Client

def Main():
    filter_obj = {
        'dendrite_type': 'spiny',
        'structure_layer_name': '5',
        'structure_area_abbrev': 'VISp'
    }

    ctc = CellTypesCache()
    cells = ctc.get_cells(species=['Mus musculus'])

    cells_df = pd.DataFrame(cells)
    for filt_key, filt_val in filter_obj.items():
        cells_df = cells_df.loc[cells_df[filt_key] == filt_val, :]

    cell_ids = list(cells_df['id'].values)
    rc = Client(profile=os.getenv('IPYTHON_PROFILE'))
    logger.debug('Using ipyparallel with %d engines', len(rc))
    lview = rc.load_balanced_view()

    func = partial(get_fi_data, ctc)
    filter_fi_data = lview.map_sync(func, cell_ids)
    filter_fi_data = [data for data in filter_fi_data if data is not None]
    file_name = 'fi_data.pkl'
    with open(file_name, 'wb') as fh:
        pickle.dump(filter_fi_data, fh)
    plot_fi_data(filter_fi_data)

    rc.shutdown(hub=True)
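`get_fi_data`, `plot_fi_data`, and `logger` are defined elsewhere in this script. A hedged sketch of what `get_fi_data` might compute, pairing each Long Square stimulus amplitude with its spike count (the signature matches the `partial` call above, but the exact feature set is an assumption):

def get_fi_data(ctc, cell_id):
    # Collect (stimulus amplitude in pA, spike count) pairs from Long Square sweeps
    try:
        data_set = ctc.get_ephys_data(cell_id)
        sweeps = ctc.get_ephys_sweeps(cell_id)
    except Exception:
        return None  # None results are filtered out by the caller
    pairs = []
    for sweep in sweeps:
        if sweep['stimulus_name'] != 'Long Square':
            continue
        meta = data_set.get_sweep_metadata(sweep['sweep_number'])
        pairs.append((float(meta['aibs_stimulus_amplitude_pa']), sweep['num_spikes']))
    return cell_id, sorted(pairs)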
Example #4
import os
import pickle

import numpy as np
from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.core.nwb_data_set import NwbDataSet

def download():
    ctc = CellTypesCache(manifest_file='ctc/manifest.json')
    cells = ctc.get_cells()

    for i, (cats, patches) in enumerate(sample_data_sets(cells, ctc, 100, 1000, 100, 4096)):
        print(cats.shape)
        np.save('patches/cats_%04d.npy' % i, cats)
        np.save('patches/patches_%04d.npy' % i, patches)
Example #5
def get_data_sets_from_remote(upper_bound=2, lower_bound=None):
    try:
        with open('all_allen_cells.p', 'rb') as f:
            cells = pickle.load(f)
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
    except FileNotFoundError:
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
        cells = ctc.get_cells()
        with open('all_allen_cells.p', 'wb') as f:
            pickle.dump(cells, f)
    data = []
    data_sets = []
    path_name = 'data_nwbs'

    os.makedirs(path_name, exist_ok=True)

    ids = [c['id'] for c in cells]
    if upper_bound is None and lower_bound is None:
        limited_range = ids[0:-1]
    else:
        # slicing with a None bound falls back to the start/end of the list
        limited_range = ids[lower_bound:upper_bound]
    # count how many of the requested specimens are already cached locally
    cnt = 0
    for specimen_id in limited_range:
        temp_path = os.path.join(path_name, str(specimen_id) + '.p')
        if os.path.exists(temp_path):
            cnt += 1
    for specimen_id in limited_range:
        temp_path = os.path.join(path_name, str(specimen_id) + '.p')
        if os.path.exists(temp_path):
            with open(temp_path, 'rb') as f:
                (data_set_nwb, sweeps, specimen_id) = pickle.load(f)
            data_sets.append((data_set_nwb, sweeps, specimen_id))
        else:

            data_set = ctc.get_ephys_data(specimen_id)
            sweeps = ctc.get_ephys_sweeps(specimen_id)

            file_name = 'cell_types/specimen_' + str(
                specimen_id) + '/ephys.nwb'
            data_set_nwb = NwbDataSet(file_name)

            data_sets.append((data_set_nwb, sweeps, specimen_id))

            with open(temp_path, 'wb') as f:
                pickle.dump((data_set_nwb, sweeps, specimen_id), f)
    return data_sets
Example #6
import os
import unittest

from allensdk.core.cell_types_cache import CellTypesCache

class NwbDataSetTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(NwbDataSetTest, self).__init__(*args, **kwargs)

    def testAllDataSets(self):

        manifest_file = '/local1/projects/FHL2015/cell_types/manifest.json'
        if not os.path.exists(manifest_file):
            print "Cannot run this test: manifest does not exist (%s)" % manifest_file
            return True
        
        self.cache = CellTypesCache(manifest_file=manifest_file)
        cells = self.cache.get_cells()

        for cell in cells:
            data_set = self.cache.get_ephys_data(cell['id'])
            sweeps = self.cache.get_ephys_sweeps(cell['id'])

            for sweep in sweeps:
                metadata = data_set.get_sweep_metadata(sweep['sweep_number'])
Example #7
import numpy as np
from allensdk.core.cell_types_cache import CellTypesCache

def load_data(stim_names, reps=10, dur=3000, delay=200):
    ctc = CellTypesCache(manifest_file="ctc/manifest.json")
    cells = ctc.get_cells()

    cell_id = cells[0]['id']
    sweeps = ctc.get_ephys_sweeps(cell_id)
    sweeps = [(sweep['sweep_number'], sweep['stimulus_name'])
              for sweep in sweeps if sweep['stimulus_name'] in stim_names]

    ds = ctc.get_ephys_data(cell_id)

    vv, ii = [], []

    for sn, st in sweeps:
        v, i, t = load_sweep(ds, sn)

        stim_start = np.argwhere(i != 0)[0][0]

        for rep in range(reps):
            idx0 = stim_start - delay - np.random.randint(0, dur // 2)

            vr = v[idx0:]
            ir = i[idx0:]

            if st.startswith('Noise'):
                offs = [0, 200000, 400000]
                for off in offs:
                    vv.append(vr[off:off + dur])
                    ii.append(ir[off:off + dur])
            else:
                vv.append(vr[:dur])
                ii.append(ir[:dur])

    stims = np.vstack(ii)
    resps = np.vstack(vv) + 74.0

    print(stims.shape)

    return stims, resps
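`load_sweep` is a helper defined elsewhere; a plausible sketch based on the NwbDataSet sweep layout used later on this page (the unit conversions are assumptions, consistent with the +74.0 mV offset above):

def load_sweep(ds, sweep_number):
    # Return response (mV), stimulus (pA) and time (s) for one sweep
    sweep = ds.get_sweep(sweep_number)
    v = sweep['response'] * 1e3   # V -> mV
    i = sweep['stimulus'] * 1e12  # A -> pA
    t = np.arange(len(v)) / sweep['sampling_rate']
    return v, i, t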
Example #8
from allensdk.core.cell_types_cache import CellTypesCache
import allensdk.internal.core.lims_utilities as lu
import pandas as pd
from allensdk.api.queries.glif_api import GlifApi
import os
import sys

relative_path = os.path.dirname(os.getcwd())
sys.path.append(os.path.join(relative_path, 'libraries'))

# Find all mouse cells with models
glif_api = GlifApi()
ctc = CellTypesCache(
    manifest_file=os.path.join(relative_path, 'cell_types_manifest.json'))
specimen_id_list = []
temp = ctc.get_cells()
for c in temp:
    if c['species'] == 'Mus musculus':
        specimen_id_list.append(c['id'])

print(len(specimen_id_list), 'mouse specimens in public database')


def get_expVar(specimen_id_list, keyword):
    '''Grab explained variance value of specimen id list in public database
    Inputs:
        specimen_id_list: list of integers
            desired specimen ids of data in AIBS public database
        keyword: string
            string to search for in the 'name' entry of the AIBS public database
    Outputs:
Example #9
class AllenMorphology(Paths):
    """ Handles the download and visualisation of neuronal morphology data from the Allen database. """
    def __init__(self, *args, scene_kwargs={}, **kwargs):
        """
			Initialise API interaction and fetch metadata of neurons in the Allen Database. 
		"""
        if not connected_to_internet():
            raise ConnectionError(
                "You will need to be connected to the internet to use the AllenMorphology class"
            )

        Paths.__init__(self, *args, **kwargs)
        self.scene = Scene(add_root=False, display_inset=False, **scene_kwargs)

        # Create a Cache for the Cell Types Cache API
        self.ctc = CellTypesCache(
            manifest_file=os.path.join(self.morphology_allen, 'manifest.json'))

        # Get a list of cell metadata for neurons with reconstructions, download if necessary
        self.neurons = pd.DataFrame(
            self.ctc.get_cells(species=[CellTypesApi.MOUSE],
                               require_reconstruction=True))
        self.n_neurons = len(self.neurons)
        if not self.n_neurons:
            raise ValueError(
                "Could not fetch neuron metadata from the Allen database"
            )

        self.downloaded_neurons = self.get_downloaded_neurons()

    def get_downloaded_neurons(self):
        """
        Gets the paths to the files of downloaded neurons
        """
        return [
            os.path.join(self.morphology_allen, f)
            for f in os.listdir(self.morphology_allen) if f.endswith(".swc")
        ]

    def download_neurons(self, ids):
        """
			Download neurons

		:param ids: list of integers with neurons IDs

		"""
        if isinstance(ids, np.ndarray):
            ids = list(ids)
        if not isinstance(ids, list):
            ids = [ids]

        neurons = []
        for neuron_id in ids:
            neuron_file = os.path.join(self.morphology_allen,
                                       "{}.swc".format(neuron_id))
            neurons.append(
                self.ctc.get_reconstruction(neuron_id, file_name=neuron_file))

        return neurons

    def parse_neurons_swc_allen(self, morphology, color='blackboard', alpha=1):
        """
        SWC parser for Allen neuron morphology data, which is formatted a bit differently from the Mouse Light SWC files

        :param morphology: Morphology object with the neuron's reconstruction data
        :param color: color of the rendered neuron

		"""
        # Create soma actor
        radius = 1
        neuron_actors = [
            shapes.Sphere(pos=get_coords(morphology.soma)[::-1],
                          c=color,
                          r=radius * 3)
        ]

        # loop over trees
        for tree in morphology._tree_list:

            tree = pd.DataFrame(tree)
            branching_points = [
                t.id for i, t in tree.iterrows()
                if len(t.children) > 2 and t.id < len(tree)
            ]

            branch_starts = []
            for bp in branching_points:
                branch_starts.extend(tree.iloc[bp].children)

            for bp in branch_starts:
                parent = tree.iloc[tree.iloc[bp].parent]
                branch = [(parent.x, parent.y, parent.z)]
                point = tree.iloc[bp]

                while True:
                    branch.append((point.x, point.y, point.z))

                    if not point.children:
                        break
                    else:
                        try:
                            point = tree.iloc[point.children[0]]
                        except IndexError:
                            # child index beyond this tree; stop following the branch
                            break

                # Create actor
                neuron_actors.append(
                    shapes.Tube(branch, r=radius, c='red', alpha=1, res=24))

        actor = merge(*neuron_actors)
        actor.color(color)
        actor.alpha(alpha)
        return actor

    # # Todo load/save neurons??
    def load_save_neuron(self, neuron_file, neuron=None):
        neuron_name = os.path.split(neuron_file)[-1].split('.swc')[0]

        savepath = os.path.join(self.morphology_cache, neuron_name + '.vtk')

        if neuron is None and os.path.isfile(savepath):
            return load(savepath)
        elif neuron is None:
            return None
        elif neuron is not None:
            neuron.write(savepath)

    def parse_neuron_swc(self,
                         filepath,
                         color='blackboard',
                         alpha=1,
                         radius_multiplier=.1,
                         overwrite=False):
        """
		Given an swc file, render the neuron

		:param filepath: str with path to swc file
        :param color: color of the rendered neuron

		"""
        # See if we rendered this neuron already
        if not overwrite:
            loaded = self.load_save_neuron(filepath)
            if loaded is not None:
                return loaded.color(color)

        print(f"Parsing swc file: {filepath}")
        # details on swc files: http://www.neuronland.org/NLMorphologyConverter/MorphologyFormats/SWC/Spec.html
        _sample = namedtuple("sample", "sampleN structureID x y z r parent")

        if not os.path.isfile(filepath) or ".swc" not in filepath.lower():
            raise ValueError("unrecognized file path: {}".format(filepath))

        try:
            return self.parse_neurons_swc_allen(filepath)
        except Exception:
            pass  # the .swc file was not generated by the Allen pipeline

        with open(filepath) as f:
            content = f.readlines()
        content = [
            sample.replace("\n", "") for sample in content if sample[0] != '#'
        ]
        content = [sample for sample in content if len(sample) > 3]

        # create an empty dict to hold the parsed samples
        data = dict(id=[],
                    parentNumber=[],
                    radius=[],
                    sampleNumber=[],
                    x=[],
                    y=[],
                    z=[])

        # start looping around samples
        for sample in content:
            s = _sample(
                *[float(samp) for samp in sample.lstrip().rstrip().split(" ")])

            # append data to dictionary
            data['id'].append(int(s.structureID))
            data['parentNumber'].append(int(s.parent))
            data['radius'].append(s.r)
            data['x'].append(s.x)
            data['y'].append(s.y)
            data['z'].append(s.z)
            data['sampleNumber'].append(int(s.sampleN))

        # Get branches and soma
        print("		reconstructing neurites trees")
        data = pd.DataFrame(data)
        radius = data['radius'].values[0] * radius_multiplier

        soma = data.iloc[0]
        soma = shapes.Sphere(pos=[soma.x, soma.y, soma.z],
                             c=color,
                             r=radius * 4)
        neuron_actors = [soma]

        branches_end, branches_start = [], []  # Get branches start and end
        for parent in data.parentNumber.values:
            sons = data.loc[data.parentNumber == parent]
            if len(sons) > 1:
                branches_end.append(parent)
                for i, son in sons.iterrows():
                    branches_start.append(son.sampleNumber)

        print("		creating actors")
        for start in branches_start:
            node = data.loc[data.sampleNumber == start]
            parent = data.loc[data.sampleNumber == node.parentNumber.values[0]]

            branch = [(parent.x.values[0], parent.y.values[0],
                       parent.z.values[0])]
            while True:
                branch.append(
                    (node.x.values[0], node.y.values[0], node.z.values[0]))

                node = data.loc[data.parentNumber ==
                                node.sampleNumber.values[0]]
                if not len(node): break
                if node.sampleNumber.values[0] in branches_end:
                    branch.append(
                        (node.x.values[0], node.y.values[0], node.z.values[0]))
                    break

            neuron_actors.append(
                shapes.Tube(branch, r=radius, c='red', alpha=1, res=24))

        # Merge actors and save
        actor = merge(*neuron_actors)
        actor.color(color)
        actor.alpha(alpha)

        self.load_save_neuron(filepath, neuron=actor)
        return actor

    def add_neuron(self,
                   neuron,
                   shadow_axis=None,
                   shadow_offset=-20,
                   **kwargs):
        if isinstance(neuron, list):
            neurons = neuron
        elif isinstance(neuron, str):
            if os.path.isdir(neuron):
                # render every file found in the folder
                neurons = [os.path.join(neuron, f) for f in os.listdir(neuron)]
            else:
                neurons = [neuron]
        else:
            neurons = [neuron]

        actors = []
        for neuron in neurons:
            if isinstance(neuron, str):
                neuron = self.parse_neuron_swc(neuron, **kwargs)
            elif isinstance(neuron, Morphology):
                neuron = self.parse_neurons_swc_allen(neuron, **kwargs)

            actor = self.scene.add_vtkactor(neuron)

            # scals = actor.points()[:, 1]
            # alphas = np.linspace(0.82, .83, 250)
            # actor.pointColors(scals, alpha=alphas, cmap="Greens_r")

            # actor.points()[:, 0] += np.random.normal(0, 2000)
            # actor.points()[:, 2] += np.random.normal(0, 2000)

            if shadow_axis == 'x':
                actor.addShadow(x=shadow_offset)
            elif shadow_axis == 'y':
                actor.addShadow(y=shadow_offset)
            elif shadow_axis == 'z':
                actor.addShadow(z=shadow_offset)

            actors.append(neuron)

        return actors

    def render(self, **kwargs):
        self.scene.render(**kwargs)
Example #10
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from allensdk.core.cell_types_cache import CellTypesCache
get_ipython().run_line_magic('matplotlib', 'inline')

# We'll then initialize the cache as 'ctc' (cell types cache)
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

print('Packages were successfully imported.')

# The `get_cells` method downloads metadata for cells in the database. The database contains human cells and mouse cells. By default, `get_cells()` downloads metadata for all cells in the database. Alternatively, one can filter the database to only include cells collected from a certain species.
# Look through <a href="https://allensdk.readthedocs.io/en/latest/allensdk.core.cell_types_cache.html">the documentation for the CellTypesCache</a> for more information on the `get_cells` method.

# In[19]:

# Download metadata for all cells in the database
all_cells = ctc.get_cells()
all_cells_df = pd.DataFrame(all_cells).set_index('id')
print('Length of dataframe:')
print(len(all_cells_df))
all_cells_df.head()

# As you may have noticed already, our current dataframe only contains metadata about our cells and no information on the morphology or electrophysiology of our cells. In order to get information about the morphology of these cells, we need to use the `get_morphology_features()` method on our instance of the cell types cache. We will set the indices to be the `specimen_id` because these ids will align with those in `all_cells_df`.

# In[20]:

# Downloads the morphology features and sets up the dataframe all in one line
morphology_df = pd.DataFrame(
    ctc.get_morphology_features()).set_index('specimen_id')
print('Length of dataframe:')
print(len(morphology_df))
morphology_df.head()
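To attach these morphology features to the cell metadata, the two frames can be joined on their shared specimen-id index, mirroring the join used in a later example (a sketch):

# Combine metadata and morphology features; both indices hold specimen ids
all_cells_morph_df = all_cells_df.join(morphology_df)
all_cells_morph_df.head()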
Example #11
def CreateDB(specimenList, databaseName, resetDB, manifestFile,
             host, user, password, verbose):
    
    if verbose:
        print("CreateDB importing...")
        
    import sys
    from allensdk.ephys.extract_cell_features import extract_cell_features
    from allensdk.core.cell_types_cache import CellTypesCache
    from collections import defaultdict
    
    import mysql.connector
    
    import numpy as np
    from numpyconversion import NumpyMySQLConverter
    
    from CellSurveyTableOps import dropTable, createDonorsTable
    from CellSurveyTableOps import createSpecimensTable, createSpecimenFXsTable
    from CellSurveyTableOps import createExperimentsTable, createExperimentFXsTable
    from CellSurveyTableOps import addSpecimen, addExperiment, addDonor
    from CellSurveyTableOps import addExpFX, addSpecFX
    from ABISweepFX import getABIAnalysisPoints, ExtractSweepFeatures
    
    #### Create the database from scratch if required
    if verbose:
        print("Connecting to the database")
    
    try: 
        cnx = mysql.connector.connect(user=user, password=password,
                                      host=host, database=databaseName,
                                      converter_class=NumpyMySQLConverter)
        if verbose:
            print("Connection complete")
            
        cursobj = cnx.cursor()
    except mysql.connector.Error:
        cnx = mysql.connector.connect(user=user, password=password, host=host,
                                      converter_class=NumpyMySQLConverter)
        if verbose:
            print(cnx)
        cursobj = cnx.cursor()
        mycmd = 'create database ' + databaseName
        cursobj.execute(mycmd)
        if verbose:
            print("Database created")
        mycmd = 'use ' + databaseName
        cursobj.execute(mycmd)
        if verbose:
            print("Using database " + databaseName)
    
    if resetDB:
        if verbose:
            print("Dropping all tables")
            
        tablenames = ['specimenFXs', 'experimentFXs', 'experiments', 
                      'specimens', 'donors']
        for tablename in tablenames:
            result = dropTable(cnx, tablename)
            if verbose:
                if result:
                    print(tablename + " table dropped")
                else:
                    print("There was a problem dropping table " + tablename)
    
        # -----
        if verbose:
            print("Creating tables")

        for createFn, tableName in [(createDonorsTable, "Donors"),
                                    (createSpecimensTable, "Specimens"),
                                    (createExperimentsTable, "Experiments"),
                                    (createSpecimenFXsTable, "SpecimenFXs"),
                                    (createExperimentFXsTable, "ExperimentFXs")]:
            result = createFn(cnx)
            if verbose:
                if result:
                    print(tableName + " Table created")
                else:
                    print("There was a problem creating the " + tableName + " Table")
    
        
    # ====================================================================
    # Install the ABI Datasets
    if verbose:
        print("Installing the ABI Datasets into the database")
        sys.stdout.flush()
        
    # Instantiate the CellTypesCache instance.  
    ctc = CellTypesCache(manifest_file=manifestFile)
    
    # Get metadata on all cells
    cells = ctc.get_cells()
    
    ####### ALL DONORS #######
    # Populate the donors table with all donors of all cells
    if verbose:
        print("Populating donors table")
    
    for cell in cells:
        addDonor(cnx, cell['donor_id'], cell['donor']['sex'], cell['donor']['name'])

        
    ####### ALL EPHYS FEATURES #######
    try:
        # for all cells
        allEphysFeatures = ctc.get_ephys_features()
    except Exception:
        # If no ephys features, we cannot do anything
        print("No ephys features available; aborting program.")
        sys.exit()
            
            
    ####### SPECIMENS #######
    # Get relevant info for each specimen in input list
    if verbose:
        print("Processing each specimen in turn")
        sys.stdout.flush()
        
    for specimen in specimenList:
        print('@@@@@ Processing specimen:', specimen)
        
        try:
            specEphysData = ctc.get_ephys_data(specimen)
        except Exception:
            # If no ephys data, we do not want to bother with it
            print("No ephys data for specimen", specimen, "; ignoring it.")
            continue
    
        ###### SPECIMEN >>> METADATA ######
        # Paw through the cells to find the metadata for the current specimen
        # The cell is a dictionary that has most of the "other" non-sweep stuff
        # we need such as cell averages, rheobase info, transgenic line, hemisphere, 
        # age, sex, graph order, dendrite type, area, has_burst,...
        # May be able to improve this search Pythonically 
        for cell in cells:
            datasets = cell['data_sets']
            for dataset in datasets:
                dsspec = dataset['specimen_id']
                if dsspec == specimen:
                    specCell = cell
                    break
                
        # Add the specimen to the database
        donorID = specCell['donor_id']
        specimenTableIDX = addSpecimen(cnx, donorID, specimen)
    
        ####### SPECIMEN >>> SWEEPS/EXPERIMENTS #######
        # Change these to true if show in any sweep 
        cellHasBursts = False
        cellHasDelays = False
        cellHasPauses = False
        
        # Process each sweep in turn
        sweeps = ctc.get_ephys_sweeps(specimen)
        for sweep in sweeps:
            sweepNum = sweep['sweep_number']
            
            msg = ("  Processing sweep_number: " + str(sweepNum) +
                   "  stimulus: " + str(sweep['stimulus_name']) +
                   "  num_spikes = " + str(sweep['num_spikes']))
            print(msg)
    
            # Screen out some sweep types because they are not suitable for our 
            #      simulations or because the stimulus type is not successful 
            #      in use of process_spikes() (which we use for simulations)
            databaseList = ['Long Square', 'Short Square', 'Noise 1', 'Noise 2', 
                            'Square - 2s Suprathreshold', 'Square - 0.5ms Subthreshold',
                            'Short Square - Triple', 'Ramp', 'Ramp to Rheobase']
            if sweep['stimulus_name'] not in databaseList:
                print "    Stimulus type", sweep['stimulus_name'], "not supported."
                continue
    
            # sweepData holds index range, response data vector, sampling_rate, and stimulus vector 
            sweepData = specEphysData.get_sweep(sweepNum)
    
            # sweep_metadata holds aibs_stimulus_amplitude_pa, aibs_stimulus_name,
            #  gain, initial_access_resistance, and seal
            sweep_metadata = specEphysData.get_sweep_metadata(sweepNum)
            samplingRate = sweepData["sampling_rate"] # in Hz
            
            # Need to check if this sweep is actually an experiment
            # [not implemented]
            
            # Add the experiment to the database
            experimentIDX = addExperiment(cnx, specimenTableIDX,
                                          sweepNum, samplingRate,
                                          sweep_metadata['aibs_stimulus_name'],
                                          float(sweep_metadata['aibs_stimulus_amplitude_pa']))

            # Only Long Square is suitable for our simulations
            fxOKList = ['Long Square']
            if sweep['stimulus_name'] not in fxOKList:
                print "    Stimulus type", sweep['stimulus_name'], "entered into database but not supported for feature extractions."
                continue

            ## Create the experiment feature extraction data ## 
            # This approach seen at   
            # http://alleninstitute.github.io/AllenSDK/_static/examples/nb/
            #      cell_types.html#Computing-Electrophysiology-Features
            # index_range[0] is the "experiment" start index. 0 is the "sweep" start index
            indexRange = sweepData["index_range"]
            # For our purposes, we grab the data from the beginning of the sweep 
            #  instead of the beginning of the experiment
            # i = sweepData["stimulus"][indexRange[0]:indexRange[1]+1] # in A
            # v = sweepData["response"][indexRange[0]:indexRange[1]+1] # in V
            i = sweepData["stimulus"][0:indexRange[1]+1] # in A
            v = sweepData["response"][0:indexRange[1]+1] # in V
            i *= 1e12 # to pA
            v *= 1e3 # to mV
            t = np.arange(0, len(v)) * (1.0 / samplingRate) # in seconds
         
            ###### Do the sweep's feature extraction #######
            # Determine the position and length of the analysis window with respect
            # to the beginning of the sweep 
            stimType = sweep_metadata['aibs_stimulus_name']
            analysisPoints = getABIAnalysisPoints(stimType)
            analysis_start = analysisPoints['analysisStart']
            stimulus_start = analysisPoints['stimulusStart']
            analysis_duration = analysisPoints['analysisDuration']
    
            if verbose:
                print('analysis_start', analysis_start, 'stimulus_start',
                      stimulus_start, 'analysis_duration', analysis_duration)
    
            # Trim the analysis to end of experiment if necessary
            if (analysis_start + analysis_duration) * samplingRate >= indexRange[1]:
                end_time = (indexRange[1]-1)/samplingRate
                analysis_duration = end_time - analysis_start
    
            if verbose:
                print('analysis_start', analysis_start, 'stimulus_start',
                      stimulus_start, 'analysis_duration', analysis_duration)
    
            # Now we extract the sweep features from that analysis window
            swFXs = ExtractSweepFeatures(t, v, i, analysis_start, 
                            analysis_duration, stimulus_start, verbose)
            if len(swFXs) == 0:
                print "Skipping experiment: ", specimen, '/', sweepNum, " and continuing..."
                continue
            
            if swFXs['hasBursts']: cellHasBursts = True
            if swFXs['hasPauses']: cellHasPauses = True
            if swFXs['hasDelay']: cellHasDelays = True

            ## Add the feature extraction to the database ##
            expFXs = dict(swFXs)
            # individual spike data not going into the database directly
            if 'spikeData' in expFXs:
                del expFXs['spikeData']
                   
            addExpFX(cnx, experimentIDX, expFXs)
        # end of:  for sweep in sweeps:
    
        ## Assemble the specimen feature extraction data ##
        specimenEphysFeaturesList = [f for f in allEphysFeatures if f['specimen_id'] == specimen]
        specimenEphysFeatures = specimenEphysFeaturesList[0]
         
        data_set = ctc.get_ephys_data(specCell['id'])
        sweeps = ctc.get_ephys_sweeps(specimen)
        sweep_numbers = defaultdict(list)
        for sweep in sweeps:
            sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])
    
        cell_features = (extract_cell_features(data_set, sweep_numbers['Ramp'], 
                    sweep_numbers['Short Square'], sweep_numbers['Long Square']))
        spFXs = {}
        spFXs['hasSpikes']                   = cell_features['long_squares']['spiking_sweeps'] != []
        spFXs['hero_sweep_id']               = cell_features['long_squares']['hero_sweep']['id']
        spFXs['hero_sweep_avg_firing_rate']  = cell_features['long_squares']['hero_sweep']['avg_rate']
        spFXs['hero_sweep_adaptation']       = cell_features['long_squares']['hero_sweep']['adapt']
        spFXs['hero_sweep_first_isi']        = cell_features['long_squares']['hero_sweep']['first_isi']
        spFXs['hero_sweep_mean_isi']         = cell_features['long_squares']['hero_sweep']['mean_isi']
        spFXs['hero_sweep_median_isi']       = cell_features['long_squares']['hero_sweep']['median_isi']
        spFXs['hero_sweep_isi_cv']           = cell_features['long_squares']['hero_sweep']['isi_cv']
        spFXs['hero_sweep_latency']          = cell_features['long_squares']['hero_sweep']['latency']
        spFXs['hero_sweep_stim_amp']         = cell_features['long_squares']['hero_sweep']['stim_amp']
        spFXs['hero_sweep_v_baseline']       = cell_features['long_squares']['hero_sweep']['v_baseline']
        spFXs['dendrite_type']               = specCell['dendrite_type']
        spFXs['electrode_0_pa']              = specimenEphysFeatures['electrode_0_pa']
        spFXs['f_i_curve_slope']             = specimenEphysFeatures['f_i_curve_slope']
        spFXs['fast_trough_t_long_square']   = specimenEphysFeatures['fast_trough_t_long_square']     
        spFXs['fast_trough_t_ramp']          = specimenEphysFeatures['fast_trough_t_ramp']    
        spFXs['fast_trough_t_short_square']  = specimenEphysFeatures['fast_trough_t_short_square']  
        spFXs['fast_trough_v_long_square']   = specimenEphysFeatures['fast_trough_v_long_square']
        spFXs['fast_trough_v_ramp']          = specimenEphysFeatures['fast_trough_v_ramp']    
        spFXs['fast_trough_v_short_square']  = specimenEphysFeatures['fast_trough_v_short_square']
        spFXs['has_bursts']                  = cellHasBursts
        spFXs['has_delays']                  = cellHasDelays    
        spFXs['has_pauses']                  = cellHasPauses
        spFXs['hemisphere']                  = specCell['hemisphere'] 
        spFXs['input_resistance_mohm']       = specimenEphysFeatures['input_resistance_mohm']
        spFXs['peak_t_long_square']          = specimenEphysFeatures['peak_t_long_square']
        spFXs['peak_t_ramp']                 = specimenEphysFeatures['peak_t_ramp']    
        spFXs['peak_t_short_square']         = specimenEphysFeatures['peak_t_short_square']
        spFXs['peak_v_long_square']          = specimenEphysFeatures['peak_v_long_square'] 
        spFXs['peak_v_ramp']                 = specimenEphysFeatures['peak_v_ramp']    
        spFXs['peak_v_short_square']         = specimenEphysFeatures['peak_v_short_square']
        spFXs['reporter_status']             = specCell['reporter_status']
        spFXs['rheobase_current']            = cell_features['long_squares']['rheobase_i'] 
        spFXs['ri']                          = specimenEphysFeatures['ri']
        spFXs['sagFraction']                 = specimenEphysFeatures['sag']
        spFXs['seal_gohm']                   = specimenEphysFeatures['seal_gohm']
        spFXs['slow_trough_t_long_square']   = specimenEphysFeatures['slow_trough_t_long_square']
        spFXs['slow_trough_t_ramp']          = specimenEphysFeatures['slow_trough_t_ramp']           
        spFXs['slow_trough_t_short_square']  = specimenEphysFeatures['slow_trough_t_short_square']
        spFXs['slow_trough_v_long_square']   = specimenEphysFeatures['slow_trough_v_long_square']  
        spFXs['slow_trough_v_ramp']          = specimenEphysFeatures['slow_trough_v_ramp']                
        spFXs['slow_trough_v_short_square']  = specimenEphysFeatures['slow_trough_v_short_square']
        spFXs['structure_acronym']           = specCell['structure']['acronym']  
        spFXs['structure_name']              = specCell['structure']['name']
        spFXs['tau']                         = specimenEphysFeatures['tau']
        spFXs['threshold_i_long_square']     = specimenEphysFeatures['threshold_i_long_square']
        spFXs['threshold_i_ramp']            = specimenEphysFeatures['threshold_i_ramp']              
        spFXs['threshold_i_short_square']    = specimenEphysFeatures['threshold_i_short_square']
        spFXs['threshold_t_long_square']     = specimenEphysFeatures['threshold_t_long_square']  
        spFXs['threshold_t_ramp']            = specimenEphysFeatures['threshold_t_ramp']              
        spFXs['threshold_t_short_square']    = specimenEphysFeatures['threshold_t_short_square']
        spFXs['threshold_v_long_square']     = specimenEphysFeatures['threshold_v_long_square']  
        spFXs['threshold_v_ramp']            = specimenEphysFeatures['threshold_v_ramp']              
        spFXs['threshold_v_short_square']    = specimenEphysFeatures['threshold_v_short_square']
        spFXs['transgenic_line']             = specCell['transgenic_line']
        spFXs['trough_t_long_square']        = specimenEphysFeatures['trough_t_long_square']        
        spFXs['trough_t_ramp']               = specimenEphysFeatures['trough_t_ramp']                 
        spFXs['trough_t_short_square']       = specimenEphysFeatures['trough_t_short_square'] 
        spFXs['trough_v_long_square']        = specimenEphysFeatures['trough_v_long_square']   
        spFXs['trough_v_ramp']               = specimenEphysFeatures['trough_v_ramp']                 
        spFXs['trough_v_short_square']       = specimenEphysFeatures['trough_v_short_square'] 
        spFXs['upstroke_downstroke_ratio_long_square'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_long_square']  
        spFXs['upstroke_downstroke_ratio_ramp'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_ramp']        
        spFXs['upstroke_downstroke_ratio_short_square'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_short_square'] 
        spFXs['v_rest']                      = specimenEphysFeatures['vrest']
        spFXs['vm_for_sag']                  = specimenEphysFeatures['vm_for_sag']

        ## Add the specimen feature extraction data to the database ##
        addSpecFX(cnx, specimenTableIDX, spFXs)
    # end of:  for specimen in specimenList
    
    cnx.close()
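A hedged invocation sketch for CreateDB (every argument value here is hypothetical except the specimen id, which appears later on this page):

CreateDB(specimenList=[324257146], databaseName='cellsurvey', resetDB=True,
         manifestFile='cell_types/manifest.json',
         host='localhost', user='dbuser', password='dbpass', verbose=True)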
Example #12
#Import CellTypesApi, which will allow us to query the database.
from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.api.queries.cell_types_api import CellTypesApi

# We'll then initialize the cache as 'ctc' (cell types cache)
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')


# ### Get Cells & Manipulate Dataframe
# 
# As stated in previous sections, the `get_cells` method downloads metadata for all cells in the database. By selecting a species within the method, `get_cells` will only retrieve the data from the species of interest. We will be working first with data on human cells.
# Look through <a href="https://allensdk.readthedocs.io/en/latest/allensdk.core.cell_types_cache.html">the documentation for the CellTypesCache</a> for more information on the `get_cells` method.

# In[2]:


human_cells = ctc.get_cells(species=[CellTypesApi.HUMAN])
print(human_cells)


# As you can see, the output for the metadata of our cells is messy and difficult to visualize. To make our data easier to read and work with, we can convert `human_cells` into a Pandas dataframe.
# 

# Note: If you're having trouble with Pandas, it can help to look at <a href="https://pandas.pydata.org/pandas-docs/stable/user_guide/">the user guide</a>.

# In[3]:


import pandas as pd 

# Create a dataframe from 'human_cells' and re-assign the dataframe to a new variable
human_cells_df = pd.DataFrame(human_cells)
Example #13
def get_files_from_LIMS_public(output_path, glif_sp_ids=None, type='mouse'):
    '''Grab cre-positive data config files from LIMS, sort them, and put them in
    the specified output folder.
    input:
        output_path: string
            specifies the path where files will be placed
        glif_sp_ids: list of strings or integers
            specimen ids of the cells you specifically want to grab. If None, it will get all
            available on the Allen Institute Cell Types Database.
        type: string
            can be 'mouse' or 'human'. Note that if 'mouse' is specified it will only grab
            cre-positive mouse cells (the code can be altered to get cre-negative cells).
    output:
        Does not return values but creates the specified 'output_path' folder.
        Inside the folder a series of folders are created with the name format:
        specimenid_cre.  Inside those inner folders are the neuron configs of
        the available GLIF models along with the preprocessor files.
    '''

    glif_api = GlifApi()     
    ctc = CellTypesCache(manifest_file=os.path.join(relative_path,'cell_types_manifest.json'))

    # select the specimen ids to grab from the data base (cre positive or human which have at least 1 GLIF model)
    if glif_sp_ids is None: #if no specimen ids are specified grab all data in the cell types manifest
        specimen_id_list = []
        if type=='mouse':
            for c in ctc.get_cells():
                if c['reporter_status']=='cre reporter positive':
                    specimen_id_list.append(c['id'])
        elif type=='human':
            print('getting human')
            for c in ctc.get_cells(species=['Homo Sapiens']):
                #print(c)
                specimen_id_list.append(c['id'])
            print(specimen_id_list)
        # reduce list to cells that have a GLIF model
        glif_sp_ids=[]
        for sp in specimen_id_list:
            models=glif_api.get_neuronal_models(sp)[0]
            for m in models['neuronal_models']:
                if 'LIF' in m['name']:
                    glif_sp_ids.append(m['specimen_id'])
                    
        glif_sp_ids=list(set(glif_sp_ids))
        print(len(glif_sp_ids), 'cre positive specimens with at least 1 LIF model')

    # create the overall output directory if it doesn't exist
    os.makedirs(output_path, exist_ok=True)
    
    # go get the files corresponding to the specimen ids from the Allen Cell Types Database 
    # and put them into a specified output directory 
    for id in glif_sp_ids:
        model_query=glif_api.get_neuronal_models(id)[0]['neuronal_models']
        df=pd.DataFrame(model_query)
        for mt_id, short_name in zip(model_template_ids, model_names):
            dff=df[df['neuronal_model_template_id']==mt_id]
            if len(dff)>=2:
                print(dff)
                raise Exception("This is public data, there should not be more than 1 model")
            elif len(dff)==1:
                use_me=dff
                #go get the file 
                path=use_me['well_known_files'].iloc[0][0]['path'] 
                if type=='mouse':
                    cre=(str(use_me['name'].values).split(')_'))[1].split(';')[0]
                elif type=='human':
                    cre='human'
                else:
                    raise Exception('specified species not known')
                # convert old non complete cre names
                if 'Ntsr1-Cre' in cre:
                    cre='Ntsr1-Cre_GN220'
                if 'Chat-IRES-Cre' in cre:
                    cre='Chat-IRES-Cre-neo'
                dir_name=os.path.join(output_path, str(id)+'_'+cre)
                os.makedirs(dir_name, exist_ok=True)
                if not path.endswith('_neuron_config.json'):
                    print(path)
                    raise Exception('the file does not end with _neuron_config.json')
                try:
                    copyfile(path, os.path.join(dir_name, str(id)+'_'+cre+'_'+short_name+'_neuron_config.json'))
                except OSError:
                    print('could not copy', os.path.join(dir_name, str(id)+'_'+cre+'_'+short_name+'_neuron_config.json'))
                if mt_id==model_template_ids[0]:
                    model_path=os.path.dirname(path)
                    pp_file=[fname for fname in os.listdir(model_path)
                             if fname.endswith('_preprocessor_values.json')][0]
                    pp_path=os.path.join(model_path, pp_file)
                    try:
                        copyfile(pp_path, os.path.join(dir_name, str(id)+'_'+cre+'_preprocessor_values.json'))
                    except OSError:
                        print('could not copy', os.path.join(dir_name, str(id)+'_'+cre+'_preprocessor_values.json'))
                        raise Exception('there should be a preprocessed file')
            elif len(dff)<1:
                use_me=pd.DataFrame()
                path=None
Example #14
if __name__ == '__main__':
    config = Dict({
        'cell_type_tag': 'Vip',
        'stimulus_amplitude': 1e-8,
        'duration': 5e6,
        'noise_exponent': 1,
        'dump_file': './experiments/model_generated_data/data/vip_glif_spike_trains_exp1.csv'
    })

    ctc = CellTypesCache()
    cells = ctc.get_cells(species=[CellTypesApi.MOUSE],
                          require_reconstruction=False)
    cells_df = pd.DataFrame.from_records(cells, index='id')

    model_id_list = model_ids_for_cell_type(cells_df, config.cell_type_tag)
    spike_trains = []
    for model_id in tqdm(model_id_list):
        spike_train = generate_train_from_model_id(
            model_id,
            stimulus_amplitude=config.stimulus_amplitude,
            duration=config.duration,
            noise_exponent=config.noise_exponent)
        spike_trains.append(' '.join(
            ['{:.2f}'.format(value) for value in spike_train]))
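        # rewriting the CSV on every iteration keeps partial results on disk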
        pd.Series(spike_trains).to_csv(config.dump_file)
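`Dict`, `model_ids_for_cell_type`, and `generate_train_from_model_id` come from elsewhere in this project. A rough sketch of what `model_ids_for_cell_type` might do, reusing the GlifApi pattern from an earlier example (matching on the transgenic_line column is an assumption):

def model_ids_for_cell_type(cells_df, cell_type_tag):
    # Cells whose transgenic line mentions the tag, e.g. 'Vip'
    matching = cells_df[cells_df['transgenic_line'].str.contains(cell_type_tag, na=False)]
    glif_api = GlifApi()
    model_ids = []
    for specimen_id in matching.index:
        models = glif_api.get_neuronal_models(specimen_id)[0]
        for m in models['neuronal_models']:
            if 'LIF' in m['name']:
                model_ids.append(m['id'])
    return model_ids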
Example #15
class AllenMorphology(Paths):
    """ Handles the download of neuronal morphology data from the Allen database. """
    def __init__(self, *args, **kwargs):
        """
            Initialise API interaction and fetch metadata of neurons in the Allen Database. 
        """
        if not connected_to_internet():
            raise ConnectionError(
                "You will need to be connected to the internet to use the AllenMorphology class to download neurons"
            )

        Paths.__init__(self, *args, **kwargs)

        # Create a Cache for the Cell Types Cache API
        self.ctc = CellTypesCache(manifest_file=os.path.join(
            self.allen_morphology_cache, "manifest.json"))

        # Get a list of cell metadata for neurons with reconstructions, download if necessary
        self.neurons = pd.DataFrame(
            self.ctc.get_cells(species=[CellTypesApi.MOUSE],
                               require_reconstruction=True))
        self.n_neurons = len(self.neurons)

        if not self.n_neurons:
            raise ValueError(
                "Could not fetch neuron metadata from the Allen database"
            )

        self.downloaded_neurons = self.get_downloaded_neurons()

    def get_downloaded_neurons(self):
        """
        Gets the paths to the files of downloaded neurons
        """
        return [
            os.path.join(self.allen_morphology_cache, f)
            for f in os.listdir(self.allen_morphology_cache) if f.endswith(".swc")
        ]

    def download_neurons(self, ids, **kwargs):
        """
            Download neurons and return neuron reconstructions (instances
            of Neuron class)

        :param ids: list of integers with neurons IDs

        """
        if isinstance(ids, np.ndarray):
            ids = list(ids)
        if not isinstance(ids, (list)):
            ids = [ids]

        neurons = []
        print("Downloading neurons")
        for neuron_id in track(ids):
            neuron_file = os.path.join(self.allen_morphology_cache,
                                       "{}.swc".format(neuron_id))

            # Download file
            self.ctc.get_reconstruction(neuron_id, file_name=neuron_file)

            # Reconstruct neuron
            neurons.append(
                Neuron(neuron_file, neuron_name=str(neuron_id), **kwargs))

        return neurons
Example #16
#                '2/3':['485184849','475515168','485468180','476087653','571306690'],
                '2/3':['485184849','475515168','485468180'],
#                '4':['483101699','602822298','490205998','569723367','324257146'],
                '4':['483101699','602822298','569723367'],
                '5':['479225052','607124114','515249852'],
                '6a':['490259231','473564515','561985849'],
#                '6b':['589128331','574993444','510136749','509881736','590558808']
                '6b':['589128331']
                }

highlight_cells = ['485184849', '479225052', '473564515']

# Get normalized depth metadata for individual cells

ctc = CellTypesCache()
cells_allensdk = ctc.get_cells(species=['Mus musculus'], simple=False)
sdk_data = pd.DataFrame(cells_allensdk)
sdk_data['specimen__id'] = sdk_data['specimen__id'].astype(str)

ylim_min, ylim_max = -200, 1200
soma_loc_x = 0
sigma_layer = 50
soma_loc_displacement_x = 350
unique_layers = sorted(sdk_data.structure__layer.unique().tolist())

layer_dist = {layer_ : i*soma_loc_displacement_x for i,layer_ in enumerate(unique_layers)}

sns.set(style='whitegrid')
fig,ax = plt.subplots()
figname = os.path.join('figures','morph_layerwise.png')
utility.create_filepath(figname)
Example #17
from allensdk.core.cell_types_cache import CellTypesCache

#Import CellTypesApi, which will allow us to query the database.
from allensdk.api.queries.cell_types_api import CellTypesApi

# We'll then initialize the cache as 'ctc' (cell types cache)
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

# ### Step One: Get Cells & Manipulate Dataframe
#
# The `get_cells` method downloads metadata for all cells in the database. The database contains human cells and mouse cells. Alternatively, one can filter the database to only include cells collected from a certain species.
# Look through <a href="https://allensdk.readthedocs.io/en/latest/allensdk.core.cell_types_cache.html">the documentation for the CellTypesCache</a> for more information on the `get_cells` method.

# In[2]:

all_cells = ctc.get_cells()
print(all_cells)

# As you can see, the output for the metadata of our cells is messy and difficult to interpret. To make our data easier to read and work with, we can convert `all_cells` into a pandas dataframe.
#

# Note: If you're having trouble with Pandas, it can help to look at <a href="https://pandas.pydata.org/pandas-docs/stable/user_guide/">the user guide</a>.

# In[3]:

import pandas as pd
import numpy as np

# Create a dataframe from 'all_cells' and re-assign the dataframe to a new variable
all_cells_df = pd.DataFrame(all_cells)
Example #18
#Import all the necessary packages and initialize an instance of the cache
import pandas as pd
from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.api.queries.cell_types_api import CellTypesApi
import matplotlib.pyplot as plt
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

print('Packages successfully imported.')

# Below we have created a pandas dataframe from the electrophysiology data and metadata of our mouse cells and set the row indices to be the `id` column.

# In[2]:

mouse_df = pd.DataFrame(
    ctc.get_cells(species=[CellTypesApi.MOUSE])).set_index('id')
ephys_df = pd.DataFrame(ctc.get_ephys_features()).set_index('specimen_id')
mouse_ephys_df = mouse_df.join(ephys_df)
mouse_ephys_df.head()

# The Allen has many pre-computed features that you might consider comparing across cells. Some of these features include input resistance ('input_resistance_mohm'), adaptation ratio ('adaptation'), average ISI ('avg_isi'), and many others (you can find a complete glossary <a href = "https://docs.google.com/document/d/1YGLwkMTebwrXd_1E817LFbztMjSTCWh83Mlp3_3ZMEo/edit#heading=h.t0p3wngfkxc1"> here </a>).

# We must first select 2 or more cell types that we would like to compare. We can subset our electrophysiology dataframe to compare across transgenic lines, structure layers, and many more columns. We created two dataframes to compare spiny dendrite types to aspiny dendrite types, as completed in the sketch below.

# In[3]:

# Define your cell type variables below
cell_type1 = 'spiny'
cell_type2 = 'aspiny'

# Create our dataframes from our cell types
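The snippet ends before the two dataframes are built; a minimal completion, assuming the dendrite_type metadata column used in an earlier example:

spiny_df = mouse_ephys_df[mouse_ephys_df['dendrite_type'] == cell_type1]
aspiny_df = mouse_ephys_df[mouse_ephys_df['dendrite_type'] == cell_type2]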
Example #19
#===============================================================================
# example 1
#===============================================================================

from allensdk.core.cell_types_cache import CellTypesCache

ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

# a list of cell metadata for cells with reconstructions, download if necessary
cells = ctc.get_cells(require_reconstruction=True)

# open the electrophysiology data of one cell, download if necessary
data_set = ctc.get_ephys_data(cells[0]['id'])

# read the reconstruction, download if necessary
reconstruction = ctc.get_reconstruction(cells[0]['id'])

#===============================================================================
# example 2
#===============================================================================

from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.ephys.extract_cell_features import extract_cell_features
from collections import defaultdict

# initialize the cache
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

# pick a cell to analyze
specimen_id = 324257146
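The second example stops after choosing the cell; the steps that usually follow (mirroring the feature-extraction code earlier on this page) look roughly like this:

data_set = ctc.get_ephys_data(specimen_id)
sweeps = ctc.get_ephys_sweeps(specimen_id)

# group sweep numbers by stimulus type
sweep_numbers = defaultdict(list)
for sweep in sweeps:
    sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])

cell_features = extract_cell_features(data_set,
                                      sweep_numbers['Ramp'],
                                      sweep_numbers['Short Square'],
                                      sweep_numbers['Long Square'])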
Example #20
from allensdk.core.cell_types_cache import CellTypesCache

ctc = CellTypesCache()

# a list of cell metadata for cells with reconstructions, download if necessary
cells = ctc.get_cells(require_reconstruction=True)

# open the electrophysiology data of one cell, download if necessary
data_set = ctc.get_ephys_data(cells[0]['id'])

# read the reconstruction, download if necessary
reconstruction = ctc.get_reconstruction(cells[0]['id'])
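To inspect the download, the reconstruction's compartments can be loaded into a dataframe (a sketch; assumes pandas and the Morphology.compartment_list attribute):

import pandas as pd

# each compartment is a dict with id, type, x, y, z, radius and parent fields
compartments = pd.DataFrame(reconstruction.compartment_list)
print(compartments.head())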