Code example #1
import os
import pickle
from collections import defaultdict

from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.core.nwb_data_set import NwbDataSet


def allen_id_to_sweeps(specimen_id):
    ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

    specimen_id = int(specimen_id)
    data_set = ctc.get_ephys_data(specimen_id)
    sweeps = ctc.get_ephys_sweeps(specimen_id)

    # group sweep numbers by stimulus name
    sweep_numbers = defaultdict(list)
    for sweep in sweeps:
        sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])
    return sweep_numbers, data_set, sweeps

def get_data_sets_from_remote(upper_bound=2, lower_bound=None):
    # load the cached cell list if it exists; otherwise download and cache it
    try:
        with open('all_allen_cells.p', 'rb') as f:
            cells = pickle.load(f)
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
    except FileNotFoundError:
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
        cells = ctc.get_cells()
        with open('all_allen_cells.p', 'wb') as f:
            pickle.dump(cells, f)

    data_sets = []
    path_name = 'data_nwbs'
    os.makedirs(path_name, exist_ok=True)

    ids = [c['id'] for c in cells]
    if upper_bound is None and lower_bound is None:
        limited_range = ids
    else:
        limited_range = ids[lower_bound:upper_bound]

    for specimen_id in limited_range:
        temp_path = os.path.join(path_name, str(specimen_id) + '.p')
        if os.path.exists(temp_path):
            # reuse the locally cached copy
            with open(temp_path, 'rb') as f:
                (data_set_nwb, sweeps, specimen_id) = pickle.load(f)
            data_sets.append((data_set_nwb, sweeps, specimen_id))
        else:
            # get_ephys_data downloads the NWB file as a side effect,
            # so NwbDataSet can then open it locally
            data_set = ctc.get_ephys_data(specimen_id)
            sweeps = ctc.get_ephys_sweeps(specimen_id)

            file_name = 'cell_types/specimen_' + str(specimen_id) + '/ephys.nwb'
            data_set_nwb = NwbDataSet(file_name)

            data_sets.append((data_set_nwb, sweeps, specimen_id))

            with open(temp_path, 'wb') as f:
                pickle.dump((data_set_nwb, sweeps, specimen_id), f)
    return data_sets
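
A brief usage sketch for the two helpers above (the specimen id is borrowed from code example #18 on this page; the bounds are illustrative):

sweep_numbers, data_set, sweeps = allen_id_to_sweeps(324257146)
data_sets = get_data_sets_from_remote(upper_bound=2, lower_bound=0)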
Code example #3
from allensdk.core.cell_types_cache import CellTypesCache

# runNestModel and runGlifNeuron are helpers defined elsewhere in this project


def run_nwb(cell_id, model_type, neuron_config, stim_type='Ramp'):
    """Generates an injection current from an NWB sweep file.

    Parameters
    ----------
    cell_id : ID of the cell specimen
    model_type : LIF, LIF-R, LIF-R-ASC, LIF-ASC, or LIF-R-ASC-A
    neuron_config : neuron configuration dictionary
    stim_type : stimulus name used to select a sweep from the NWB file
    """

    # get sweep/stimulus data
    ctc = CellTypesCache()
    ephys_sweeps = ctc.get_ephys_sweeps(cell_id)
    ds = ctc.get_ephys_data(cell_id)
    ephys_sweep_stim = [
        s for s in ephys_sweeps if s['stimulus_name'] == stim_type
    ]
    ephys_sweep = ephys_sweep_stim[0]

    stimulus_data = ds.get_sweep(ephys_sweep['sweep_number'])

    ret = {}

    n_steps = len(stimulus_data['stimulus'])
    dt = 1.0 / stimulus_data['sampling_rate'] * 1.0e03  # ms per step
    amp_times = [t * dt for t in range(n_steps)]
    amp_values = stimulus_data['stimulus'].tolist()
    total_time = n_steps * dt

    output = runNestModel(model_type, neuron_config, amp_times, amp_values, dt,
                          total_time)
    ret['nest'] = {
        'times': output[0],
        'voltages': output[1],
        'spike_times': output[2]
    }

    output = runGlifNeuron(neuron_config, amp_values, dt)
    ret['allen'] = {
        'times': output[0],
        'voltages': output[1],
        'spike_times': output[2]
    }
    ret['I'] = amp_values
    ret['dt'] = dt

    return ret
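
A sketch of how run_nwb might be invoked, assuming a GLIF neuron configuration has already been downloaded as in code example #13 (the cell id and model label here are illustrative, not from the original project):

import allensdk.core.json_utilities as json_utilities

neuron_config = json_utilities.read('neuron_config.json')['566302806']
ret = run_nwb(474637203, 'LIF', neuron_config, stim_type='Ramp')
print(ret['dt'], len(ret['allen']['spike_times']))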
Code example #4
def getExperiment(cell_id, stimtypes):
    ctc = CellTypesCache()
    ephys_sweeps = ctc.get_ephys_sweeps(specimen_id=cell_id)
    sweeps_by_type = defaultdict(list)
    sweeps = []
    for sweep in ephys_sweeps:
        if sweep['stimulus_name'] in stimtypes:
            sweeps.append(sweep['sweep_number'])
            sweeps_by_type[sweep['stimulus_name']].append(
                sweep['sweep_number'])

    ephys_filename = f'{cell_id}/{cell_id}_ephys.nwb'
    ctc.get_ephys_data(cell_id, ephys_filename)
    swc_filename = f'{cell_id}/{cell_id}.swc'
    ctc.get_reconstruction(cell_id, swc_filename)
    return ephys_filename, swc_filename, sweeps, sweeps_by_type
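
A usage sketch (hedged: the specimen id is borrowed from code example #18, and getExperiment assumes the cell has a morphological reconstruction, which not every cell does):

ephys_filename, swc_filename, sweeps, sweeps_by_type = getExperiment(
    324257146, ['Long Square', 'Short Square'])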
Code example #5
class NwbDataSetTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(NwbDataSetTest, self).__init__(*args, **kwargs)

    def testAllDataSets(self):

        manifest_file = '/local1/projects/FHL2015/cell_types/manifest.json'
        if not os.path.exists(manifest_file):
            print "Cannot run this test: manifest does not exist (%s)" % manifest_file
            return True
        
        self.cache = CellTypesCache(manifest_file=manifest_file)
        cells = self.cache.get_cells()

        for cell in cells:
            data_set = self.cache.get_ephys_data(cell['id'])
            sweeps = self.cache.get_ephys_sweeps(cell['id'])

            for sweep in sweeps:
                metadata = data_set.get_sweep_metadata(sweep['sweep_number'])
Code example #6
File: load_stim_resp.py  Project: dyf/nnspike
def load_data(stim_names, reps=10, dur=3000, delay=200):
    ctc = CellTypesCache(manifest_file="ctc/manifest.json")
    cells = ctc.get_cells()

    cell_id = cells[0]['id']
    sweeps = ctc.get_ephys_sweeps(cell_id)
    sweeps = [(sweep['sweep_number'], sweep['stimulus_name'])
              for sweep in sweeps if sweep['stimulus_name'] in stim_names]

    ds = ctc.get_ephys_data(cell_id)

    vv, ii = [], []

    for sn, st in sweeps:
        v, i, t = load_sweep(ds, sn)

        stim_start = np.argwhere(i != 0)[0][0]

        for rep in range(reps):
            idx0 = stim_start - delay - np.random.randint(0, dur // 2)

            vr = v[idx0:]
            ir = i[idx0:]

            if st.startswith('Noise'):
                offs = [0, 200000, 400000]
                for off in offs:
                    vv.append(vr[off:off + dur])
                    ii.append(ir[off:off + dur])
            else:
                vv.append(vr[:dur])
                ii.append(ir[:dur])

    stims = np.vstack(ii)
    resps = np.vstack(vv) + 74.0

    print(stims.shape)

    return stims, resps
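
A usage sketch, with stimulus names that appear elsewhere on this page (the argument values are illustrative):

stims, resps = load_data(['Noise 1', 'Noise 2'], reps=10, dur=3000, delay=200)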
Code example #7
import os
from collections import defaultdict

import pandas as pd

from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.core.nwb_data_set import NwbDataSet
from allensdk.ephys.extract_cell_features import extract_cell_features

ctc = CellTypesCache(manifest_file='/mnt/f/allen_cell_type/manifest.json')
path = '/mnt/f/temp/allen/'
os.chdir(path)
basename = "_ephys.nwb"
file_list = pd.read_csv('id.csv', header=None, usecols=[0], skiprows=1)
features_list = [
    'tau', 'input_resistance', 'vm_for_sag', 'fi_fit_slope', 'sag',
    'rheobase_i', 'v_baseline'
]
features = defaultdict(list)
for i in range(0, len(file_list)):
    filename = str(file_list.loc[i, 0]) + basename
    data_set = NwbDataSet(filename)
    sweeps = ctc.get_ephys_sweeps(file_list.loc[i, 0])
    # group the sweeps by stimulus
    sweep_numbers = defaultdict(list)
    for sweep in sweeps:
        sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])

    # calculate features
    cell_features = extract_cell_features(data_set, sweep_numbers['Ramp'],
                                          sweep_numbers['Short Square'],
                                          sweep_numbers['Long Square'])
    cell_features = cell_features['long_squares']  # keep only the Long Square protocol features
    features["Id"].append(str(file_list.loc[i, 0]))
    for j in range(0, len(features_list)):
        features[features_list[j]].append(cell_features[features_list[j]])
features = pd.DataFrame(features)
features.to_csv('features1.csv', index=False)  # save the extracted feature table as a CSV file
Code example #8
def CreateDB(specimenList, databaseName, resetDB, manifestFile, 
             host, user, password, verbose):
    
    if verbose:
        print("CreateDB importing...")
        
    import sys
    from allensdk.ephys.extract_cell_features import extract_cell_features
    from allensdk.core.cell_types_cache import CellTypesCache
    from collections import defaultdict
    
    import mysql.connector
    
    import numpy as np
    from numpyconversion import NumpyMySQLConverter
    
    from CellSurveyTableOps import dropTable, createDonorsTable
    from CellSurveyTableOps import createSpecimensTable, createSpecimenFXsTable
    from CellSurveyTableOps import createExperimentsTable, createExperimentFXsTable
    from CellSurveyTableOps import addSpecimen, addExperiment, addDonor
    from CellSurveyTableOps import addExpFX, addSpecFX
    from ABISweepFX import getABIAnalysisPoints, ExtractSweepFeatures
    
    #### Create the database from scratch if required
    if verbose:
        print("Connecting to the database")
    
    try: 
        cnx = mysql.connector.connect(user=user, password=password,
                                      host=host, database=databaseName,
                                      converter_class=NumpyMySQLConverter)
        if verbose:
            print("Connection complete")
            
        cursobj = cnx.cursor()
    except mysql.connector.Error:
        # the database likely does not exist yet; connect without it and create it
        cnx = mysql.connector.connect(user=user, password=password, host=host,
                                      converter_class=NumpyMySQLConverter)
        if verbose:
            print(cnx)
        cursobj = cnx.cursor()
        mycmd = 'create database ' + databaseName
        cursobj.execute(mycmd)
        if verbose:
            print("Database created")
        mycmd = 'use ' + databaseName
        cursobj.execute(mycmd)
        if verbose:
            print("Using database " + databaseName)
    
    if resetDB:
        if verbose:
            print("Dropping all tables")
            
        tablenames = ['specimenFXs', 'experimentFXs', 'experiments', 
                      'specimens', 'donors']
        for tablename in tablenames:
            result = dropTable(cnx, tablename)
            if verbose:
                if result:
                    print(tablename + " table dropped")
                else:
                    print("There was a problem dropping table " + tablename)
    
        # -----
        if verbose:
            print("Creating tables")
       
        result = createDonorsTable(cnx)
        if verbose:
            if result:
                print("Donors Table created")
            else:
                print("There was a problem creating the Donors Table")
    
        result = createSpecimensTable(cnx)
        if verbose:
            if result:
                print("Specimens Table created")
            else:
                print("There was a problem creating the Specimens Table")
    
        result = createExperimentsTable(cnx)
        if verbose:
            if result:
                print("Experiments Table created")
            else:
                print("There was a problem creating the Experiments Table")
    
        result = createSpecimenFXsTable(cnx)
        if verbose:
            if result:
                print("SpecimenFXs Table created")
            else:
                print("There was a problem creating the SpecimenFXs Table")
    
        result = createExperimentFXsTable(cnx)
        if verbose:
            if result:
                print("ExperimentFXs Table created")
            else:
                print("There was a problem creating the ExperimentFXs Table")
    
        
    # ====================================================================
    # Install the ABI Datasets
    if verbose:
        print("Installing the ABI Datasets into the database")
        sys.stdout.flush()
        
    # Instantiate the CellTypesCache instance.  
    ctc = CellTypesCache(manifest_file=manifestFile)
    
    # Get metadata on all cells
    cells = ctc.get_cells()
    
    ####### ALL DONORS #######
    # Populate the donors table with all donors of all cells
    if verbose:
        print "Populating donors table"
    
    for cell in cells:
        addDonor(cnx, cell['donor_id'], cell['donor']['sex'], cell['donor']['name'])

        
    ####### ALL EPHYS FEATURES #######
    try:
        # ephys features for all cells
        allEphysFeatures = ctc.get_ephys_features()
    except Exception:
        # if there are no ephys features, we cannot do anything
        print("No ephys features available; aborting program.")
        sys.exit()
            
            
    ####### SPECIMENS #######
    # Get relevant info for each specimen in input list
    if verbose:
        print("Processing each specimen in turn")
        sys.stdout.flush()
        
    for specimen in specimenList:
        print('@@@@@ Processing specimen:', specimen)
        
        try:
            specEphysData = ctc.get_ephys_data(specimen)
        except Exception:
            # if there is no ephys data, we do not want to bother with it
            print("No ephys data for specimen", specimen, "; ignoring it.")
            continue
    
        ###### SPECIMEN >>> METADATA ######
        # Paw through the cells to find the metadata for the current specimen
        # The cell is a dictionary that has most of the "other" non-sweep stuff
        # we need such as cell averages, rheobase info, transgenic line, hemisphere, 
        # age, sex, graph order, dendrite type, area, has_burst,...
        # May be able to improve this search Pythonically 
        for cell in cells:
            datasets = cell['data_sets']
            for dataset in datasets:
                dsspec = dataset['specimen_id']
                if dsspec == specimen:
                    specCell = cell
                    break
                
        # Add the specimen to the database
        donorID = specCell['donor_id']
        specimenTableIDX = addSpecimen(cnx, donorID, specimen)
    
        ####### SPECIMEN >>> SWEEPS/EXPERIMENTS #######
        # Change these to true if show in any sweep 
        cellHasBursts = False
        cellHasDelays = False
        cellHasPauses = False
        
        # Process each sweep in turn
        sweeps = ctc.get_ephys_sweeps(specimen)
        for sweep in sweeps:
            sweepNum = sweep['sweep_number']
            
            msg = ("  Processing sweep_number: " + str(sweepNum) +
                   "  stimulus: " + str(sweep['stimulus_name']) +
                   "  num_spikes = " + str(sweep['num_spikes']))
            print(msg)
    
            # Screen out some sweep types because they are not suitable for our 
            #      simulations or because the stimulus type is not successful 
            #      in use of process_spikes() (which we use for simulations)
            databaseList = ['Long Square', 'Short Square', 'Noise 1', 'Noise 2', 
                            'Square - 2s Suprathreshold', 'Square - 0.5ms Subthreshold',
                            'Short Square - Triple', 'Ramp', 'Ramp to Rheobase']
            if sweep['stimulus_name'] not in databaseList:
                print("    Stimulus type", sweep['stimulus_name'], "not supported.")
                continue
    
            # sweepData holds index range, response data vector, sampling_rate, and stimulus vector 
            sweepData = specEphysData.get_sweep(sweepNum)
    
            # sweep_metadata holds aibs_stimulus_amplitude_pa, aibs_stimulus_name,
            #  gain, initial_access_resistance, and seal
            sweep_metadata = specEphysData.get_sweep_metadata(sweepNum)
            samplingRate = sweepData["sampling_rate"] # in Hz
            
            # Need to check if this sweep is actually an experiment
            # [not implemented]
            
            # Add the experiment to the database
            experimentIDX = addExperiment(cnx, specimenTableIDX,
                                          sweepNum, samplingRate,
                                          sweep_metadata['aibs_stimulus_name'],
                                          float(sweep_metadata['aibs_stimulus_amplitude_pa']))

            # Only Long Square is suitable for our simulations
            fxOKList = ['Long Square']
            if sweep['stimulus_name'] not in fxOKList:
                print("    Stimulus type", sweep['stimulus_name'],
                      "entered into database but not supported for feature extractions.")
                continue

            ## Create the experiment feature extraction data ## 
            # This approach seen at   
            # http://alleninstitute.github.io/AllenSDK/_static/examples/nb/
            #      cell_types.html#Computing-Electrophysiology-Features
            # index_range[0] is the "experiment" start index. 0 is the "sweep" start index
            indexRange = sweepData["index_range"]
            # For our purposes, we grab the data from the beginning of the sweep 
            #  instead of the beginning of the experiment
            # i = sweepData["stimulus"][indexRange[0]:indexRange[1]+1] # in A
            # v = sweepData["response"][indexRange[0]:indexRange[1]+1] # in V
            i = sweepData["stimulus"][0:indexRange[1]+1] # in A
            v = sweepData["response"][0:indexRange[1]+1] # in V
            i *= 1e12 # to pA
            v *= 1e3 # to mV
            t = np.arange(0, len(v)) * (1.0 / samplingRate) # in seconds
         
            ###### Do the sweep's feature extraction #######
            # Determine the position and length of the analysis window with respect
            # to the beginning of the sweep 
            stimType = sweep_metadata['aibs_stimulus_name']
            analysisPoints = getABIAnalysisPoints(stimType)
            analysis_start = analysisPoints['analysisStart']
            stimulus_start = analysisPoints['stimulusStart']
            analysis_duration = analysisPoints['analysisDuration']
    
            if verbose:
                print('analysis_start', analysis_start, 'stimulus_start',
                      stimulus_start, 'analysis_duration', analysis_duration)
    
            # Trim the analysis to end of experiment if necessary
            if (analysis_start + analysis_duration) * samplingRate >= indexRange[1]:
                end_time = (indexRange[1]-1)/samplingRate
                analysis_duration = end_time - analysis_start
    
            if verbose:
                print('analysis_start', analysis_start, 'stimulus_start',
                      stimulus_start, 'analysis_duration', analysis_duration)
    
            # Now we extract the sweep features from that analysis window
            swFXs = ExtractSweepFeatures(t, v, i, analysis_start, 
                            analysis_duration, stimulus_start, verbose)
            if len(swFXs) == 0:
                print("Skipping experiment:", specimen, '/', sweepNum, "and continuing...")
                continue
            
            if swFXs['hasBursts']: cellHasBursts = True
            if swFXs['hasPauses']: cellHasPauses = True
            if swFXs['hasDelay']: cellHasDelays = True

            ## Add the feature extraction to the database ##
            expFXs = dict(swFXs)
            # individual spike data not going into the database directly
            if 'spikeData' in expFXs:
                del expFXs['spikeData']
                   
            addExpFX(cnx, experimentIDX, expFXs)
        # end of:  for sweep in sweeps:
    
        ## Assemble the specimen feature extraction data ##
        specimenEphysFeaturesList = [f for f in allEphysFeatures if f['specimen_id'] == specimen]
        specimenEphysFeatures = specimenEphysFeaturesList[0]
         
        data_set = ctc.get_ephys_data(specCell['id'])
        sweeps = ctc.get_ephys_sweeps(specimen)
        sweep_numbers = defaultdict(list)
        for sweep in sweeps:
            sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])
    
        cell_features = (extract_cell_features(data_set, sweep_numbers['Ramp'], 
                    sweep_numbers['Short Square'], sweep_numbers['Long Square']))
        spFXs = {}
        spFXs['hasSpikes']                   = cell_features['long_squares']['spiking_sweeps'] != []
        spFXs['hero_sweep_id']               = cell_features['long_squares']['hero_sweep']['id']
        spFXs['hero_sweep_avg_firing_rate']  = cell_features['long_squares']['hero_sweep']['avg_rate']
        spFXs['hero_sweep_adaptation']       = cell_features['long_squares']['hero_sweep']['adapt']
        spFXs['hero_sweep_first_isi']        = cell_features['long_squares']['hero_sweep']['first_isi']
        spFXs['hero_sweep_mean_isi']         = cell_features['long_squares']['hero_sweep']['mean_isi']
        spFXs['hero_sweep_median_isi']       = cell_features['long_squares']['hero_sweep']['median_isi']
        spFXs['hero_sweep_isi_cv']           = cell_features['long_squares']['hero_sweep']['isi_cv']
        spFXs['hero_sweep_latency']          = cell_features['long_squares']['hero_sweep']['latency']
        spFXs['hero_sweep_stim_amp']         = cell_features['long_squares']['hero_sweep']['stim_amp']
        spFXs['hero_sweep_v_baseline']       = cell_features['long_squares']['hero_sweep']['v_baseline']
        spFXs['dendrite_type']               = specCell['dendrite_type']
        spFXs['electrode_0_pa']              = specimenEphysFeatures['electrode_0_pa']
        spFXs['f_i_curve_slope']             = specimenEphysFeatures['f_i_curve_slope']
        spFXs['fast_trough_t_long_square']   = specimenEphysFeatures['fast_trough_t_long_square']     
        spFXs['fast_trough_t_ramp']          = specimenEphysFeatures['fast_trough_t_ramp']    
        spFXs['fast_trough_t_short_square']  = specimenEphysFeatures['fast_trough_t_short_square']  
        spFXs['fast_trough_v_long_square']   = specimenEphysFeatures['fast_trough_v_long_square']
        spFXs['fast_trough_v_ramp']          = specimenEphysFeatures['fast_trough_v_ramp']    
        spFXs['fast_trough_v_short_square']  = specimenEphysFeatures['fast_trough_v_short_square']
        spFXs['has_bursts']                  = cellHasBursts
        spFXs['has_delays']                  = cellHasDelays    
        spFXs['has_pauses']                  = cellHasPauses
        spFXs['hemisphere']                  = specCell['hemisphere'] 
        spFXs['input_resistance_mohm']       = specimenEphysFeatures['input_resistance_mohm']
        spFXs['peak_t_long_square']          = specimenEphysFeatures['peak_t_long_square']
        spFXs['peak_t_ramp']                 = specimenEphysFeatures['peak_t_ramp']    
        spFXs['peak_t_short_square']         = specimenEphysFeatures['peak_t_short_square']
        spFXs['peak_v_long_square']          = specimenEphysFeatures['peak_v_long_square'] 
        spFXs['peak_v_ramp']                 = specimenEphysFeatures['peak_v_ramp']    
        spFXs['peak_v_short_square']         = specimenEphysFeatures['peak_v_short_square']
        spFXs['reporter_status']             = specCell['reporter_status']
        spFXs['rheobase_current']            = cell_features['long_squares']['rheobase_i'] 
        spFXs['ri']                          = specimenEphysFeatures['ri']
        spFXs['sagFraction']                 = specimenEphysFeatures['sag']
        spFXs['seal_gohm']                   = specimenEphysFeatures['seal_gohm']
        spFXs['slow_trough_t_long_square']   = specimenEphysFeatures['slow_trough_t_long_square']
        spFXs['slow_trough_t_ramp']          = specimenEphysFeatures['slow_trough_t_ramp']           
        spFXs['slow_trough_t_short_square']  = specimenEphysFeatures['slow_trough_t_short_square']
        spFXs['slow_trough_v_long_square']   = specimenEphysFeatures['slow_trough_v_long_square']  
        spFXs['slow_trough_v_ramp']          = specimenEphysFeatures['slow_trough_v_ramp']                
        spFXs['slow_trough_v_short_square']  = specimenEphysFeatures['slow_trough_v_short_square']
        spFXs['structure_acronym']           = specCell['structure']['acronym']  
        spFXs['structure_name']              = specCell['structure']['name']
        spFXs['tau']                         = specimenEphysFeatures['tau']
        spFXs['threshold_i_long_square']     = specimenEphysFeatures['threshold_i_long_square']
        spFXs['threshold_i_ramp']            = specimenEphysFeatures['threshold_i_ramp']              
        spFXs['threshold_i_short_square']    = specimenEphysFeatures['threshold_i_short_square']
        spFXs['threshold_t_long_square']     = specimenEphysFeatures['threshold_t_long_square']  
        spFXs['threshold_t_ramp']            = specimenEphysFeatures['threshold_t_ramp']              
        spFXs['threshold_t_short_square']    = specimenEphysFeatures['threshold_t_short_square']
        spFXs['threshold_v_long_square']     = specimenEphysFeatures['threshold_v_long_square']  
        spFXs['threshold_v_ramp']            = specimenEphysFeatures['threshold_v_ramp']              
        spFXs['threshold_v_short_square']    = specimenEphysFeatures['threshold_v_short_square']
        spFXs['transgenic_line']             = specCell['transgenic_line']
        spFXs['trough_t_long_square']        = specimenEphysFeatures['trough_t_long_square']        
        spFXs['trough_t_ramp']               = specimenEphysFeatures['trough_t_ramp']                 
        spFXs['trough_t_short_square']       = specimenEphysFeatures['trough_t_short_square'] 
        spFXs['trough_v_long_square']        = specimenEphysFeatures['trough_v_long_square']   
        spFXs['trough_v_ramp']               = specimenEphysFeatures['trough_v_ramp']                 
        spFXs['trough_v_short_square']       = specimenEphysFeatures['trough_v_short_square'] 
        spFXs['upstroke_downstroke_ratio_long_square'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_long_square']  
        spFXs['upstroke_downstroke_ratio_ramp'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_ramp']        
        spFXs['upstroke_downstroke_ratio_short_square'] \
                                = specimenEphysFeatures['upstroke_downstroke_ratio_short_square'] 
        spFXs['v_rest']                      = specimenEphysFeatures['vrest']
        spFXs['vm_for_sag']                  = specimenEphysFeatures['vm_for_sag']

        ## Add the specimen feature extraction data to the database ##
        addSpecFX(cnx, specimenTableIDX, spFXs)
    # end of:  for specimen in specimenList
    
    cnx.close()
def allen_to_model_and_features(content):
    data_set, sweeps, specimen_id = content
    sweep_numbers_ = defaultdict(list)
    for sweep in sweeps:
        sweep_numbers_[sweep['stimulus_name']].append(sweep['sweep_number'])
    try:
        sweep_numbers = data_set.get_sweep_numbers()
    except Exception:
        # the cached ephys.nwb file was deleted; re-download it
        print('erroneous deletion of relevant ephys.nwb file')
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

        data_set = ctc.get_ephys_data(specimen_id)
        sweeps = ctc.get_ephys_sweeps(specimen_id)
        file_name = 'cell_types/specimen_' + str(specimen_id) + '/ephys.nwb'
        data_set_nwb = NwbDataSet(file_name)
        try:
            sweep_numbers = data_set_nwb.get_sweep_numbers()
        except Exception:
            return None

    for sn in sweep_numbers:
        # touch each sweep; these values are overwritten on every iteration
        spike_times = data_set.get_spike_times(sn)
        sweep_data = data_set.get_sweep(sn)

    ##
    # cell_features = extract_cell_features(data_set, sweep_numbers_['Ramp'],sweep_numbers_['Short Square'],sweep_numbers_['Long Square'])
    ##
    cell_features = None
    if cell_features is not None:
        spiking_sweeps = cell_features['long_squares']['spiking_sweeps'][0]
        multi_spike_features = cell_features['long_squares']['hero_sweep']
        biophysics = cell_features['long_squares']
        shapes = cell_features['long_squares']['spiking_sweeps'][0]['spikes'][
            0]

    supras = [
        s for s in sweeps
        if s['stimulus_name'] == 'Square - 2s Suprathreshold'
    ]
    if len(supras) == 0:
        return None
    supra_numbers = [s['sweep_number'] for s in supras]

    smallest_multi = 1000
    all_currents = []
    temp_vm = None
    for sn in supra_numbers:
        spike_times = data_set.get_spike_times(sn)
        sweep_data = data_set.get_sweep(sn)

        if len(spike_times) == 1:
            inj_rheobase = np.max(sweep_data['stimulus'])
            temp_vm = sweep_data['response']
            break
        if len(spike_times) < smallest_multi and len(spike_times) > 1:
            smallest_multi = len(spike_times)
            inj_multi_spike = np.max(sweep_data['stimulus'])
            inj_rheobase = inj_multi_spike
            temp_vm = sweep_data['response']

    spike_times = data_set.get_spike_times(supras[-1]['sweep_number'])
    sweep_data = data_set.get_sweep(supras[-1]['sweep_number'])
    sd = sweep_data['stimulus']
    # sampling rate is in Hz
    sampling_rate = sweep_data['sampling_rate']

    inj = AnalogSignal(sd, sampling_rate=sampling_rate * qt.Hz, units=qt.pA)

    indices = np.where(sd == np.max(sd))[0]

    if temp_vm is None:
        return (None, None, None, None)
    vm = AnalogSignal(temp_vm, sampling_rate=sampling_rate * qt.Hz, units=qt.V)
    sm = models.StaticModel(vm)
    sm.allen = True
    sm.protocol = {}
    sm.protocol['Time_Start'] = inj.times[indices[0]]
    sm.protocol['Time_End'] = inj.times[indices[-1]]

    sm.name = specimen_id
    sm.data_set = data_set
    sm.sweeps = sweeps
    sm.inject_square_current = MethodType(inject_square_current, sm)
    sm.get_membrane_potential = MethodType(get_membrane_potential, sm)

    sm.rheobase_current = inj_rheobase
    current = {}
    current['amplitude'] = sm.rheobase_current
    sm.vm_rheobase = sm.inject_square_current(current)

    try:
        # plot both response traces as ASCII art, if asciiplotlib is available
        import asciiplotlib as apl
        for trace in (sm.vm30, sm.vm15):
            fig = apl.figure()
            fig.plot([float(t) for t in trace.times],
                     [float(v) for v in trace],
                     label="data",
                     width=100,
                     height=80)
            fig.show()
    except Exception:
        pass
    sm.get_spike_count = MethodType(get_spike_count, sm)
    subs = [
        s for s in sweeps
        if s['stimulus_name'] == 'Square - 0.5ms Subthreshold'
    ]
    sub_currents = [(s['stimulus_absolute_amplitude'] * qt.A).rescale(qt.pA)
                    for s in subs]
    if len(sub_currents) == 3:
        sm.druckmann2013_input_resistance_currents = [
            sub_currents[0], sub_currents[1], sub_currents[2]
        ]

    elif len(sub_currents) == 2:
        sm.druckmann2013_input_resistance_currents = [
            sub_currents[0], sub_currents[0], sub_currents[1]
        ]
    elif len(sub_currents) == 1:
        # unfortunately only one inhibitory current available here.
        sm.druckmann2013_input_resistance_currents = [
            sub_currents[0], sub_currents[0], sub_currents[0]
        ]
    try:
        sm.inject_square_current(sub_currents[0])
    except Exception:
        pass
    get_15_30(sm, inj_rheobase)
    everything = (sm, sweep_data, cell_features, vm)
    return everything
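
The content tuple unpacked at the top of allen_to_model_and_features matches the (data_set_nwb, sweeps, specimen_id) tuples produced by get_data_sets_from_remote in code example #1, so a hedged usage sketch:

data_sets = get_data_sets_from_remote(upper_bound=1, lower_bound=0)
result = allen_to_model_and_features(data_sets[0])
if result is not None:
    sm, sweep_data, cell_features, vm = result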
Code example #10
#-------------------------------------------------

if species == 'mouse':
    # note: this accesses data local to this folder, not the global
    # 'mouse_data' folder saved one directory up for convenience
    structured_data_directory = 'mouse_struc_data_dir'
elif species == 'human':
    structured_data_directory = 'human_struc_data_dir'
else:
    raise Exception('species not recognized')

# sort the folders into a consistent order (not strictly necessary)
folders = np.sort([os.path.join(structured_data_directory, f)
                   for f in os.listdir(structured_data_directory)])


def make_the_directory(dir):
    if not os.path.exists(dir):
        os.makedirs(dir)


for folder in folders:
    specimen_id = int(os.path.basename(folder)[:9])

    if species == 'mouse':
        dir_name = os.path.join(relative_path,
                                'mouse_nwb/specimen_' + str(specimen_id))
    elif species == 'human':
        dir_name = os.path.join(relative_path,
                                'human_nwb/specimen_' + str(specimen_id))
    else:
        raise Exception('species not recognized')

    make_the_directory(dir_name)
    ctc.get_ephys_sweeps(specimen_id, os.path.join(dir_name, 'ephys_sweeps.json'))
    ctc.get_ephys_data(specimen_id, os.path.join(dir_name, 'ephys.nwb'))

Code example #11
def sdk_nwb_information(specimen_id):
    ctc = CellTypesCache()
    nwb_data_set = ctc.get_ephys_data(specimen_id)
    sweep_info = ctc.get_ephys_sweeps(specimen_id)
    return nwb_data_set.file_name, sweep_info
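
A usage sketch (the specimen id is borrowed from code example #18):

file_name, sweep_info = sdk_nwb_information(324257146)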
Code example #12
    else:
        return True
    
# sort the data so that specifying start and end integers works
folders = np.sort([os.path.join(structured_data_directory, f)
                   for f in os.listdir(structured_data_directory)])

sp_id_to_remove = []
for ii, specimen_id_directory in enumerate(folders):
    print('looking at', ii, 'of', len(folders))  # sequential file number being run
    specimen_id = int(os.path.basename(specimen_id_directory)[:9])

    sweeps_file = os.path.join(nwb_directory, 'specimen_' + str(specimen_id), 'ephys_sweeps.json')
    nwb_file = os.path.join(nwb_directory, 'specimen_' + str(specimen_id), 'ephys.nwb')

    # load files
    the_sweeps = ctc.get_ephys_sweeps(specimen_id, sweeps_file)
    nwb = ctc.get_ephys_data(specimen_id, nwb_file)
    n1_sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 1')
    n2_sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 2')

    # check that there are at least two Noise 1 and two Noise 2 sweeps in the data nwb file
    if not check_more_than_two_sweeps(n1_sweeps, nwb):
        print(specimen_id, "has less than two noise_1 sweeps")
        logging.warning(str(specimen_id) + " has less than two noise_1 sweeps")
        sp_id_to_remove.append(specimen_id)
        continue
    if not check_more_than_two_sweeps(n2_sweeps, nwb):
        print(specimen_id, "has less than two noise_2 sweeps")
        logging.warning(str(specimen_id) + " has less than two noise_2 sweeps")
        sp_id_to_remove.append(specimen_id)
        continue
Code example #13
File: test_glif.py  Project: iconstartup/AllenSDK
from allensdk.api.queries.glif_api import GlifApi
from allensdk.core.cell_types_cache import CellTypesCache
import allensdk.core.json_utilities as json_utilities

neuronal_model_id = 566302806

# download model metadata
glif_api = GlifApi()
nm = glif_api.get_neuronal_models_by_id([neuronal_model_id])[0]

# download the model configuration file (a dict keyed by model id)
neuron_config = glif_api.get_neuron_configs([neuronal_model_id])
json_utilities.write('neuron_config.json', neuron_config)

# download information about the cell
ctc = CellTypesCache()
ctc.get_ephys_data(nm['specimen_id'], file_name='stimulus.nwb')
ctc.get_ephys_sweeps(nm['specimen_id'], file_name='ephys_sweeps.json')
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('neuron_config.json')['566302806']
neuron = GlifNeuron.from_dict(neuron_config)

# make a short square pulse. stimulus units should be in Amps.
stimulus = [0.0] * 100 + [10e-9] * 100 + [0.0] * 100

# important! set the neuron's dt value for your stimulus in seconds
neuron.dt = 5e-6

# simulate the neuron
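# (completion sketch: the AllenSDK GLIF quickstart continues this way;
# GlifNeuron.run returns a dict of traces keyed as below)
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']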
Code example #14
File: glif_ex.py  Project: AllenInstitute/AllenSDK
from allensdk.api.queries.glif_api import GlifApi
from allensdk.core.cell_types_cache import CellTypesCache
import allensdk.core.json_utilities as json_utilities

neuronal_model_id = 566302806

# download model metadata
glif_api = GlifApi()
nm = glif_api.get_neuronal_models_by_id([neuronal_model_id])[0]

# download the model configuration file (a dict keyed by model id)
neuron_config = glif_api.get_neuron_configs([neuronal_model_id])
json_utilities.write('neuron_config.json', neuron_config)

# download information about the cell
ctc = CellTypesCache()
ctc.get_ephys_data(nm['specimen_id'], file_name='stimulus.nwb')
ctc.get_ephys_sweeps(nm['specimen_id'], file_name='ephys_sweeps.json')

#===============================================================================
# example 2
#===============================================================================

import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron

# initialize the neuron
neuron_config = json_utilities.read('neuron_config.json')['566302806']
neuron = GlifNeuron.from_dict(neuron_config)

# make a short square pulse. stimulus units should be in Amps.
stimulus = [0.0] * 100 + [10e-9] * 100 + [0.0] * 100
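# (hedged completion: the original glif_ex.py continues as in code example #13)
neuron.dt = 5e-6
output = neuron.run(stimulus)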
Code example #15
specimen_id = '474637203'  # Htr3a cre line
# find the sub-folder whose name contains the specimen id
sub_folder = os.path.join(
    data_path,
    next(fname for fname in os.listdir(data_path) if specimen_id in fname))
file = get_file_path_endswith(sub_folder, '_preprocessor_values.json')
neuron_dict = ju.read(file)
R_NO_asc = neuron_dict['resistance']['R_test_list']['mean']
R_asc = neuron_dict['resistance']['R_fit_ASC_and_R']['mean']
C = neuron_dict['capacitance']['C_test_list']['mean']
El = neuron_dict['El']['El_noise']['measured']['mean']

# get the sweeps
dir_name = os.path.join(relative_path, 'mouse_nwb/specimen_' + specimen_id)
the_sweeps = ctc.get_ephys_sweeps(int(specimen_id),
                                  os.path.join(dir_name, 'ephys_sweeps.json'))
noise1_sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 1')

# put data in the format required for functions below
n1_s1_data = ctc.get_ephys_data(int(specimen_id),
                                os.path.join(dir_name, 'ephys.nwb')).get_sweep(
                                    noise1_sweeps[0])
sr = n1_s1_data['sampling_rate']
dt = 1. / sr
suthresh_i = n1_s1_data['stimulus'][n1_s1_data['index_range'][0] +
                                    int(1.2 / dt):int(3. / dt)]
suthresh_v = []
for s in noise1_sweeps:
    data = ctc.get_ephys_data(int(specimen_id),
                              os.path.join(dir_name, 'ephys.nwb')).get_sweep(s)
    # slice assumed to mirror the stimulus slice above
    suthresh_v.append(data['response'][data['index_range'][0] +
                                       int(1.2 / dt):int(3. / dt)])
    if specimen_id == '580895033':
        # note that one could copy the data from the ephys_sweeps.json file from the archive
        # at http://download.alleninstitute.org/informatics-archive/september-2017/mouse_cell_types/glif/
        # to a directory named 'cell_data' (where the other data is automatically downloaded if one
        # is reprocessing data from the Allen Institute Cell Types Database) and then comment
        # in the relevant line below to get the values. However, here I save you from that necessity.
        #        all_sweeps=ctc.get_ephys_sweeps(580895033, file_name=os.path.join(relative_path,'mouse_nwb/specimen_580895033/ephys_sweeps.json'))
        #        num_of_spikes=np.mean([s['num_spikes'] for s in all_sweeps if s['stimulus_name'] == 'Noise 1' ])
        #        recip=1./num_of_spikes
        num_of_spikes = 295.333333333
        recip = 0.0033860045146726866
    else:
        sweeps_file = os.path.join(nwb_directory,
                                   'specimen_' + str(specimen_id),
                                   'ephys_sweeps.json')
        all_sweeps = ctc.get_ephys_sweeps(int(specimen_id), sweeps_file)
        num_of_spikes = np.mean([
            s['num_spikes'] for s in all_sweeps
            if s['stimulus_name'] == 'Noise 1'
        ])
        recip = 1. / num_of_spikes
    reciprocal_num_sp_list.append(recip)

    ev_LIFASC = get_ev_percent_from_calculated_file(
        'GLIF3_exp_var_ratio_10ms.json', folder, 'after_opt', 'noise_2')
    ev_LIFASC_list.append(ev_LIFASC)
    all_neurons.append(
        [specimen_id, cre, std_err, length, num_of_spikes, recip, ev_LIFASC])

# create input for multiple linear regression
matrix = [std_error_list, spike_length_list, reciprocal_num_sp_list]
#------------SPECIFY WHETHER THE CODE IS BEING RUN INSIDE THE INSTITUTE---------------------------
#------------------------------------------------------------------------------------------------

where_running = 'external'
# where_running = 'internal'

# load data out of configuration files
data_path = os.path.join(relative_path, 'create_data_dir/human_data')
# sort the data so that specifying start and end integers works
folders = np.sort([os.path.join(data_path, f) for f in os.listdir(data_path)])

all_neurons = []
for ii, folder in enumerate(folders):
    # folder names start with the 9-digit specimen id, followed by the cre line
    specimen_id = int(os.path.basename(folder)[:9])
    cre = os.path.basename(folder)[10:]
    the_sweeps = ctc.get_ephys_sweeps(specimen_id)
    noise1_sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 1')
    noise2_sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 2')
    pairs = [['_GLIF1_neuron_config.json', '(LIF)'],
             ['_GLIF2_neuron_config.json', '(LIF-R)'],
             ['_GLIF3_neuron_config.json', '(LIF-ASC)'],
             ['_GLIF4_neuron_config.json', '(LIF-R-ASC)'],
             ['_GLIF5_neuron_config.json', '(LIF-R-ASC-A)']]
    for pair in pairs:  #TODO: update this to use new function or check files
        glif_spike_ind_n1 = get_model_spike_times_from_nwb(
            pair[0], folder, pair[1], noise1_sweeps, where_running)
        if not check_spike_times_identical(glif_spike_ind_n1):
            print(ii, specimen_id, 'has unmatching noise 1 spike times')
            print(glif_spike_ind_n1)
Code example #18
# example 2
#===============================================================================

from allensdk.core.cell_types_cache import CellTypesCache
from allensdk.ephys.extract_cell_features import extract_cell_features
from collections import defaultdict

# initialize the cache
ctc = CellTypesCache(manifest_file='cell_types/manifest.json')

# pick a cell to analyze
specimen_id = 324257146

# download the ephys data and sweep metadata
data_set = ctc.get_ephys_data(specimen_id)
sweeps = ctc.get_ephys_sweeps(specimen_id)

# group the sweeps by stimulus 
sweep_numbers = defaultdict(list)
for sweep in sweeps:
    sweep_numbers[sweep['stimulus_name']].append(sweep['sweep_number'])

# calculate features
cell_features = extract_cell_features(data_set,
                                      sweep_numbers['Ramp'],
                                      sweep_numbers['Short Square'],
                                      sweep_numbers['Long Square'])
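
The result is a nested dictionary. As a short sketch (key names taken from code example #7 on this page), the summary features for the long-square sweeps can be read back like this:

long_squares = cell_features['long_squares']
print(long_squares['rheobase_i'], long_squares['input_resistance'], long_squares['tau'])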

#===============================================================================
# example 3
#===============================================================================
Code example #19
LIFRASC_Htr3a = plt.subplot2grid((10, 2), (6, 0), rowspan=2)
LIFRASCAT_Htr3a = plt.subplot2grid((10, 2), (8, 0), rowspan=2)

current2 = plt.subplot2grid((10, 2), (0, 1))
data2 = plt.subplot2grid((10, 2), (1, 1))
LIF_Ctgf = plt.subplot2grid((10, 2), (2, 1))
LIFR_Ctgf = plt.subplot2grid((10, 2), (3, 1))
LIFASC_Ctgf = plt.subplot2grid((10, 2), (4, 1), rowspan=2)
LIFRASC_Ctgf = plt.subplot2grid((10, 2), (6, 1), rowspan=2)
LIFRASCAT_Ctgf = plt.subplot2grid((10, 2), (8, 1), rowspan=2)
x_lim = [18, 18.3]

##---474637203Htr3a------

dir_name = os.path.join(relative_path, 'mouse_nwb/specimen_' + str(474637203))
the_sweeps = ctc.get_ephys_sweeps(474637203,
                                  os.path.join(dir_name, 'ephys_sweeps.json'))
nwb = ctc.get_ephys_data(474637203, os.path.join(dir_name, 'ephys.nwb'))
sweeps = get_sweep_num_by_name(the_sweeps, 'Noise 2')
data = []
spike_times = []
for s in sweeps:
    spike_times.append(nwb.get_spike_times(s))
    data.append(nwb.get_sweep(s))

print('loading LIF')
LIF_model = pickle.load(
    open("pkl_data/474637203Htr3a-Cre_NO152_LIF_model.pkl", "rb"))
print('loading LIFR')
LIFR_model = pickle.load(
    open("pkl_data/474637203Htr3a-Cre_NO152_LIFR_model.pkl", "rb"))
print('loading LIFASC')