Code Example #1
from uuid import uuid1

import numpy as np
import nsdf


def hh_compartment_with_channels():
    filename = 'hh_compartment_with_channels.h5'
    compartment = nsdf.ModelComponent('compartment', uid=uuid1().hex)
    na_channel = nsdf.ModelComponent('NaChannel', uid=uuid1().hex,
                                     parent=compartment)
    k_channel = nsdf.ModelComponent('KChannel', uid=uuid1().hex,
                                    parent=compartment)
    writer = nsdf.NSDFWriter(filename, mode='w')
    writer.add_modeltree(compartment)
    source_ds = writer.add_static_ds(compartment.name, [compartment.uid])
    # Add membrane resistance
    data_obj = nsdf.StaticData('Rm', unit='Mohm')
    data_obj.put_data(compartment.uid, 1e3)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane capacitance
    data_obj = nsdf.StaticData('Cm', unit='pF')
    data_obj.put_data(compartment.uid, 0.9)
    writer.add_static_data(source_ds, data_obj)
    # Add leak reversal potential
    data_obj = nsdf.StaticData('Em', unit='mV')
    data_obj.put_data(compartment.uid, -65.0)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane potential
    source_ds = writer.add_uniform_ds(compartment.name, [compartment.uid])
    data_obj = nsdf.UniformData('Vm', unit='mV')
    data_obj.put_data(compartment.uid,
                      np.random.uniform(low=-67.0, high=-63.0, size=100))
    data_obj.set_dt(0.1, unit='ms')
    writer.add_uniform_data(source_ds, data_obj)
    source_ds = writer.add_uniform_ds('channel', [na_channel.uid,
                                                  k_channel.uid])
    data_obj = nsdf.UniformData('Ik', unit='nA', dt=0.1, tunit='ms')
    # `ina` and `ik` are assumed to be precomputed channel current traces;
    # random placeholders keep this snippet self-contained.
    ina = np.random.uniform(size=100)
    ik = np.random.uniform(size=100)
    data_obj.update_source_data_dict([(na_channel.uid, ina),
                                      (k_channel.uid, ik)])
    writer.add_uniform_data(source_ds, data_obj)
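
A quick way to see the layout this function produces is to walk the resulting file with plain h5py (a sketch, not part of the original example):

import h5py

hh_compartment_with_channels()
with h5py.File('hh_compartment_with_channels.h5', 'r') as fd:
    fd.visit(print)  # print the name of every group/dataset NSDF wrote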
Code Example #2
import os
from datetime import datetime

import numpy as np
import moose
import nsdf
import granule  # the MOOSE granule-cell demo module (assumed importable)


def example():
    directory = os.path.dirname(granule.__file__)
    current = os.getcwd()
    os.chdir(directory)
    start_time = datetime.now()
    granule.loadGran98NeuroML_L123(granule.filename)
    end_time = datetime.now()
    tvec = np.arange(0.0, granule.runtime, granule.plotdt)
    soma_path = '/cells[0]/Gran_0[0]/Soma_0[0]'
    ca = moose.element('{}/Gran_CaPool_98/data/somaCa'.format(soma_path))
    vm = moose.element('{}/data[0]/somaVm[0]'.format(soma_path))
    os.chdir(current)
    writer = nsdf.NSDFWriter('granulecell.h5', mode='w', compression='gzip')
    writer.add_model_filecontents([directory])
    ca_data = nsdf.UniformData('Ca', unit='mM', dt=granule.plotdt, tunit='s')
    ca_data.put_data(soma_path, ca.vector)
    source_ds = writer.add_uniform_ds('GranuleSoma', [soma_path])
    writer.add_uniform_data(source_ds, ca_data)
    vm_data = nsdf.UniformData('Vm', unit='V', dt=granule.plotdt, tunit='s')
    vm_data.put_data(soma_path, vm.vector)
    writer.add_uniform_data(source_ds, vm_data)
    writer.title = 'Sample NSDF file for olfactory bulb granule cell model'
    writer.description = ('This file stores the entire model'
                          ' directory in `/model/filecontent`')
    writer.tstart = start_time
    writer.tend = end_time
    writer.creator = [os.environ['USER']]
    writer.contributor = ['Subhasis Ray', 'Aditya Gilra']
    writer.license = 'CC BY-SA'
    writer.software = ['Python2.7', 'moose', 'nsdf python library']
    writer.method = ['exponential Euler']
    print('Finished writing example NSDF file for GranuleCell demo')
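
Since the description above says the model directory is stored under `/model/filecontent`, the saved files can be listed back with plain h5py; a minimal sketch:

import h5py

with h5py.File('granulecell.h5', 'r') as fd:
    fd['/model/filecontent'].visit(print)  # names of the copied model files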
Code Example #3
import sys

import moose
import numpy


def save_NSDF(cPlotDt, ePlotDt, voxel_val_dict, vox_info):  # added by Chaitanya
    sys.path.append('../..')
    import nsdf
    chem_sources = []
    writer = nsdf.NSDFWriter('moose_multi.h5', mode='w')
    data_obj = nsdf.UniformData('conc', unit='mM')
    for x in moose.wildcardFind('/graphs/chem/#[ISA=Table]'):
        chem_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    chem_source_ds = writer.add_uniform_ds('chem', chem_sources)
    data_obj.set_dt(cPlotDt, unit='s')
    writer.add_uniform_data(chem_source_ds, data_obj)

    data_obj = nsdf.UniformData('Vm', unit='V')
    elec_sources = []
    for x in moose.wildcardFind('/graphs/elec/#[ISA=Table]'):
        elec_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    elec_source_ds = writer.add_uniform_ds('elec', elec_sources)
    data_obj.set_dt(ePlotDt, unit='s')
    writer.add_uniform_data(elec_source_ds, data_obj)

    data_obj = nsdf.UniformData('[Ca]', unit='mM')
    ca_sources = []
    for x in moose.wildcardFind('/graphs/ca/#[ISA=Table]'):
        ca_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    ca_source_ds = writer.add_uniform_ds('Ca', ca_sources)
    data_obj.set_dt(ePlotDt, unit='s')
    writer.add_uniform_data(ca_source_ds, data_obj)

    # Fall back to native h5py operations for a multidimensional
    # uniform dataset.
    h5 = writer._fd
    ds = h5.create_dataset('/data/uniform/chem/voxel',
                           dtype=numpy.float32,
                           shape=(vox_info[0], vox_info[1],
                                  len(voxel_val_dict)))
    idx = 0
    label_list = []
    for ii, jj in voxel_val_dict.items():
        ds[:, :, idx] = jj
        label_list.append(ii)
        idx += 1
    label_ds = h5.create_dataset('/map/uniform/spine_vox', data=label_list)
    voxel_ds = h5.create_dataset('/map/uniform/vox_number',
                                 data=numpy.arange(vox_info[0]))
    tie_data_map(ds, label_ds, 'source', axis=2)
    tie_data_map(ds, voxel_ds, 'voxel_number', axis=0)
    ds.attrs.create('dt', data=vox_info[2])
    ds.attrs.create('field', data='conc')
    ds.attrs.create('tstart', data=0.0)
    ds.attrs.create('unit', data='mM')
    ds.attrs.create('timeunit', data='s')
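
The helper tie_data_map() is defined elsewhere in the original script. A minimal sketch of what it plausibly does, assuming it attaches the map dataset to the data dataset as an HDF5 dimension scale (h5py >= 2.10):

def tie_data_map(d_set, m_set, name, axis=0):
    # Hypothetical reconstruction: label the axis and attach `m_set` as a
    # dimension scale so readers can recover the source/voxel mapping.
    m_set.make_scale(name)
    d_set.dims[axis].label = name
    d_set.dims[axis].attach_scale(m_set)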
Code Example #4
File: test_nsdfwriter.py  Project: nsdf/nsdf
def setUp(self):
    self.mdict = create_ob_model_tree()
    # self.mdict['model_tree'].print_tree()
    self.filepath = '{}.h5'.format(self.id())
    writer = nsdf.NSDFWriter(self.filepath,
                             mode='w',
                             dialect=nsdf.dialect.ONED)
    writer.add_modeltree(self.mdict['model_tree'])
    # print '######## Model Tree ################'
    # writer.modelroot.print_tree()
    # print '========================'
    self.granule_somata = []
    self.popname = 'pop0'
    for cell in self.mdict['granule_cells']:
        for name, comp in list(cell.children.items()):
            if name == 'gc_0':
                self.granule_somata.append(comp.uid)
    uds = writer.add_uniform_ds(self.popname, self.granule_somata)
    self.data_object = nsdf.UniformData('Vm', unit='mV', field='Vm')
    self.dlen = 5
    for uid in self.granule_somata:
        self.data_object.put_data(
            uid, np.random.uniform(-65, -55, size=self.dlen))
    self.data_object.set_dt(1e-4, 's')
    self.tstart = 0.0
    data = writer.add_uniform_data(uds,
                                   self.data_object,
                                   tstart=self.tstart)
Code Example #5
from uuid import uuid1

import numpy as np
import nsdf


def create_example():
    # First create the model tree
    model = nsdf.ModelComponent('LeisureCenter', uid=uuid1().hex)
    poolroom = nsdf.ModelComponent('PoolRoom', uid=uuid1().hex, parent=model)
    tables = []
    balls = []
    for ii in range(2):
        tables.append(
            nsdf.ModelComponent('table_{}'.format(ii),
                                uid=uuid1().hex,
                                parent=poolroom))
        for jj in range(3):
            balls.append(
                nsdf.ModelComponent('ball_{}'.format(jj),
                                    uid=uuid1().hex,
                                    parent=tables[-1]))
    id_path_dict = model.get_id_path_dict()
    path_id_dict = {value: key for key, value in id_path_dict.items()}
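    # NOTE: id_path_dict/path_id_dict give uid <-> tree-path lookups; they
    # are built for convenience and not used further in this snippet.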

    # Create the NSDF writer object
    writer = nsdf.NSDFWriter('poolroom.h5', mode='w')
    writer.add_modeltree(model)
    dataobj = nsdf.StaticData('area', unit='m^2')
    dataobj.put_data(poolroom.uid, [100.0])
    source_ds = writer.add_static_ds('rooms', [poolroom.uid])
    writer.add_static_data(source_ds, dataobj)
    source_ds = writer.add_static_ds('tables', [tab.uid for tab in tables])
    dataobj = nsdf.StaticData('height', unit='m')
    for tab in tables:
        dataobj.put_data(tab.uid, 1.2)
    writer.add_static_data(source_ds, dataobj)
    source_ds = writer.add_nonuniform_ds_1d('tables', 'players',
                                            [tab.uid for tab in tables])
    dataobj = nsdf.NonuniformData('players',
                                  unit='item',
                                  tunit='hour',
                                  dtype=np.int32)
    for tab in tables:
        times = np.cumsum(np.random.exponential(1 / 10.0, size=10))
        dataobj.put_data(tab.uid, (np.random.randint(10, size=10), times))
    writer.add_nonuniform_1d(source_ds, dataobj,
                             {tab.uid: tab.name
                              for tab in tables})
    source_ds = writer.add_uniform_ds('balls', [ball.uid for ball in balls])
    dataobj = nsdf.UniformData('x', unit='cm')
    for ball in balls:
        dataobj.put_data(ball.uid, np.random.rand(10) * 10)
    dataobj.set_dt(1e-2, 's')
    writer.add_uniform_data(source_ds, dataobj)

    source_ds = writer.add_event_ds_1d('balls', 'hit',
                                       [ball.uid for ball in balls])
    dataobj = nsdf.EventData('hit', unit='s')
    source_name_dict = {}
    for ball in balls:
        source_name_dict[ball.uid] = '{}_{}'.format(ball.name, ball.uid)
        data = np.cumsum(np.random.exponential(1 / 100.0, size=10))
        dataobj.put_data(ball.uid, data)
    writer.add_event_1d(source_ds, dataobj, source_name_dict)
Code Example #6
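# DATAFILE, DATADIR, INCREMENTAL_STEP and the module-level data objects
# (DATA, ca_data, Vm_data, spike_data) are defined elsewhere in the same
# benchmark script; Code Example #14 below shows that setup.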
def benchmark_write_oned_incremental(**kwargs):
    compression = kwargs.get('compression', '')
    prefix = DATAFILE.split('.')[0]
    filename = '{}_ONED_{}_incr.h5'.format(prefix, compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             dialect=nsdf.dialect.ONED,
                             mode='w',
                             **kwargs)
    cont_src = writer.add_uniform_ds('continuous_recorded',
                                     ca_data.get_sources())
    spike_sources = writer.add_event_ds_1d('all_cells', 'spike',
                                           spike_data.get_sources())
    for ii in range(
            0,
            int(DATA['simtime'] / DATA['dt'] + 0.5) + INCREMENTAL_STEP // 2,
            INCREMENTAL_STEP):
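        # Each pass writes one INCREMENTAL_STEP-sized chunk; successive
        # add_uniform_data()/add_event_1d() calls for the same sources are
        # appended to the existing datasets, which is the incremental-write
        # path this benchmark measures.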
        ca_data_tmp = nsdf.UniformData(ca_data.name,
                                       unit=ca_data.unit,
                                       dt=ca_data.dt,
                                       tunit=ca_data.tunit)
        for src, data in ca_data.get_source_data_dict().items():
            ca_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, ca_data_tmp)
        Vm_data_tmp = nsdf.UniformData(Vm_data.name,
                                       unit=Vm_data.unit,
                                       dt=Vm_data.dt,
                                       tunit=Vm_data.tunit)
        for src, data in Vm_data.get_source_data_dict().items():
            Vm_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, Vm_data_tmp)
        tstart = ii * Vm_data.dt
        tend = (ii + INCREMENTAL_STEP) * Vm_data.dt
        spike_data_tmp = nsdf.EventData(spike_data.name,
                                        unit=spike_data.unit,
                                        dtype=spike_data.dtype)
        for src, data in spike_data.get_source_data_dict().items():
            spike_data_tmp.put_data(src,
                                    data[(data >= tstart) & (data < tend)])
        writer.add_event_1d(spike_sources, spike_data_tmp)
Code Example #7
File: nsdfio.py  Project: gilad4591/malDetect
    def _write_signal_data(self, model, channels, r_signal, signal,
                           source_ds, writer):
        dataobj = nsdf.UniformData('signal', unit=str(signal.units.dimensionality))
        dataobj.dtype = signal.dtype
        for i in range(len(channels)):
            dataobj.put_data(channels[i].uid, r_signal[i])

        dataobj.set_dt(float(signal.sampling_period.magnitude),
                       str(signal.sampling_period.dimensionality))

        rescaled_tstart = signal.t_start.rescale(signal.sampling_period.dimensionality)
        writer.add_uniform_data(source_ds, dataobj,
                                tstart=float(rescaled_tstart.magnitude))
        model.attrs['t_start_unit'] = str(signal.t_start.dimensionality)
Code Example #8
File: nsdfio.py  Project: dmthvlngm/python-neo
    def _write_signal_data(self, model, channels, r_signal, signal, source_ds,
                           writer):
        dataobj = nsdf.UniformData('signal',
                                   unit=str(signal.units.dimensionality))
        dataobj.dtype = signal.dtype
        for i in range(len(channels)):
            dataobj.put_data(channels[i].uid, r_signal[i])

        dataobj.set_dt(float(signal.sampling_period.magnitude),
                       str(signal.sampling_period.dimensionality))

        self._write_analogsignal_t_start(dataobj, model, signal, source_ds,
                                         writer)
Code Example #9
File: traub2005.py  Project: nsdf/nsdf
def create_example(dialect=nsdf.dialect.ONED, simtime=10.0, dt=1e-4):
    """Create a sample NSDF file using the specified dialect.

    The file stores the model tree (down to single compartments),
    spike times from all the cells (categorized into populations by
    celltype), and Vm for 10% of the cells of each type as uniformly
    sampled data.

    """
    start_time = datetime.now()
    writer = nsdf.NSDFWriter('traub_et_al_2005_{}.h5'.format(dialect),
                             mode='w',
                             dialect=dialect)
    print('Start add_modeltree')
    writer.add_modeltree(model)
    print('End add_modeltree')
    for celltype, cell_list in list(model.cells.items()):
        event_sources = [cell.compartments[cell.presyn - 1]
                         for cell in cell_list]
        event_data = nsdf.EventData('spiketime', unit='s', dtype=FLOATDTYPE)
        for src in event_sources:
            num_spikes = np.random.randint(EVENT_MAX)
            times = np.cumsum(np.random.exponential(1 / 10.0, size=num_spikes))
            times = times[times < simtime].copy()
            event_data.put_data(src.uid, times)
        if dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR):
            event_ds = writer.add_event_ds_1d(celltype, 'spiketime',
                                              event_data.get_sources())
            writer.add_event_1d(event_ds, event_data)
        else:
            event_ds = writer.add_event_ds(celltype, event_data.get_sources())
            dialect_eventwriter_map[dialect](writer, event_ds, event_data)
        vm_sources = random.sample(event_sources, len(event_sources) // 10)
        vm_data = nsdf.UniformData('Vm', unit='V', field='Vm')
        vm_data.set_dt(dt, unit='s')
        for src in vm_sources:
            vm = np.random.uniform(-120e-3, 40e-3, size=int(simtime // dt))
            vm_data.put_data(src.uid, vm)
        vm_ds = writer.add_uniform_ds(celltype, vm_data.get_sources())
        writer.add_uniform_data(vm_ds, vm_data)
    end_time = datetime.now()
    writer.title = 'Sample NSDF file for Traub et al 2005 model'
    writer.description = 'This file uses {} dialect of NSDF'.format(dialect)
    writer.tstart = start_time
    writer.tend = end_time
    writer.creator = [os.environ['USER']]
    writer.license = 'CC BY-SA'
    writer.software = ['Python2.7', 'nsdf python library']
    print('Finished writing example NSDF file for dialect {}'.format(dialect))
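
This excerpt relies on module-level imports (os, random, numpy as np, datetime) and module-level names (`model`, `EVENT_MAX`, `FLOATDTYPE`, `dialect_eventwriter_map`) from the original traub2005.py. A plausible sketch of the dialect-to-writer mapping, assuming it dispatches to the same writer methods used in Code Example #12:

# Hypothetical reconstruction; the original definition lives elsewhere
# in traub2005.py.
dialect_eventwriter_map = {
    nsdf.dialect.VLEN: nsdf.NSDFWriter.add_event_vlen,
    nsdf.dialect.NANPADDED: nsdf.NSDFWriter.add_event_nan,
}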
Code Example #10
from datetime import datetime, timedelta

import numpy as np
import nsdf


def hh_vm():
    model = nsdf.ModelComponent('compartment')
    # `vm_array` is assumed to be a precomputed Vm trace; a random
    # placeholder keeps this snippet self-contained.
    vm_array = np.random.uniform(low=-67.0, high=-63.0, size=100)
    data_container = nsdf.UniformData('Vm', unit='mV', dt=0.1, tunit='ms')
    data_container.put_data(model.uid, vm_array)
    writer = nsdf.NSDFWriter('hh_vm.h5', mode='w')
    writer.set_properties({'title': 'Example Vm recording',
                           'creator': 'user',
                           'software': ['python2.7', 'python-nsdf-0.1'],
                           'method': ['np.random.rand'],
                           'description': 'Randomly generated Vm for a'
                                          ' single compartment',
                           'rights': 'CC-BY-SA',
                           'tstart': datetime.now(),
                           'tend': (datetime.now() + timedelta(seconds=3)),
                           'contributor': ['Chaitanya Chintaluri',
                                           'Daniel Wojcik',
                                           'Upinder Bhalla']})
                           
    writer.add_modeltree(model)
    source_ds = writer.add_uniform_ds('compartment_population', [model.uid])
    writer.add_uniform_data(source_ds, data_container)
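
The metadata passed to set_properties() can be verified by reading the file back; a minimal sketch, assuming NSDF stores these fields as HDF5 attributes on the file root:

import h5py

hh_vm()
with h5py.File('hh_vm.h5', 'r') as fd:
    print(dict(fd.attrs))  # title, creator, rights, ... (assumed root attrs)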
Code Example #11
from uuid import uuid1

import numpy as np
import nsdf


def hh_compartment():
    filename = 'hh_compartment.h5'
    model = nsdf.ModelComponent('compartment', uid=uuid1().hex)
    writer = nsdf.NSDFWriter(filename, mode='w')
    writer.add_modeltree(model)
    source_ds = writer.add_static_ds(model.name, [model.uid])
    # Add membrane resistance
    data_obj = nsdf.StaticData('Rm', unit='Mohm')
    data_obj.put_data(model.uid, 1e3)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane capacitance
    data_obj = nsdf.StaticData('Cm', unit='pF')
    data_obj.put_data(model.uid, 0.9)
    writer.add_static_data(source_ds, data_obj)
    # Add leak reversal potential
    data_obj = nsdf.StaticData('Em', unit='mV')
    data_obj.put_data(model.uid, -65.0)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane potential
    source_ds = writer.add_uniform_ds(model.name, [model.uid])
    data_obj = nsdf.UniformData('Vm', unit='mV', dt=0.1, tunit='ms')
    # `vm_array` is assumed to be precomputed; use a random placeholder.
    vm_array = np.random.uniform(low=-67.0, high=-63.0, size=100)
    data_obj.put_data(model.uid, vm_array)
    writer.add_uniform_data(source_ds, data_obj)
Code Example #12
File: test_nsdfreader.py  Project: nsdf/nsdf
from datetime import datetime

import numpy as np
import nsdf


# create_ob_model_tree() builds a small olfactory-bulb model tree; it is
# defined elsewhere in the test suite (cf. Code Example #4, which uses it).
def create_test_data_file(filename, dialect):
    """Create a data file at path `filename` using dialect `dialect`."""
    tstart = datetime.now()
    mdict = create_ob_model_tree()
    uniform_data = nsdf.UniformData('Vm',
                                    unit='mV',
                                    field='Vm',
                                    dt=1e-2,
                                    tunit='ms')
    for cell in mdict['granule_cells']:
        uniform_data.put_data(cell.children['gc_0'].uid,
                              np.random.uniform(-63, -57, 100))

    if dialect == nsdf.dialect.NUREGULAR:
        size = 150
        nonuniform_data = nsdf.NonuniformRegularData('Im',
                                                     unit='pA',
                                                     field='Im',
                                                     tunit='ms')
        nonuniform_data.set_times(np.cumsum(np.random.rand(size)))
        for ii, cell in enumerate(mdict['mitral_cells']):
            nonuniform_data.put_data(cell.children['mc_0'].uid,
                                     np.random.rand(size))
    else:
        nonuniform_data = nsdf.NonuniformData('Im',
                                              unit='pA',
                                              field='Im',
                                              tunit='ms',
                                              dtype=np.float32)
        sizes = 150 + np.random.randint(-50, 50, len(mdict['mitral_cells']))
        for ii, cell in enumerate(mdict['mitral_cells']):
            data = np.random.rand(sizes[ii])
            times = np.cumsum(np.random.rand(sizes[ii]))
            assert len(data) == len(times)
            nonuniform_data.put_data(cell.children['mc_0'].uid, (data, times))
    sizes = 200 + np.random.randint(-50, 50, len(mdict['cells']))
    event_data = nsdf.EventData('spike', unit='ms', dtype=np.float32)
    for ii, cell in enumerate(mdict['cells']):
        times = np.cumsum(np.random.exponential(scale=0.01, size=sizes[ii]))
        event_data.put_data(cell.uid, times)
    writer = nsdf.NSDFWriter(filename, dialect=dialect, mode='w')
    writer.add_modeltree(mdict['model_tree'])
    uniform_ds = writer.add_uniform_ds('granule', uniform_data.get_sources())
    writer.add_uniform_data(uniform_ds, uniform_data)

    if dialect == nsdf.dialect.ONED:
        nonuniform_ds = writer.add_nonuniform_ds_1d(
            'mitral', 'Im', nonuniform_data.get_sources())
    else:
        nonuniform_ds = writer.add_nonuniform_ds('mitral',
                                                 nonuniform_data.get_sources())
    if (dialect == nsdf.dialect.ONED) or (dialect == nsdf.dialect.NUREGULAR):
        event_ds = writer.add_event_ds_1d('cells', 'spike',
                                          event_data.get_sources())
    else:
        event_ds = writer.add_event_ds('cells', event_data.get_sources())
    if dialect == nsdf.dialect.ONED:
        writer.add_nonuniform_1d(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.NUREGULAR:
        writer.add_nonuniform_regular(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.VLEN:
        writer.add_nonuniform_vlen(nonuniform_ds, nonuniform_data)
        writer.add_event_vlen(event_ds, event_data)
    elif dialect == nsdf.dialect.NANPADDED:
        writer.add_nonuniform_nan(nonuniform_ds, nonuniform_data)
        writer.add_event_nan(event_ds, event_data)
    else:
        raise Exception('unknown dialect: {}'.format(dialect))
    tend = datetime.now()
    description = 'Testing nsdf reader'
    title = 'NSDFReader.test'
    creator = ['Subhasis Ray']
    contributor = ['Chaitanya Chintaluri', 'Daniel Wojcik', 'Upinder Bhalla']
    software = ['Python']
    method = ['manual']
    license = 'CC-BY-SA'
    rights = 'Subhasis Ray, 2014'
    writer.tstart = tstart
    writer.tend = tend
    writer.title = title
    writer.creator = creator
    writer.description = description
    writer.contributor = contributor
    writer.software = software
    writer.method = method
    writer.rights = rights
    writer.license = license
    return {
        'uniform_data': uniform_data,
        'nonuniform_data': nonuniform_data,
        'event_data': event_data,
        'title': title,
        'creator': creator,
        'contributor': contributor,
        'software': software,
        'tstart': tstart,
        'tend': tend,
        'description': description,
        'method': method,
        'rights': rights,
        'license': license,
    }
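
A hypothetical driver exercising every dialect this function handles:

if __name__ == '__main__':
    for dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR,
                    nsdf.dialect.VLEN, nsdf.dialect.NANPADDED):
        create_test_data_file('test_{}.h5'.format(dialect), dialect)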
Code Example #13
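        # (excerpt) `morp_dset`, `loc_array`, `loc_names`, `h5`, `dump_type`,
        # `tie_data_map`, `i_cp_names` and the `*_hdf5` arrays are all
        # defined earlier in the original script.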
        morp_dset[:, 'd1'] = loc_array[:, 7]

        morp_dset.attrs.create(
            'unit', ['um', 'um', 'um', 'um', 'um', 'um', 'um', 'um'])
        morp_dset.attrs.create(
            'field', ['x0', 'y0', 'z0', 'd0', 'x1', 'y1', 'z1', 'd1'])

        morp_names = h5.create_dataset('/map/static/Hay', data=loc_names)
        tie_data_map(morp_dset, morp_names, 'source', 0)

    elif dump_type == 2:
        sys.path.append('../../')
        import nsdf
        writer = nsdf.NSDFWriter('hay_currents_nsdf.h5', mode='w')
        curr_source_ds = writer.add_uniform_ds('hay_currs', i_cp_names)
        data_obj_1 = nsdf.UniformData('i', unit='nA')
        data_obj_2 = nsdf.UniformData('i_pas', unit='nA')
        data_obj_3 = nsdf.UniformData('i_cap', unit='nA')
        data_obj_4 = nsdf.UniformData('i_ca', unit='nA')
        data_obj_5 = nsdf.UniformData('i_na', unit='nA')
        data_obj_6 = nsdf.UniformData('i_k', unit='nA')
        data_obj_7 = nsdf.UniformData('i_ih', unit='nA')

        for ii, source in enumerate(i_cp_names):
            data_obj_1.put_data(source, total_hdf5[ii])
            data_obj_2.put_data(source, i_pas_hdf5[ii])
            data_obj_3.put_data(source, i_cap_hdf5[ii])
            data_obj_4.put_data(source, i_ca_hdf5[ii])
            data_obj_5.put_data(source, i_na_hdf5[ii])
            data_obj_6.put_data(source, i_k_hdf5[ii])
            data_obj_7.put_data(source, i_ih_hdf5[ii])
Code Example #14
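        # (excerpt) this `return` closes read_data(), defined earlier in the
        # original benchmark script; the module-level objects created below
        # are the ones consumed by Code Example #6.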
        return {
            'cell_ids': cell_ids,
            'electrode_ids': electrode_ids,
            'spike': spike,
            'ca': ca,
            'Vm': Vm,
            'lfp': lfp,
            'dt': plotdt,  # note that stim has a different dt
            'simtime': simtime
        }


DATA = read_data()
ca_data = nsdf.UniformData('Ca',
                           field='conc',
                           unit='mM',
                           dt=DATA['dt'],
                           tunit='s')
ca_data.update_source_data_dict(DATA['ca'])
Vm_data = nsdf.UniformData('Vm',
                           field='Vm',
                           unit='V',
                           dt=DATA['dt'],
                           tunit='s')
Vm_data.update_source_data_dict(DATA['Vm'])
spike_data = nsdf.EventData('spike',
                            field='spiketime',
                            unit='s',
                            dtype=np.float32)
spike_data.update_source_data_dict(DATA['spike'])
Code Example #15
def setUp(self):
    self.data = nsdf.UniformData('test', unit='m', field='distance')
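
A brief sketch of how such a data object is typically exercised (put_data, set_dt, and get_sources all appear in the examples above); the source uid 'comp_0' is made up:

import numpy as np
import nsdf

data = nsdf.UniformData('test', unit='m', field='distance')
data.put_data('comp_0', np.zeros(10))  # 'comp_0' is a hypothetical source uid
data.set_dt(1e-3, 's')
print(data.get_sources())              # expected: ['comp_0']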