Example #1
 def test_append_data(self):
     """Try appending data to existing NaN-padded event dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='a',
                              dialect=nsdf.dialect.NANPADDED)
     source_ds = writer.mapping[nsdf.EVENT][self.popname]
     rate = 100.0
     new_dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, cell in enumerate(self.mdict['mitral_cells']):
         uid = cell.uid
         data = np.cumsum(
             np.random.exponential(scale=1.0 / rate, size=new_dlen[ii]))
         self.data_object.put_data(uid, data)
     writer.add_event_nan(source_ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         dataset = fd['/data'][nsdf.EVENT][self.popname][self.varname]
         for ii, cell in enumerate(self.mdict['mitral_cells']):
             uid = cell.uid
             orig_data = self.data_object.get_data(uid)
             file_data = dataset[ii,
                                 self.dlen[ii]:self.dlen[ii] + new_dlen[ii]]
             nptest.assert_allclose(orig_data, file_data)
             # check that the remainder of this row is NaN padding
             nptest.assert_allclose(
                 dataset[ii, self.dlen[ii] + new_dlen[ii]:], np.nan)
     os.remove(self.filepath)
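The NANPADDED dialect keeps one row per source in a single 2D dataset and
pads short event trains with NaN. A minimal read-back sketch, assuming the
`filepath`, `popname`, and `varname` used in the test above:

import h5py as h5
import numpy as np
import nsdf

with h5.File(filepath, 'r') as fd:
    dataset = fd['/data'][nsdf.EVENT][popname][varname]
    for row in dataset:
        spikes = row[~np.isnan(row)]  # drop the NaN padding
        print(len(spikes), 'events')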
Example #2
 def setUp(self):
     self.mdict = create_ob_model_tree()
     self.filepath = '{}.h5'.format(self.id())
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='w',
                              dialect=nsdf.dialect.NANPADDED)
     writer.creator = ['Subhasis Ray']
     writer.title = self.id()
     mitral_somata = []
     for cell in self.mdict['mitral_cells']:
         for name, comp in list(cell.children.items()):
             if name == 'mc_0':
                 mitral_somata.append(comp.uid)
     self.sources = mitral_somata
     self.popname = 'pop1'
     ds = writer.add_nonuniform_ds(self.popname, self.sources)
     rate = 100.0
     self.dlen = np.random.poisson(lam=rate, size=len(self.sources))
     self.data_object = nsdf.NonuniformData(name='Vm',
                                            unit='mV',
                                            tunit='ms')
     self.src_name_dict = {}
     for ii, uid in enumerate(mitral_somata):
         data = np.random.uniform(-65, -55, size=self.dlen[ii])
         time = np.random.uniform(0, 1, size=self.dlen[ii])
         self.data_object.put_data(uid, (data, time))
     dd = writer.add_nonuniform_nan(ds, self.data_object)
Example #3
 def setUp(self):
     """Create a poisson spike train for each cell in mitral population and
     save the data as 1D event data"""
     self.mdict = create_ob_model_tree()
     self.filepath = '{}.h5'.format(self.id())
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='w',
                              dialect=nsdf.dialect.NANPADDED)
     writer.title = self.id()
     self.sources = [cell.uid for cell in self.mdict['mitral_cells']]
     self.popname = 'pop1'
     self.field = 'spike'
     self.unit = 's'
     self.varname = 'spike'
     ds = writer.add_event_ds(self.popname, self.sources)
     self.data_object = nsdf.EventData(self.varname,
                                       unit=self.unit,
                                       field=self.field)
     self.src_name_dict = {}
     rate = 100.0
     self.dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, cell in enumerate(self.mdict['mitral_cells']):
         uid = cell.uid
         data = np.cumsum(
             np.random.exponential(scale=1.0 / rate, size=self.dlen[ii]))
         self.data_object.put_data(uid, data)
         # this is not required to be cell.name, any valid hdf5
         # name will do
         self.src_name_dict[uid] = cell.name
     dd = writer.add_event_nan(ds, self.data_object)
Example #4
 def test_append_data(self):
     """Try appending data to existing NaN-padded nonuniform dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='a',
                              dialect=nsdf.dialect.NANPADDED)
     source_ds = writer.mapping[nsdf.NONUNIFORM][self.popname]
     self.assertTrue(nsdf.match_datasets(self.sources, source_ds))
     rate = 100.0
     new_dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, uid in enumerate(self.sources):
         data = np.cumsum(
             np.random.exponential(scale=1.0 / rate, size=new_dlen[ii]))
         time = np.random.uniform(0, 1, size=new_dlen[ii])
         self.data_object.put_data(uid, (data, time))
     writer.add_nonuniform_nan(source_ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         data_path = '/data/{}/{}/{}'.format(nsdf.NONUNIFORM, self.popname,
                                             self.data_object.name)
         dataset = fd[data_path]
         time_ds = dataset.dims[1]['time']
         for ii, uid in enumerate(self.sources):
             orig_data, orig_time = self.data_object.get_data(uid)
             file_data = dataset[ii,
                                 self.dlen[ii]:self.dlen[ii] + new_dlen[ii]]
             nptest.assert_allclose(orig_data, file_data)
             # check that the remainder of this row is NaN padding
             nptest.assert_allclose(
                 dataset[ii, self.dlen[ii] + new_dlen[ii]:], np.nan)
             file_time = time_ds[ii,
                                 self.dlen[ii]:self.dlen[ii] + new_dlen[ii]]
             nptest.assert_allclose(orig_time, file_time)
             nptest.assert_allclose(
                 time_ds[ii, self.dlen[ii] + new_dlen[ii]:], np.nan)
     os.remove(self.filepath)
Example #5
def hh_compartment_with_channels():
    filename = 'hh_compartment_with_channels.h5'
    compartment = nsdf.ModelComponent('compartment', uid=uuid1().hex)
    na_channel = nsdf.ModelComponent('NaChannel', uid=uuid1().hex,
                                     parent=compartment)
    k_channel = nsdf.ModelComponent('KChannel', uid=uuid1().hex,
                                    parent=compartment)
    writer = nsdf.NSDFWriter(filename, mode='w')
    writer.add_modeltree(compartment)
    source_ds = writer.add_static_ds(compartment.name, [compartment.uid])
    # Add membrane resistance
    data_obj = nsdf.StaticData('Rm', unit='Mohm')
    data_obj.put_data(compartment.uid, 1e3)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane capacitance
    data_obj = nsdf.StaticData('Cm', unit='pF')
    data_obj.put_data(compartment.uid, 0.9)
    writer.add_static_data(source_ds, data_obj)
    # Add leak reversal potential
    data_obj = nsdf.StaticData('Em', unit='mV')
    data_obj.put_data(compartment.uid, -65.0)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane potential
    source_ds = writer.add_uniform_ds(compartment.name, [compartment.uid])
    data_obj = nsdf.UniformData('Vm', unit='mV')
    data_obj.put_data(compartment.uid,
                      np.random.uniform(low=-67.0, high=-63.0, size=100))
    data_obj.set_dt(0.1, unit='ms')
    writer.add_uniform_data(source_ds, data_obj)
    source_ds = writer.add_uniform_ds('channel', [na_channel.uid,
                                                  k_channel.uid])
    data_obj = nsdf.UniformData('Ik', unit='nA', dt=0.1, tunit='ms')
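    # NOTE: the channel current arrays `ina` and `ik` are assumed to be
    # defined elsewhere in the original script (module-level globals).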
    data_obj.update_source_data_dict([(na_channel.uid, ina),
                                      (k_channel.uid, ik)])
    writer.add_uniform_data(source_ds, data_obj)
Example #6
    def setUp(self):
        self.mdict = create_ob_model_tree()
        self.filepath = '{}.h5'.format(self.id())
        writer = nsdf.NSDFWriter(self.filepath,
                                 mode='w',
                                 dialect=nsdf.dialect.NUREGULAR)
        writer.title = self.id()
        mitral_somata = []
        for cell in self.mdict['mitral_cells']:
            for name, comp in list(cell.children.items()):
                if name == 'mc_0':
                    mitral_somata.append(comp.uid)

        self.popname = 'pop1'
        ds = writer.add_nonuniform_ds(self.popname, mitral_somata)
        self.dlen = 1000
        self.data_object = nsdf.NonuniformRegularData(name='Vm',
                                                      unit='mV',
                                                      tunit='ms')
        self.data_object.set_times(np.random.uniform(0, 1, size=self.dlen))
        self.src_name_dict = {}
        for ii, uid in enumerate(mitral_somata):
            data = np.random.uniform(-65, -55, size=self.dlen)
            self.data_object.put_data(uid, data)
        dd = writer.add_nonuniform_regular(ds, self.data_object)
Example #7
    def setUp(self):
        self.mdict = create_ob_model_tree()
        self.filepath = '{}.h5'.format(self.id())
        writer = nsdf.NSDFWriter(self.filepath,
                                 mode='w',
                                 dialect=nsdf.dialect.VLEN)
        writer.title = self.id()
        mitral_somata = []
        for cell in self.mdict['mitral_cells']:
            for name, comp in list(cell.children.items()):
                if name == 'mc_0':
                    mitral_somata.append(comp.uid)

        self.popname = 'pop1'
        self.dlen = np.random.randint(10, 100, size=len(mitral_somata))
        self.field = 'Vm'
        self.unit = 'mV'
        self.tunit = 's'
        self.varname = 'Vm'
        ds = writer.add_nonuniform_ds(self.popname, mitral_somata)
        # FIXME: vlen does not support float64
        self.data_object = nsdf.NonuniformData(self.varname,
                                               unit=self.unit,
                                               field=self.field,
                                               tunit=self.tunit,
                                               dtype=np.float32)
        self.src_name_dict = {}
        for ii, uid in enumerate(mitral_somata):
            data = np.random.uniform(-65, -55, size=self.dlen[ii])
            times = np.cumsum(
                np.random.exponential(scale=0.01, size=self.dlen[ii]))
            self.data_object.put_data(uid, (data, times))
        dd = writer.add_nonuniform_vlen(ds, self.data_object)
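Under the VLEN dialect each row of the dataset is a variable-length array,
so no padding is needed and h5py returns every row at its own length. A
read-back sketch under the same naming assumptions as the setUp above
(`filepath`, `popname`, `varname`):

import h5py as h5
import nsdf

with h5.File(filepath, 'r') as fd:
    dataset = fd['/data'][nsdf.NONUNIFORM][popname][varname]
    time_ds = dataset.dims[0]['time']  # vlen time arrays, one row per source
    for values, times in zip(dataset, time_ds):
        assert len(values) == len(times)  # one sampling time per value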
Example #8
 def test_append_data(self):
     """Try appending data to existing nonuniform 1d datasets"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath, mode='a')
     ds = writer.mapping[nsdf.NONUNIFORM][self.popname][
         self.data_object.name]
     for uid in ds['source']:
         data = np.random.uniform(-65, -55, size=self.dlen)
         times = np.cumsum(np.random.exponential(scale=0.01,
                                                 size=self.dlen))
         self.data_object.put_data(uid, (data, times))
     data = writer.add_nonuniform_1d(ds, self.data_object,
                                     self.src_name_dict)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         nucontainer = fd['/data'][nsdf.NONUNIFORM]
         data_grp = nucontainer[self.popname][self.data_object.name]
         for dataset_name in data_grp:
             dataset = data_grp[dataset_name]
             srcuid = dataset.attrs['source']
             nptest.assert_allclose(
                 self.data_object.get_data(srcuid)[0], dataset[-self.dlen:])
             ts = dataset.dims[0]['time']
             nptest.assert_allclose(
                 self.data_object.get_data(srcuid)[1], ts[-self.dlen:])
     os.remove(self.filepath)
Example #9
    def setUp(self):
        self.mdict = create_ob_model_tree()
        self.filepath = '{}.h5'.format(self.id())
        writer = nsdf.NSDFWriter(self.filepath,
                                 mode='w',
                                 dialect=nsdf.dialect.ONED)
        writer.title = self.id()
        mitral_somata = []
        for cell in self.mdict['mitral_cells']:
            for name, comp in list(cell.children.items()):
                if name == 'mc_0':
                    mitral_somata.append(comp.uid)

        self.popname = 'pop1'
        self.data_object = nsdf.NonuniformData('Vm',
                                               unit='mV',
                                               field='Vm',
                                               tunit='ms')
        ds = writer.add_nonuniform_ds_1d(self.popname, self.data_object.name,
                                         mitral_somata)
        self.dlen = 1000
        self.src_name_dict = {}
        for ii, uid in enumerate(mitral_somata):
            data = np.random.uniform(-65, -55, size=self.dlen)
            times = np.cumsum(np.random.exponential(scale=0.01,
                                                    size=self.dlen))
            self.data_object.put_data(uid, (data, times))
            self.src_name_dict[uid] = str('vm_{}'.format(ii))
        dd = writer.add_nonuniform_1d(ds, self.data_object, self.src_name_dict)
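In the ONED dialect every source gets its own 1D dataset (named via
`src_name_dict`) with a 'time' dimension scale, which the append test in
Example #8 exercises. A read-back sketch, assuming the `filepath` and
`popname` created above:

import h5py as h5
import nsdf

with h5.File(filepath, 'r') as fd:
    data_grp = fd['/data'][nsdf.NONUNIFORM][popname]['Vm']
    for name, dataset in data_grp.items():
        times = dataset.dims[0]['time']  # per-source sampling times
        print(name, dataset.attrs['source'], len(dataset), len(times))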
Example #10
def example():
    directory = os.path.dirname(granule.__file__)
    current = os.getcwd()
    os.chdir(directory)
    start_time = datetime.now()    
    granule.loadGran98NeuroML_L123(granule.filename)
    end_time = datetime.now()
    tvec = np.arange(0.0, granule.runtime, granule.plotdt)
    soma_path = '/cells[0]/Gran_0[0]/Soma_0[0]'
    ca = moose.element('{}/Gran_CaPool_98/data/somaCa'.format(soma_path))
    vm = moose.element('{}/data[0]/somaVm[0]'.format(soma_path))
    os.chdir(current)
    writer = nsdf.NSDFWriter('granulecell.h5', mode='w', compression='gzip')
    writer.add_model_filecontents([directory])
    ca_data = nsdf.UniformData('Ca', unit='mM', dt=granule.plotdt, tunit='s')
    ca_data.put_data(soma_path, ca.vector)
    source_ds = writer.add_uniform_ds('GranuleSoma', [soma_path])
    writer.add_uniform_data(source_ds, ca_data)
    vm_data = nsdf.UniformData('Vm', unit='V', dt=granule.plotdt, tunit='s')
    vm_data.put_data(soma_path, vm.vector)
    writer.add_uniform_data(source_ds, vm_data)
    writer.title = 'Sample NSDF file for olfactory bulb granule cell model'
    writer.description = ('This file stores the entire model '
                          'directory in `/model/filecontent`')
    writer.tstart = start_time
    writer.tend = end_time
    writer.creator = [os.environ['USER']]
    writer.contributor = ['Subhasis Ray', 'Aditya Gilra']
    writer.license = 'CC BY-SA'
    writer.software = ['Python2.7', 'moose', 'nsdf python library']
    writer.method = ['exponential Euler']
    print('Finished writing example NSDF file for GranuleCell demo')
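A small sketch to verify what add_model_filecontents() stored; the
'/model/filecontent' path is taken from the description string above and
is an assumption about the file layout:

import h5py as h5

# List every file archived under /model/filecontent.
with h5.File('granulecell.h5', 'r') as fd:
    fd['/model/filecontent'].visit(print)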
Example #11
 def setUp(self):
     self.mdict = create_ob_model_tree()
     # self.mdict['model_tree'].print_tree()
     self.filepath = '{}.h5'.format(self.id())
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='w',
                              dialect=nsdf.dialect.ONED)
     writer.add_modeltree(self.mdict['model_tree'])
     # print '######## Model Tree ################'
     # writer.modelroot.print_tree()
     # print '========================'
     self.granule_somata = []
     self.popname = 'pop0'
     for cell in self.mdict['granule_cells']:
         for name, comp in list(cell.children.items()):
             if name == 'gc_0':
                 self.granule_somata.append(comp.uid)
     uds = writer.add_uniform_ds(self.popname, self.granule_somata)
     self.data_object = nsdf.UniformData('Vm', unit='mV', field='Vm')
     self.dlen = 5
     for uid in self.granule_somata:
         self.data_object.put_data(
             uid, np.random.uniform(-65, -55, size=self.dlen))
     self.data_object.set_dt(1e-4, 's')
     self.tstart = 0.0
     data = writer.add_uniform_data(uds,
                                    self.data_object,
                                    tstart=self.tstart)
Example #12
def create_example():
    # First create the model tree
    model = nsdf.ModelComponent('LeisureCenter', uid=uuid1().hex)
    poolroom = nsdf.ModelComponent('PoolRoom', uid=uuid1().hex, parent=model)
    tables = []
    balls = []
    for ii in range(2):
        tables.append(
            nsdf.ModelComponent('table_{}'.format(ii),
                                uid=uuid1().hex,
                                parent=poolroom))
        for jj in range(3):
            balls.append(
                nsdf.ModelComponent('ball_{}'.format(jj),
                                    uid=uuid1().hex,
                                    parent=tables[-1]))
    id_path_dict = model.get_id_path_dict()
    path_id_dict = {value: key for key, value in id_path_dict.items()}

    # Create the NSDF writer object
    writer = nsdf.NSDFWriter('poolroom.h5', mode='w')
    writer.add_modeltree(model)
    dataobj = nsdf.StaticData('area', unit='m^2')
    dataobj.put_data(poolroom.uid, [100.0])
    source_ds = writer.add_static_ds('rooms', [poolroom.uid])
    writer.add_static_data(source_ds, dataobj)
    source_ds = writer.add_static_ds('tables', [tab.uid for tab in tables])
    dataobj = nsdf.StaticData('height', unit='m')
    for tab in tables:
        dataobj.put_data(tab.uid, 1.2)
    writer.add_static_data(source_ds, dataobj)
    source_ds = writer.add_nonuniform_ds_1d('tables', 'players',
                                            [tab.uid for tab in tables])
    dataobj = nsdf.NonuniformData('players',
                                  unit='item',
                                  tunit='hour',
                                  dtype=np.int32)
    for tab in tables:
        times = np.cumsum(np.random.exponential(1 / 10.0, size=10))
        dataobj.put_data(tab.uid, (np.random.randint(10, size=10), times))
    writer.add_nonuniform_1d(source_ds, dataobj,
                             {tab.uid: tab.name
                              for tab in tables})
    source_ds = writer.add_uniform_ds('balls', [ball.uid for ball in balls])
    dataobj = nsdf.UniformData('x', unit='cm')
    for ball in balls:
        dataobj.put_data(ball.uid, np.random.rand(10) * 10)
    dataobj.set_dt(1e-2, 's')
    writer.add_uniform_data(source_ds, dataobj)

    source_ds = writer.add_event_ds_1d('balls', 'hit',
                                       [ball.uid for ball in balls])
    dataobj = nsdf.EventData('hit', unit='s')
    source_name_dict = {}
    for ball in balls:
        source_name_dict[ball.uid] = '{}_{}'.format(ball.name, ball.uid)
        data = np.cumsum(np.random.exponential(1 / 100.0, size=10))
        dataobj.put_data(ball.uid, data)
    writer.add_event_1d(source_ds, dataobj, source_name_dict)
Example #13
def save_NSDF(cPlotDt, ePlotDt, voxel_val_dict, vox_info):  #added by Chaitanya
    sys.path.append('../..')
    import nsdf
    chem_sources = []
    writer = nsdf.NSDFWriter('moose_multi.h5', mode='w')
    data_obj = nsdf.UniformData('conc', unit='mM')
    for x in moose.wildcardFind('/graphs/chem/#[ISA=Table]'):
        chem_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    chem_source_ds = writer.add_uniform_ds('chem', chem_sources)
    data_obj.set_dt(cPlotDt, unit='s')
    writer.add_uniform_data(chem_source_ds, data_obj)

    data_obj = nsdf.UniformData('Vm', unit='V')
    elec_sources = []
    for x in moose.wildcardFind('/graphs/elec/#[ISA=Table]'):
        elec_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    elec_source_ds = writer.add_uniform_ds('elec', elec_sources)
    data_obj.set_dt(ePlotDt, unit='s')
    writer.add_uniform_data(elec_source_ds, data_obj)

    data_obj = nsdf.UniformData('[Ca]', unit='mM')
    ca_sources = []
    for x in moose.wildcardFind('/graphs/ca/#[ISA=Table]'):
        ca_sources.append(x.name)
        data_obj.put_data(x.name, x.vector)
    ca_source_ds = writer.add_uniform_ds('Ca', ca_sources)
    data_obj.set_dt(ePlotDt, unit='s')
    writer.add_uniform_data(ca_source_ds, data_obj)

    h5 = writer._fd  #Falling back to using native h5py operations. Multidimensional uniform dataset.
    ds = h5.create_dataset('/data/uniform/chem/voxel',
                           dtype=numpy.float32,
                           shape=(vox_info[0], vox_info[1],
                                  len(voxel_val_dict)))
    label_list = []
    for idx, (label, values) in enumerate(voxel_val_dict.items()):
        ds[:, :, idx] = values
        label_list.append(label)
    label_ds = h5.create_dataset('/map/uniform/spine_vox', data=label_list)
    voxel_ds = h5.create_dataset('/map/uniform/vox_number',
                                 data=numpy.arange(vox_info[0]))
    tie_data_map(ds, label_ds, 'source', axis=2)
    tie_data_map(ds, voxel_ds, 'voxel_number', axis=0)
    ds.attrs.create('dt', data=vox_info[2])
    ds.attrs.create('field', data='conc')
    ds.attrs.create('tstart', data=0.0)
    ds.attrs.create('unit', data='mM')
    ds.attrs.create('timeunit', data='s')
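`tie_data_map` is defined elsewhere in this script; a hypothetical sketch
of what it presumably does, using h5py dimension scales (the same
mechanism nsdf uses for its 'source' and 'time' maps):

def tie_data_map(data_ds, map_ds, name, axis=0):
    # Hypothetical reconstruction: attach `map_ds` to `data_ds` as a
    # dimension scale named `name` along dimension `axis`.
    map_ds.make_scale(name)                  # h5py >= 2.10
    data_ds.dims[axis].attach_scale(map_ds)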
Example #14
File: traub2005.py (project: nsdf/nsdf)
def create_example(dialect=nsdf.dialect.ONED, simtime=10.0, dt=1e-4):
    """Create a sample NSDF file using the specified dialect.

    The file stores the model tree (down to single compartments) and

    spike times from all the cells, categorized into populations by
    celltype.
    
    Vm for 10% of the cells of each type as uniformly sampled data.

    """
    start_time = datetime.now()
    writer = nsdf.NSDFWriter('traub_et_al_2005_{}.h5'.format(dialect),
                             mode='w',
                             dialect=dialect)
    print('Start add_modeltree')
    writer.add_modeltree(model)
    print('End add_modeltree')
    for celltype, cell_list in list(model.cells.items()):
        event_sources = [cell.compartments[cell.presyn - 1]
                         for cell in cell_list]
        event_data = nsdf.EventData('spiketime', unit='s', dtype=FLOATDTYPE)
        for src in event_sources:
            num_spikes = np.random.randint(EVENT_MAX)
            times = np.cumsum(np.random.exponential(1 / 10.0, size=num_spikes))
            times = times[times < simtime].copy()
            event_data.put_data(src.uid, times)
        if dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR):
            event_ds = writer.add_event_ds_1d(celltype, 'spiketime',
                                              event_data.get_sources())
            writer.add_event_1d(event_ds, event_data)
        else:
            event_ds = writer.add_event_ds(celltype, event_data.get_sources())
            dialect_eventwriter_map[dialect](writer, event_ds, event_data)
        vm_sources = random.sample(event_sources, len(event_sources) // 10)
        vm_data = nsdf.UniformData('Vm', unit='V', field='Vm')
        vm_data.set_dt(dt, unit='s')
        for src in vm_sources:
            vm = np.random.uniform(-120e-3, 40e-3, size=int(simtime // dt))
            vm_data.put_data(src.uid, vm)
        vm_ds = writer.add_uniform_ds(celltype, vm_data.get_sources())
        writer.add_uniform_data(vm_ds, vm_data)
    end_time = datetime.now()
    writer.title = 'Sample NSDF file for Traub et al 2005 model'
    writer.description = 'This file uses {} dialect of NSDF'.format(dialect)
    writer.tstart = start_time
    writer.tend = end_time
    writer.creator = [os.environ['USER']]
    writer.license = 'CC BY-SA'
    writer.software = ['Python2.7', 'nsdf python library']
    print('Finished writing example NSDF file for dialect {}'.format(dialect))
Example #15
def benchmark_write_nanpadded(**kwargs):
    compression = kwargs.get('compression', '')
    filename = '{}_NAN_{}.h5'.format(DATAFILE.split('.')[0], compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             mode='w',
                             dialect=nsdf.dialect.NANPADDED,
                             **kwargs)
    cont_rec_sources = writer.add_uniform_ds('continuous_recorded',
                                             ca_data.get_sources())
    writer.add_uniform_data(cont_rec_sources, ca_data, fixed=True)
    writer.add_uniform_data(cont_rec_sources, Vm_data, fixed=True)
    spike_sources = writer.add_event_ds('all_cells', spike_data.get_sources())
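    # assuming the NaN-padded event writer accepts the same `fixed`
    # flag as the other writer calls in these benchmarks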
    writer.add_event_nan(spike_sources, spike_data, fixed=True)
Example #16
def benchmark_write_oned(**kwargs):
    """Write LFP, Vm, spike trains, and any other data."""
    compression = kwargs.get('compression', '')
    filename = '{}_ONED_{}.h5'.format(DATAFILE.split('.')[0], compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             mode='w',
                             dialect=nsdf.dialect.ONED,
                             **kwargs)
    cont_rec_sources = writer.add_uniform_ds('continuous_recorded',
                                             ca_data.get_sources())
    writer.add_uniform_data(cont_rec_sources, ca_data)
    writer.add_uniform_data(cont_rec_sources, Vm_data)
    spike_sources = writer.add_event_ds_1d('all_cells', 'spike',
                                           spike_data.get_sources())
    writer.add_event_1d(spike_sources, spike_data)
Example #17
 def test_append_data(self):
     """Try appending data to existing uniformly sampled dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath, mode='a')
     ds = writer.mapping['uniform'][self.popname]
     for uid in self.granule_somata:
         self.data_object.put_data(
             uid, np.random.uniform(-65, -55, size=self.dlen))
     data = writer.add_uniform_data(ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         uniform_container = fd['/data'][nsdf.UNIFORM]
         data = uniform_container[self.popname][self.data_object.name]
         for row, source in zip(data, data.dims[0]['source']):
             nptest.assert_allclose(row[-self.dlen:],
                                    self.data_object.get_data(source))
     os.remove(self.filepath)
Example #18
def hh_vm():
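    # `vm_array` is assumed to be a module-level NumPy array of Vm samples.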
    model = nsdf.ModelComponent('compartment')
    data_container = nsdf.UniformData('Vm', unit='mV', dt=0.1, tunit='ms')
    data_container.put_data(model.uid, vm_array)                            
    writer = nsdf.NSDFWriter('hh_vm.h5', mode='w')
    writer.set_properties({'title': 'Example Vm recording',
                           'creator': 'user',
                           'software': ['python2.7', 'python-nsdf-0.1'],
                           'method': ['np.random.rand'],
                           'description': 'Randomly generated Vm for a '
                                          'single compartment',
                           'rights': 'CC-BY-SA',
                           'tstart': datetime.now(),
                           'tend': (datetime.now() + timedelta(seconds=3)),
                           'contributor': ['Chaitanya Chintaluri',
                                           'Daniel Wojcik',
                                           'Upinder Bhalla']})
                           
    writer.add_modeltree(model)
    source_ds = writer.add_uniform_ds('compartment_population', [model.uid])
    writer.add_uniform_data(source_ds, data_container)
Example #19
def benchmark_write_oned_incremental(**kwargs):
    compression = kwargs.get('compression', '')
    prefix = DATAFILE.split('.')[0]
    filename = '{}_ONED_{}_incr.h5'.format(prefix, compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             dialect=nsdf.dialect.ONED,
                             mode='w',
                             **kwargs)
    cont_src = writer.add_uniform_ds('continuous_recorded',
                                     ca_data.get_sources())
    spike_sources = writer.add_event_ds_1d('all_cells', 'spike',
                                           spike_data.get_sources())
    for ii in range(
            0,
            int(DATA['simtime'] / DATA['dt'] + 0.5) + INCREMENTAL_STEP // 2,
            INCREMENTAL_STEP):
        ca_data_tmp = nsdf.UniformData(ca_data.name,
                                       unit=ca_data.unit,
                                       dt=ca_data.dt,
                                       tunit=ca_data.tunit)
        for src, data in ca_data.get_source_data_dict().items():
            ca_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, ca_data_tmp)
        Vm_data_tmp = nsdf.UniformData(Vm_data.name,
                                       unit=Vm_data.unit,
                                       dt=Vm_data.dt,
                                       tunit=Vm_data.tunit)
        for src, data in Vm_data.get_source_data_dict().items():
            Vm_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, Vm_data_tmp)
        tstart = ii * Vm_data.dt
        tend = (ii + INCREMENTAL_STEP) * Vm_data.dt
        spike_data_tmp = nsdf.EventData(spike_data.name,
                                        unit=spike_data.unit,
                                        dtype=spike_data.dtype)
        for src, data in spike_data.get_source_data_dict().items():
            spike_data_tmp.put_data(src,
                                    data[(data >= tstart) & (data < tend)])
        writer.add_event_1d(spike_sources, spike_data_tmp)
Example #20
def hh_compartment():
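    # `vm_array` is assumed to be a module-level NumPy array of Vm samples.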
    filename = 'hh_compartment.h5'
    model = nsdf.ModelComponent('compartment', uid=uuid1().hex)
    writer = nsdf.NSDFWriter(filename, mode='w')
    writer.add_modeltree(model)
    source_ds = writer.add_static_ds(model.name, [model.uid])
    # Add membrane resistance
    data_obj = nsdf.StaticData('Rm', unit='Mohm')
    data_obj.put_data(model.uid, 1e3)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane capacitance
    data_obj = nsdf.StaticData('Cm', unit='pF')
    data_obj.put_data(model.uid, 0.9)
    writer.add_static_data(source_ds, data_obj)
    # Add leak reversal potential
    data_obj = nsdf.StaticData('Em', unit='mV')
    data_obj.put_data(model.uid, -65.0)
    writer.add_static_data(source_ds, data_obj)
    # Add membrane potential
    source_ds = writer.add_uniform_ds(model.name, [model.uid])
    data_obj = nsdf.UniformData('Vm', unit='mV', dt=0.1, tunit='ms')
    data_obj.put_data(model.uid, vm_array)
    writer.add_uniform_data(source_ds, data_obj)
Example #21
 def test_append_data(self):
     """Try appending data to existing 1D event dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath, mode='a')
     ds = writer.mapping[nsdf.EVENT][self.popname][self.data_object.name]
     rate = 100.0
     new_dlen = np.random.poisson(lam=rate, size=len(self.sources))
     for ii, cell in enumerate(self.mdict['mitral_cells']):
         uid = cell.uid
         data = np.cumsum(
             np.random.exponential(scale=1.0 / rate, size=new_dlen[ii]))
         self.data_object.put_data(uid, data)
     writer.add_event_1d(ds, self.data_object, self.src_name_dict)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         eventcontainer = fd['/data'][nsdf.EVENT]
         data_grp = eventcontainer[self.popname][self.data_object.name]
         for ii, cell in enumerate(self.mdict['mitral_cells']):
             uid = cell.uid
             dataset = data_grp[cell.name]
             nptest.assert_allclose(self.data_object.get_data(uid),
                                    dataset[self.dlen[ii]:])
     os.remove(self.filepath)
Example #22
 def test_append_data(self):
     """Try appending data to existing nonuniformly sampled vlen dataset"""
     # start over for appending data
     writer = nsdf.NSDFWriter(self.filepath,
                              mode='a',
                              dialect=nsdf.dialect.VLEN)
     ds = writer.mapping[nsdf.NONUNIFORM][self.popname]
     dlen = np.random.randint(10, 100, size=ds.shape[0])
     for iii, uid in enumerate(ds):
         data = np.random.uniform(-65, -55, size=dlen[iii])
         times = np.cumsum(np.random.exponential(scale=0.01,
                                                 size=dlen[iii]))
         self.data_object.put_data(uid, (data, times))
     data = writer.add_nonuniform_vlen(ds, self.data_object)
     del writer
     with h5.File(self.filepath, 'r') as fd:
         ds = fd['map'][nsdf.NONUNIFORM][self.popname]
         dataset = fd['/data'][nsdf.NONUNIFORM][self.popname][self.varname]
         ts = dataset.dims[0]['time']
         for iii, source in enumerate(ds):
             data, times = self.data_object.get_data(source)
             nptest.assert_allclose(data, dataset[iii][-len(data):])
             nptest.assert_allclose(times, ts[iii][-len(data):])
     os.remove(self.filepath)
Example #23
 def _init_writing(self):
     return nsdf.NSDFWriter(self.filename, mode='w')
Example #24
def create_test_data_file(filename, dialect):
    """Create a datafile at path `filename` using dialect `dialect`."""
    tstart = datetime.now()
    mdict = create_ob_model_tree()
    uniform_data = nsdf.UniformData('Vm',
                                    unit='mV',
                                    field='Vm',
                                    dt=1e-2,
                                    tunit='ms')
    for cell in mdict['granule_cells']:
        uniform_data.put_data(cell.children['gc_0'].uid,
                              np.random.uniform(-63, -57, 100))

    if dialect == nsdf.dialect.NUREGULAR:
        size = 150
        nonuniform_data = nsdf.NonuniformRegularData('Im',
                                                     unit='pA',
                                                     field='Im',
                                                     tunit='ms')
        nonuniform_data.set_times(np.cumsum(np.random.rand(size)))
        for ii, cell in enumerate(mdict['mitral_cells']):
            nonuniform_data.put_data(cell.children['mc_0'].uid,
                                     np.random.rand(size))
    else:
        nonuniform_data = nsdf.NonuniformData('Im',
                                              unit='pA',
                                              field='Im',
                                              tunit='ms',
                                              dtype=np.float32)
        sizes = 150 + np.random.randint(-50, 50, len(mdict['mitral_cells']))
        for ii, cell in enumerate(mdict['mitral_cells']):
            data = np.random.rand(sizes[ii])
            times = np.cumsum(np.random.rand(sizes[ii]))
            assert len(data) == len(times)
            nonuniform_data.put_data(cell.children['mc_0'].uid, (data, times))
    sizes = 200 + np.random.randint(-50, 50, len(mdict['cells']))
    event_data = nsdf.EventData('spike', unit='ms', dtype=np.float32)
    for ii, cell in enumerate(mdict['cells']):
        times = np.cumsum(np.random.exponential(scale=0.01, size=sizes[ii]))
        event_data.put_data(cell.uid, times)
    writer = nsdf.NSDFWriter(filename, dialect=dialect, mode='w')
    writer.add_modeltree(mdict['model_tree'])
    uniform_ds = writer.add_uniform_ds('granule', uniform_data.get_sources())
    writer.add_uniform_data(uniform_ds, uniform_data)

    if dialect == nsdf.dialect.ONED:
        nonuniform_ds = writer.add_nonuniform_ds_1d(
            'mitral', 'Im', nonuniform_data.get_sources())
    else:
        nonuniform_ds = writer.add_nonuniform_ds('mitral',
                                                 nonuniform_data.get_sources())
    if dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR):
        event_ds = writer.add_event_ds_1d('cells', 'spike',
                                          event_data.get_sources())
    else:
        event_ds = writer.add_event_ds('cells', event_data.get_sources())
    if dialect == nsdf.dialect.ONED:
        writer.add_nonuniform_1d(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.NUREGULAR:
        writer.add_nonuniform_regular(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.VLEN:
        writer.add_nonuniform_vlen(nonuniform_ds, nonuniform_data)
        writer.add_event_vlen(event_ds, event_data)
    elif dialect == nsdf.dialect.NANPADDED:
        writer.add_nonuniform_nan(nonuniform_ds, nonuniform_data)
        writer.add_event_nan(event_ds, event_data)
    else:
        raise Exception('unknown dialect: {}'.format(dialect))
    tend = datetime.now()
    description = 'Testing nsdf reader'
    title = 'NSDFReader.test'
    creator = ['Subhasis Ray']
    contributor = ['Chaitanya Chintaluri', 'Daniel Wojcik', 'Upinder Bhalla']
    software = ['Python']
    method = ['manual']
    license = 'CC-BY-SA'
    rights = 'Subhasis Ray, 2014'
    writer.tstart = tstart
    writer.tend = tend
    writer.title = title
    writer.creator = creator
    writer.description = description
    writer.contributor = contributor
    writer.software = software
    writer.method = method
    writer.rights = rights
    writer.license = license
    return {
        'uniform_data': uniform_data,
        'nonuniform_data': nonuniform_data,
        'event_data': event_data,
        'title': title,
        'creator': creator,
        'contributor': contributor,
        'software': software,
        'tstart': tstart,
        'tend': tend,
        'description': description,
        'method': method,
        'rights': rights,
        'license': license,
    }
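A short sketch for sanity-checking the file this helper produces, using
only h5py (the `print_tree` name is ours, not part of nsdf):

import h5py as h5

def print_tree(filename):
    # Print the path of every group and dataset in the NSDF file.
    with h5.File(filename, 'r') as fd:
        fd.visit(print)

# e.g. create_test_data_file('test.h5', nsdf.dialect.ONED)
#      print_tree('test.h5')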
Example #25
        morp_dset[:, 'y1'] = loc_array[:, 5]
        morp_dset[:, 'z1'] = loc_array[:, 6]
        morp_dset[:, 'd1'] = loc_array[:, 7]

        morp_dset.attrs.create(
            'unit', ['um', 'um', 'um', 'um', 'um', 'um', 'um', 'um'])
        morp_dset.attrs.create(
            'field', ['x0', 'y0', 'z0', 'd0', 'x1', 'y1', 'z1', 'd1'])

        morp_names = h5.create_dataset('/map/static/Hay', data=loc_names)
        tie_data_map(morp_dset, morp_names, 'source', 0)

    elif dump_type == 2:
        sys.path.append('../../')
        import nsdf
        writer = nsdf.NSDFWriter('hay_currents_nsdf.h5', mode='w')
        curr_source_ds = writer.add_uniform_ds('hay_currs', i_cp_names)
        data_obj_1 = nsdf.UniformData('i', unit='nA')
        data_obj_2 = nsdf.UniformData('i_pas', unit='nA')
        data_obj_3 = nsdf.UniformData('i_cap', unit='nA')
        data_obj_4 = nsdf.UniformData('i_ca', unit='nA')
        data_obj_5 = nsdf.UniformData('i_na', unit='nA')
        data_obj_6 = nsdf.UniformData('i_k', unit='nA')
        data_obj_7 = nsdf.UniformData('i_ih', unit='nA')

        for ii, source in enumerate(i_cp_names):
            data_obj_1.put_data(source, total_hdf5[ii])
            data_obj_2.put_data(source, i_pas_hdf5[ii])
            data_obj_3.put_data(source, i_cap_hdf5[ii])
            data_obj_4.put_data(source, i_ca_hdf5[ii])
            data_obj_5.put_data(source, i_na_hdf5[ii])