Example #1
def setUp(self):
    """Create a Poisson spike train for each cell in the mitral
    population and save the data as 1D event data."""
    self.mdict = create_ob_model_tree()
    self.filepath = '{}.h5'.format(self.id())
    writer = nsdf.NSDFWriter(self.filepath,
                             mode='w',
                             dialect=nsdf.dialect.NANPADDED)
    writer.title = self.id()
    self.sources = [cell.uid for cell in self.mdict['mitral_cells']]
    self.popname = 'pop1'
    self.field = 'spike'
    self.unit = 's'
    self.varname = 'spike'
    ds = writer.add_event_ds(self.popname, self.sources)
    self.data_object = nsdf.EventData(self.varname,
                                      unit=self.unit,
                                      field=self.field)
    self.src_name_dict = {}
    rate = 100.0
    self.dlen = np.random.poisson(lam=rate, size=len(self.sources))
    for ii, cell in enumerate(self.mdict['mitral_cells']):
        uid = cell.uid
        # Exponential inter-spike intervals make a Poisson spike train.
        data = np.cumsum(
            np.random.exponential(scale=1.0 / rate, size=self.dlen[ii]))
        self.data_object.put_data(uid, data)
        # This need not be cell.name; any valid HDF5 name will do.
        self.src_name_dict[uid] = cell.name
    dd = writer.add_event_nan(ds, self.data_object)
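The NANPADDED dialect stores all the spike trains in a single 2D dataset, padding shorter rows with NaN. Below is a minimal companion sketch (not part of the original test) for reading that data back with plain h5py; the dataset path follows the assumed NSDF layout /data/event/<population>/<variable> and is a guess, not taken from the test:

import h5py
import numpy as np

def read_nan_padded_events(filepath, path='/data/event/pop1/spike'):
    """Return one 1D spike-time array per source, NaN padding stripped."""
    with h5py.File(filepath, 'r') as fd:
        padded = fd[path][:]  # 2D float array, rows padded with NaN
    return [row[~np.isnan(row)] for row in padded]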
Example #2
def create_example():
    # First create the model tree
    model = nsdf.ModelComponent('LeisureCenter', uid=uuid1().hex)
    poolroom = nsdf.ModelComponent('PoolRoom', uid=uuid1().hex, parent=model)
    tables = []
    balls = []
    for ii in range(2):
        tables.append(
            nsdf.ModelComponent('table_{}'.format(ii),
                                uid=uuid1().hex,
                                parent=poolroom))
        for jj in range(3):
            balls.append(
                nsdf.ModelComponent('ball_{}'.format(jj),
                                    uid=uuid1().hex,
                                    parent=tables[-1]))
    id_path_dict = model.get_id_path_dict()
    path_id_dict = {value: key for key, value in id_path_dict.items()}

    # Create the NSDF writer object
    writer = nsdf.NSDFWriter('poolroom.h5', mode='w')
    writer.add_modeltree(model)
    # Static data: a single value per source.
    dataobj = nsdf.StaticData('area', unit='m^2')
    dataobj.put_data(poolroom.uid, [100.0])
    source_ds = writer.add_static_ds('rooms', [poolroom.uid])
    writer.add_static_data(source_ds, dataobj)
    source_ds = writer.add_static_ds('tables', [tab.uid for tab in tables])
    dataobj = nsdf.StaticData('height', unit='m')
    for tab in tables:
        dataobj.put_data(tab.uid, 1.2)
    writer.add_static_data(source_ds, dataobj)
    # Nonuniform data: each table stores its own (values, times) pair.
    source_ds = writer.add_nonuniform_ds_1d('tables', 'players',
                                            [tab.uid for tab in tables])
    dataobj = nsdf.NonuniformData('players',
                                  unit='item',
                                  tunit='hour',
                                  dtype=np.int32)
    for tab in tables:
        times = np.cumsum(np.random.exponential(1 / 10.0, size=10))
        dataobj.put_data(tab.uid, (np.random.randint(10, size=10), times))
    writer.add_nonuniform_1d(source_ds, dataobj,
                             {tab.uid: tab.name
                              for tab in tables})
    # Uniformly sampled data: one shared dt for all sources.
    source_ds = writer.add_uniform_ds('balls', [ball.uid for ball in balls])
    dataobj = nsdf.UniformData('x', unit='cm')
    for ball in balls:
        dataobj.put_data(ball.uid, np.random.rand(10) * 10)
    dataobj.set_dt(1e-2, 's')
    writer.add_uniform_data(source_ds, dataobj)

    # Event data: time stamps stored in a separate 1D dataset per source.
    source_ds = writer.add_event_ds_1d('balls', 'hit',
                                       [ball.uid for ball in balls])
    dataobj = nsdf.EventData('hit', unit='s')
    source_name_dict = {}
    for ball in balls:
        source_name_dict[ball.uid] = '{}_{}'.format(ball.name, ball.uid)
        data = np.cumsum(np.random.exponential(1 / 100.0, size=10))
        dataobj.put_data(ball.uid, data)
    writer.add_event_1d(source_ds, dataobj, source_name_dict)
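After create_example() runs, the layout of poolroom.h5 can be checked with plain h5py, independently of the nsdf reader; a small sketch:

import h5py

def print_tree(filename='poolroom.h5'):
    """Print every group and dataset path inside the HDF5 file."""
    with h5py.File(filename, 'r') as fd:
        fd.visititems(lambda name, obj: print(name, type(obj).__name__))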
Example #3
File: traub2005.py  Project: nsdf/nsdf
def create_example(dialect=nsdf.dialect.ONED, simtime=10.0, dt=1e-4):
    """Create a sample NSDF file using the specified dialect.

    The file stores the model tree (down to single compartments) and

    spike times from all the cells, categorized into populations by
    celltype.
    
    Vm for 10% of the cells of each type as uniformly sampled data.

    """
    start_time = datetime.now()
    writer = nsdf.NSDFWriter('traub_et_al_2005_{}.h5'.format(dialect),
                             mode='w',
                             dialect=dialect)
    print('Start add_modeltree')
    writer.add_modeltree(model)
    print('End add_modeltree')
    for celltype, cell_list in list(model.cells.items()):
        # One spike source per cell: its presynaptic compartment.
        event_sources = [cell.compartments[cell.presyn - 1]
                         for cell in cell_list]
        event_data = nsdf.EventData('spiketime', unit='s', dtype=FLOATDTYPE)
        for src in event_sources:
            num_spikes = np.random.randint(EVENT_MAX)
            times = np.cumsum(np.random.exponential(1 / 10.0, size=num_spikes))
            times = times[times < simtime].copy()
            event_data.put_data(src.uid, times)
        if dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR):
            event_ds = writer.add_event_ds_1d(celltype, 'spiketime',
                                              event_data.get_sources())
            writer.add_event_1d(event_ds, event_data)
        else:
            event_ds = writer.add_event_ds(celltype, event_data.get_sources())
            dialect_eventwriter_map[dialect](writer, event_ds, event_data)
        vm_sources = random.sample(event_sources, len(event_sources) // 10)
        vm_data = nsdf.UniformData('Vm', unit='V', field='Vm')
        vm_data.set_dt(dt, unit='s')
        for src in vm_sources:
            vm = np.random.uniform(-120e-3, 40e-3, size=int(simtime // dt))
            vm_data.put_data(src.uid, vm)
        vm_ds = writer.add_uniform_ds(celltype, vm_data.get_sources())
        writer.add_uniform_data(vm_ds, vm_data)
    end_time = datetime.now()
    writer.title = 'Sample NSDF file for Traub et al 2005 model'
    writer.description = 'This file uses {} dialect of NSDF'.format(dialect)
    writer.tstart = start_time
    writer.tend = end_time
    writer.creator = [os.environ['USER']]
    writer.license = 'CC BY-SA'
    writer.software = ['Python2.7', 'nsdf python library']
    print('Finished writing example NSDF file for dialect {}'.format(dialect))
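dialect_eventwriter_map is defined elsewhere in traub2005.py; based on the writer methods that Example #5 calls for the same dialects, a plausible (assumed) reconstruction is:

# Assumed dispatch table from dialect to the matching unbound
# NSDFWriter method; the signatures match the calls in Example #5.
dialect_eventwriter_map = {
    nsdf.dialect.VLEN: nsdf.NSDFWriter.add_event_vlen,
    nsdf.dialect.NANPADDED: nsdf.NSDFWriter.add_event_nan,
}
# dialect_eventwriter_map[dialect](writer, event_ds, event_data) then
# resolves to writer.add_event_vlen(...) or writer.add_event_nan(...).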
Example #4
def benchmark_write_oned_incremental(**kwargs):
    """Write the benchmark data in the ONED dialect, appending in
    chunks of INCREMENTAL_STEP samples per iteration."""
    compression = kwargs.get('compression', '')
    prefix = DATAFILE.split('.')[0]
    filename = '{}_ONED_{}_incr.h5'.format(prefix, compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             dialect=nsdf.dialect.ONED,
                             mode='w',
                             **kwargs)
    cont_src = writer.add_uniform_ds('continuous_recorded',
                                     ca_data.get_sources())
    spike_sources = writer.add_event_ds_1d('all_cells', 'spike',
                                           spike_data.get_sources())
    # range() needs integer arguments, hence the floor division below.
    for ii in range(
            0,
            int(DATA['simtime'] / DATA['dt'] + 0.5) + INCREMENTAL_STEP // 2,
            INCREMENTAL_STEP):
        ca_data_tmp = nsdf.UniformData(ca_data.name,
                                       unit=ca_data.unit,
                                       dt=ca_data.dt,
                                       tunit=ca_data.tunit)
        for src, data in ca_data.get_source_data_dict().items():
            ca_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, ca_data_tmp)
        Vm_data_tmp = nsdf.UniformData(Vm_data.name,
                                       unit=Vm_data.unit,
                                       dt=Vm_data.dt,
                                       tunit=Vm_data.tunit)
        for src, data in Vm_data.get_source_data_dict().items():
            Vm_data_tmp.put_data(src, data[ii:ii + INCREMENTAL_STEP])
        writer.add_uniform_data(cont_src, Vm_data_tmp)
        tstart = ii * Vm_data.dt
        tend = (ii + INCREMENTAL_STEP) * Vm_data.dt
        spike_data_tmp = nsdf.EventData(spike_data.name,
                                        unit=spike_data.unit,
                                        dtype=spike_data.dtype)
        for src, data in spike_data.get_source_data_dict().items():
            spike_data_tmp.put_data(src,
                                    data[(data >= tstart) & (data < tend)])
        writer.add_event_1d(spike_sources, spike_data_tmp)
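A hypothetical driver for timing this benchmark. The 'compression' keyword is forwarded to NSDFWriter through **kwargs above, so passing compression='gzip' is assumed to be accepted:

import timeit

def time_incremental_benchmarks():
    """Time the incremental ONED writer with and without compression."""
    for kwargs in ({}, {'compression': 'gzip'}):
        elapsed = timeit.timeit(
            lambda: benchmark_write_oned_incremental(**kwargs), number=1)
        print('ONED incremental {}: {:.2f} s'.format(kwargs or 'raw', elapsed))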
Example #5
def create_test_data_file(filename, dialect):
    """Create a datafile at path `filename` using dialect `dialect`."""
    tstart = datetime.now()
    mdict = create_ob_model_tree()
    uniform_data = nsdf.UniformData('Vm',
                                    unit='mV',
                                    field='Vm',
                                    dt=1e-2,
                                    tunit='ms')
    for cell in mdict['granule_cells']:
        uniform_data.put_data(cell.children['gc_0'].uid,
                              np.random.uniform(-63, -57, 100))

    if dialect == nsdf.dialect.NUREGULAR:
        size = 150
        nonuniform_data = nsdf.NonuniformRegularData('Im',
                                                     unit='pA',
                                                     field='Im',
                                                     tunit='ms')
        nonuniform_data.set_times(np.cumsum(np.random.rand(size)))
        for ii, cell in enumerate(mdict['mitral_cells']):
            nonuniform_data.put_data(cell.children['mc_0'].uid,
                                     np.random.rand(size))
    else:
        nonuniform_data = nsdf.NonuniformData('Im',
                                              unit='pA',
                                              field='Im',
                                              tunit='ms',
                                              dtype=np.float32)
        sizes = 150 + np.random.randint(-50, 50, len(mdict['mitral_cells']))
        for ii, cell in enumerate(mdict['mitral_cells']):
            data = np.random.rand(sizes[ii])
            times = np.cumsum(np.random.rand(sizes[ii]))
            assert len(data) == len(times)
            nonuniform_data.put_data(cell.children['mc_0'].uid, (data, times))
    sizes = 200 + np.random.randint(-50, 50, len(mdict['cells']))
    event_data = nsdf.EventData('spike', unit='ms', dtype=np.float32)
    for ii, cell in enumerate(mdict['cells']):
        times = np.cumsum(np.random.exponential(scale=0.01, size=sizes[ii]))
        event_data.put_data(cell.uid, times)
    writer = nsdf.NSDFWriter(filename, dialect=dialect, mode='w')
    writer.add_modeltree(mdict['model_tree'])
    uniform_ds = writer.add_uniform_ds('granule', uniform_data.get_sources())
    writer.add_uniform_data(uniform_ds, uniform_data)

    if dialect == nsdf.dialect.ONED:
        nonuniform_ds = writer.add_nonuniform_ds_1d(
            'mitral', 'Im', nonuniform_data.get_sources())
    else:
        nonuniform_ds = writer.add_nonuniform_ds('mitral',
                                                 nonuniform_data.get_sources())
    if dialect in (nsdf.dialect.ONED, nsdf.dialect.NUREGULAR):
        event_ds = writer.add_event_ds_1d('cells', 'spike',
                                          event_data.get_sources())
    else:
        event_ds = writer.add_event_ds('cells', event_data.get_sources())
    if dialect == nsdf.dialect.ONED:
        writer.add_nonuniform_1d(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.NUREGULAR:
        writer.add_nonuniform_regular(nonuniform_ds, nonuniform_data)
        writer.add_event_1d(event_ds, event_data)
    elif dialect == nsdf.dialect.VLEN:
        writer.add_nonuniform_vlen(nonuniform_ds, nonuniform_data)
        writer.add_event_vlen(event_ds, event_data)
    elif dialect == nsdf.dialect.NANPADDED:
        writer.add_nonuniform_nan(nonuniform_ds, nonuniform_data)
        writer.add_event_nan(event_ds, event_data)
    else:
        raise ValueError('unknown dialect: {}'.format(dialect))
    tend = datetime.now()
    description = 'Testing nsdf reader'
    title = 'NSDFReader.test'
    creator = ['Subhasis Ray']
    contributor = ['Chaitanya Chintaluri', 'Daniel Wojcik', 'Upinder Bhalla']
    software = ['Python']
    method = ['manual']
    license = 'CC-BY-SA'
    rights = 'Subhasis Ray, 2014'
    writer.tstart = tstart
    writer.tend = tend
    writer.title = title
    writer.creator = creator
    writer.description = description
    writer.contributor = contributor
    writer.software = software
    writer.method = method
    writer.rights = rights
    writer.license = license
    return {
        'uniform_data': uniform_data,
        'nonuniform_data': nonuniform_data,
        'event_data': event_data,
        'title': title,
        'creator': creator,
        'contributor': contributor,
        'software': software,
        'tstart': tstart,
        'tend': tend,
        'description': description,
        'method': method,
        'rights': rights,
        'license': license,
    }
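The returned dict pairs naturally with a verification step in the test. Below is a minimal sketch using plain h5py; the root-attribute names mirror the writer properties set above, but their exact on-disk spelling is an assumption:

import h5py

def check_metadata(filename, expected):
    """Compare a few root attributes against the expected dict."""
    with h5py.File(filename, 'r') as fd:
        assert fd.attrs['title'] == expected['title']
        assert fd.attrs['license'] == expected['license']
        assert fd.attrs['description'] == expected['description']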
Example #6
DATA = read_data()
ca_data = nsdf.UniformData('Ca',
                           field='conc',
                           unit='mM',
                           dt=DATA['dt'],
                           tunit='s')
ca_data.update_source_data_dict(DATA['ca'])
Vm_data = nsdf.UniformData('Vm',
                           field='Vm',
                           unit='V',
                           dt=DATA['dt'],
                           tunit='s')
Vm_data.update_source_data_dict(DATA['Vm'])
spike_data = nsdf.EventData('spike',
                            field='spiketime',
                            unit='s',
                            dtype=np.float32)
spike_data.update_source_data_dict(DATA['spike'])
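read_data() is defined elsewhere in this benchmark script; judging from the keys used here and in Example #4, it is assumed to return a dict shaped roughly like the sketch below (hypothetical, not the actual loader):

# {
#     'dt': 1e-4,                      # sampling interval, in seconds
#     'simtime': 10.0,                 # total simulated time, in seconds
#     'ca': {uid: np.ndarray, ...},    # [Ca] trace per source uid
#     'Vm': {uid: np.ndarray, ...},    # Vm trace per source uid
#     'spike': {uid: np.ndarray, ...}, # spike times per source uid
# }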


def benchmark_write_oned(**kwargs):
    """Write LFP, Vm, spike trains, and any other data."""
    compression = kwargs.get('compression', '')
    filename = '{}_ONED_{}.h5'.format(DATAFILE.split('.')[0], compression)
    filepath = os.path.join(DATADIR, filename)
    writer = nsdf.NSDFWriter(filepath,
                             mode='w',
                             dialect=nsdf.dialect.ONED,
                             **kwargs)
    cont_rec_sources = writer.add_uniform_ds('continuous_recorded',
                                             ca_data.get_sources())