Example #1
0
 def setUpContainer(self):
     """Build a UnitTimes container holding two units for the round-trip test."""
     ut = UnitTimes('UnitTimes integration test', name='UnitTimesTest')
     ut.add_spike_times(0, [0, 1, 2])
     ut.add_spike_times(1, [3, 4, 5])
     return ut
Example #2
0
 def test_add_spike_times(self):
     """Adding two units appends ids, concatenates times, and builds the index."""
     times = UnitTimes('UnitTimes add_spike_times unit test')
     times.add_spike_times(0, [0, 1, 2])
     times.add_spike_times(1, [3, 4, 5])
     # unit ids recorded in insertion order
     self.assertEqual(times.unit_ids.data, [0, 1])
     # all spike times live in one flat, shared vector
     self.assertEqual(times.spike_times.data, [0, 1, 2, 3, 4, 5])
     # one index entry per unit, each targeting the shared vector
     self.assertEqual(len(times.spike_times_index), 2)
     self.assertEqual(times.spike_times_index[0].target.data, [0, 1, 2, 3, 4, 5])
     self.assertEqual(times.spike_times_index[0].slice, slice(0, 3))
     self.assertEqual(times.spike_times_index[1].target.data, [0, 1, 2, 3, 4, 5])
Example #3
0
 def test_init(self):
     """A fresh UnitTimes has the default name and all-empty data columns."""
     container = UnitTimes('UnitTimes constructor unit test')
     self.assertEqual(container.source, 'UnitTimes constructor unit test')
     self.assertEqual(container.name, 'UnitTimes')
     # every backing column starts out empty
     for column in (container.unit_ids,
                    container.spike_times,
                    container.spike_times_index):
         self.assertEqual(column.data, list())
Example #4
0
def build_unit_times(fpath, fname, shanks=None, name='UnitTimes',
                     source=None, compress=True):
    """Collect cluster spike times from .res/.clu files into a UnitTimes.

    Parameters
    ----------
    fpath: str
        directory containing the data files
    fname: str
        base file name (the .res.*/.clu.* suffixes are appended elsewhere)
    shanks: None | list(ints)
        shank numbers to process. If None, use 1:8
    name: str
        name for the returned UnitTimes container
    source: str
        data-source description; defaults to the .res/.clu file paths
    compress: bool
        NOTE(review): accepted but never read in this function — confirm
        whether it should be forwarded or removed

    Returns
    -------
    UnitTimes
        container with one entry per cluster, numbered consecutively
        across all processed shanks
    """

    fnamepath = os.path.join(fpath, fname)

    if shanks is None:
        shanks = range(1, 9)

    if source is None:
        source = fnamepath + '.res.*; ' + fnamepath + '.clu.*'

    ut = UnitTimes(name=name, source=source)

    # cells are numbered consecutively across shanks, not per shank
    cell_counter = 0
    for shank_num in shanks:
        df = get_clusters_single_shank(fpath, fname, shank_num)
        for cluster_num, idf in df.groupby('id'):
            ut.add_spike_times(cell_counter, list(idf['time']))
            cell_counter += 1

    return ut
Example #5
0
    def test_init(self):
        """SpikeUnit fields round-trip and UnitTimes stores the unit list."""
        spike_times = [1.0, 2.0]

        first = SpikeUnit('su1', spike_times, 'unit_description_1', 'unit_source_1')
        second = SpikeUnit('su2', spike_times, 'unit_description_2', 'unit_source_2')

        # each SpikeUnit keeps its constructor arguments verbatim
        self.assertEqual(first.times, spike_times)
        self.assertEqual(first.unit_description, 'unit_description_1')
        self.assertEqual(first.source, 'unit_source_1')

        self.assertEqual(second.times, spike_times)
        self.assertEqual(second.unit_description, 'unit_description_2')
        self.assertEqual(second.source, 'unit_source_2')

        units = [first, second]
        container = UnitTimes('test_ut', units)
        self.assertEqual(container.source, 'test_ut')
        self.assertEqual(container.spike_units, units)
Example #6
0
                                                      source=fname + '.xml',
                                                      description=device_name,
                                                      device=device,
                                                      location='unknown')

# special electrodes
# catch-all device/electrode group for analog-input ("ainp") channels
device_name = 'analog'
device = nwbfile.create_device(device_name, 'analog')
ainp_electrode_group = nwbfile.create_electrode_group(name=device_name +
                                                      '_electrodes',
                                                      source='source',
                                                      description=device_name,
                                                      device=device,
                                                      location='unknown')

# spike times keyed by electrode id; only non-"ainp" channels carry spikes
ut = UnitTimes(name='spikes', source=source)
elec_inds = []  # positions (in elecs) of electrodes that contributed spikes
for i, (elec_id, elec_label) in tqdm(enumerate(elecs)):
    if elec_label[:4] == 'ainp':
        electrode_group = ainp_electrode_group
    else:
        electrode_group = elec_electrode_group
        elecs_data = nev_file.getdata([elec_id])
        # timestamps divided by 30000 -> seconds
        # NOTE(review): 30 kHz sample rate inferred from the divisor — confirm
        spikes = (np.array(elecs_data['spike_events']['TimeStamps'][0]) /
                  30000).tolist()
        elec_inds.append(i)
        ut.add_spike_times(elec_id, spikes)
    nwbfile.add_electrode(
        elec_id,
        np.nan,
        np.nan,
Example #7
0
 def test_get_spike_times(self):
     """get_unit_spike_times returns each unit's own spike train."""
     container = UnitTimes('UnitTimes add_spike_times unit test')
     container.add_spike_times(0, [0, 1, 2])
     container.add_spike_times(1, [3, 4, 5])
     # element-wise compare since the return value is a numpy array
     self.assertTrue(all(container.get_unit_spike_times(0) == np.array([0, 1, 2])))
     self.assertTrue(all(container.get_unit_spike_times(1) == np.array([3, 4, 5])))
Example #8
0
            resolution=.001,
            conversion=1.,
            unit='V')))

###

from pynwb.misc import UnitTimes

# gen spiking data: 20 units, each with a Poisson-distributed spike count
# NOTE(review): spike "times" are standard-normal draws (can be negative) —
# demo data only, not physically meaningful timestamps
all_spikes = []
for unit in range(20):
    n_spikes = np.random.poisson(lam=10)
    all_spikes.append(np.random.randn(n_spikes))

# write UnitTimes object: unit index is just the position in all_spikes
ut = UnitTimes(name='name', source='source')
for i, unit_spikes in enumerate(all_spikes):
    ut.add_spike_times(i, unit_spikes)

spiking_module = nwbfile.create_processing_module(
    name='spikes', source='source', description='data relevant to spiking')

spiking_module.add_container(ut)

###

from pynwb.behavior import SpatialSeries, Position

# 100-sample 2-D trajectory; columns are the two spatial coordinates
position_data = np.array([np.linspace(0, 10, 100), np.linspace(1, 8, 100)]).T
# linspace default is 50 samples; scaled by 1/200
# NOTE(review): 50 position timestamps vs 100 position samples — confirm intent
tt_position = np.linspace(0, 100) / 200
Example #9
0
def convert_file1(fpath,
                  session_start_time,
                  session_description='simulated MEC and LEC data'):
    """Convert a simulated MEC/LEC HDF5 file into an NWB file structure.

    Reads per-cell spike trains and the animal trajectory from the HDF5
    file at fpath and packages them into a 'receptive fields' processing
    module containing a UnitTimes and a Position container.
    """
    base_name = os.path.split(fpath)[1]
    # file name minus its 4-character extension doubles as id and source
    identifier = base_name[:-4]
    source = base_name[:-4]
    institution = 'Stanford'
    lab = 'Soltesz'

    # extract data
    spike_units = []
    with File(fpath, 'r') as h5:
        for population in ('MPP', 'LPP'):
            train = h5['Populations'][population]['Vector Stimulus 0'][
                'spiketrain']
            pointers = train['Attribute Pointer']
            for idx, (begin, end) in tqdm(enumerate(pairwise(pointers)),
                                          total=len(pointers),
                                          desc=population):
                if begin == end:
                    # no spikes recorded for this cell — skip it
                    continue
                # values stored in ms; divide to get seconds
                cell_times = train['Attribute Value'][begin:end] / 1000
                spike_units.append(
                    SpikeUnit(name=population + '{:05d}'.format(idx),
                              times=cell_times,
                              unit_description=population,
                              source=source))

        # trajectory; sample rate derived from the first two timestamps,
        # which are in ms (hence the * 1000)
        x = h5['Trajectory 0']['x']
        y = h5['Trajectory 0']['y']
        rate = 1 / (h5['Trajectory 0']['t'][1] -
                    h5['Trajectory 0']['t'][0]) * 1000

        pos_data = np.array([x, y]).T

    # write to NWB
    nwbfile = NWBFile(source, session_description, identifier,
                      session_start_time, datetime.now(),
                      institution=institution, lab=lab)

    rf_module = nwbfile.create_processing_module(
        'receptive fields', source, 'spike times')

    spatial_series = SpatialSeries(
        'Position', source, pos_data, reference_frame='NA',
        conversion=1 / 100., resolution=0.1, starting_time=0.0, rate=rate)

    behav_ts = Position(source, spatial_series)
    unit_times = UnitTimes(source, spike_units,
                           name='simulated cell spike data')

    rf_module.add_container(unit_times)
    rf_module.add_container(behav_ts)
Example #10
0
 def test_get_spike_times(self):
     """get_unit_spike_times returns each unit's own spike train.

     Compare via list() so the assertion also works when the return type
     is a numpy array: assertEqual on an ndarray vs a list produces an
     element-wise bool array and raises an ambiguous-truth-value
     ValueError instead of a clean pass/fail.
     """
     ut = UnitTimes('UnitTimes add_spike_times unit test')
     ut.add_spike_times(0, [0, 1, 2])
     ut.add_spike_times(1, [3, 4, 5])
     self.assertEqual(list(ut.get_unit_spike_times(0)), [0, 1, 2])
     self.assertEqual(list(ut.get_unit_spike_times(1)), [3, 4, 5])