def __run_test_tokenizer(self, block_name):
        _, animal_name, _ = split_block_folder(block_name)
        block_metadata_path = os.path.join(self.data_path, animal_name,
                                           block_name, f"{block_name}.yaml")
        metadata = MetadataManager(block_folder=block_name,
                                   block_metadata_path=block_metadata_path,
                                   metadata_lib_path=self.metadata_lib_path,
                                   legacy_block=False).extract_metadata()
        stim_configs = metadata['stimulus']
        stim_vals = StimValueExtractor(stim_configs,
                                       self.stim_lib_path).extract()

        dataset = AuditoryDataScanner(block_name,
                                      data_path=self.data_path,
                                      stim_lib_path=self.stim_lib_path,
                                      use_htk=False).extract_dataset()

        # create an empty NWB file
        nwb_content = NWBFile(
            session_description='test stim tokenizers',  # required
            identifier=str(uuid.uuid1()),  # required
            session_start_time=get_default_time())  # required
        # add mark track
        mark_time_series = MarkManager(dataset).get_mark_track(
            starting_time=0.0)
        nwb_content.add_stimulus(mark_time_series)

        # tokenize and add trials
        trials_manager = TrialsManager(block_name, stim_configs)
        trials_manager.add_trials(nwb_content, stim_vals)
Example #2
    def run_conversion(self,
                       nwbfile: NWBFile,
                       metadata: dict = None,
                       overwrite: bool = False):
        assert isinstance(nwbfile,
                          NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
        metadata_default = self.get_metadata()
        metadata = dict_deep_update(metadata_default, metadata)
        # Subject:
        if nwbfile.subject is None:
            nwbfile.subject = Subject(**metadata['Subject'])
        # adding behavior:
        start_time = 0.0
        rate = 1 / self.data_frame.time.diff().mean()
        beh_ts = []
        for behdict in self.beh_args:
            if 'cm' in behdict['unit']:
                conv = 1e-2
                behdict.update(unit='m')
            else:
                conv = 1
            behdict.update(starting_time=start_time,
                           rate=rate,
                           data=self.data_frame[behdict['name']].to_numpy() *
                           conv)
            beh_ts.append(TimeSeries(**behdict))
        if 'behavior' not in nwbfile.processing:
            beh_mod = nwbfile.create_processing_module(
                'behavior', 'Container for behavior time series')
            beh_mod.add(
                BehavioralTimeSeries(time_series=beh_ts,
                                     name='BehavioralTimeSeries'))
        else:
            beh_mod = nwbfile.processing['behavior']
            if 'BehavioralTimeSeries' not in beh_mod.data_interfaces:
                beh_mod.add(
                    BehavioralTimeSeries(time_series=beh_ts,
                                         name='BehavioralTimeSeries'))

        # adding stimulus:
        for inp_kwargs in self.stimulus_args:
            if inp_kwargs['name'] not in nwbfile.stimulus:
                inp_kwargs.update(
                    starting_time=start_time,
                    rate=rate,
                    data=self.data_frame[inp_kwargs['name']].to_numpy())
                nwbfile.add_stimulus(TimeSeries(**inp_kwargs))
Example #3
    def run_conversion(self,
                       nwbfile: NWBFile,
                       metadata: dict = None,
                       stub_test: bool = False):
        conditions = intervals_from_traces(self.recording_extractor)
        mech_stim = TimeIntervals(
            name='MechanicalStimulus',
            description=
            "Activation times inferred from TTL commands for mechanical stimulus."
        )
        laser_stim = TimeIntervals(
            name='LaserStimulus',
            description=
            "Activation times inferred from TTL commands for cortical laser stimulus."
        )
        for j, table in enumerate([mech_stim, laser_stim]):
            for row in conditions[j]:
                table.add_row(
                    dict(start_time=float(row[0]), stop_time=float(row[1])))
        # TODO - these really should be IntervalSeries added to stimulus, rather than processing
        check_module(nwbfile, 'stimulus',
                     "Contains stimuli data.").add(mech_stim)
        check_module(nwbfile, 'stimulus',
                     "Contains stimuli data.").add(laser_stim)

        if stub_test or self.subset_channels is not None:
            recording = self.subset_recording(stub_test=stub_test)
        else:
            recording = self.recording_extractor

        # Pressure values
        nwbfile.add_stimulus(
            TimeSeries(
                name='MechanicalPressure',
                data=H5DataIO(recording.get_traces(0), compression="gzip"),
                unit=self.recording_extractor._channel_smrxinfo[0]['unit'],
                conversion=recording.get_channel_property(0, 'gain'),
                rate=recording.get_sampling_frequency(),
                description=
                "Pressure sensor attached to the mechanical stimulus used to repeatedly evoke spiking."
            ))
Example #4
def test_stimulus_round_trip(nwb_filename):

    nwbfile = NWBFile(session_description='test ephys',
                      identifier='session_uuid',
                      session_start_time=datetime.datetime.now(),
                      file_create_date=datetime.datetime.now())
    device = nwbfile.create_device(name='electrode_0')

    electrode = nwbfile.create_ic_electrode(
        name="elec0", description=' some kind of electrode', device=device)

    data = np.array([1., 3.76, 0., 67, -2.89])
    meta_data = {
        "name": "test_stimulus_sweep",
        "sweep_number": 4,
        "unit": "amperes",
        "gain": 32.0,
        "resolution": 1.0,
        "conversion": 1.0E-3,
        "starting_time": 1.5,
        "rate": 7000.0,
        "stimulus_description": "STIMULUS_CODE"
    }

    time_series = CurrentClampStimulusSeries(data=data,
                                             electrode=electrode,
                                             **meta_data)

    nwbfile.add_stimulus(time_series)

    with NWBHDF5IO(nwb_filename, mode='w') as io:
        io.write(nwbfile)
    nwbfile_in = NWBHDF5IO(nwb_filename, mode='r').read()

    time_series_in = nwbfile_in.get_stimulus(meta_data["name"])

    assert np.allclose(data, time_series_in.data)
    for k, v in meta_data.items():
        assert getattr(time_series_in, k) == v
Example #5
class NWBFileTest(unittest.TestCase):
    def setUp(self):
        self.start = datetime(2017, 5, 1, 12, 0, 0)
        self.path = 'nwbfile_test.h5'
        self.nwbfile = NWBFile(
            'a fake source',
            'a test session description for a test NWBFile',
            'FILE123',
            self.start,
            experimenter='A test experimenter',
            lab='a test lab',
            institution='a test institution',
            experiment_description='a test experiment description',
            session_id='test1',
            notes='my notes',
            pharmacology='drugs',
            protocol='protocol',
            related_publications='my pubs',
            slices='my slices',
            surgery='surgery',
            virus='a virus')

    def test_constructor(self):
        self.assertEqual(self.nwbfile.session_description,
                         'a test session description for a test NWBFile')
        self.assertEqual(self.nwbfile.identifier, 'FILE123')
        self.assertEqual(self.nwbfile.session_start_time, self.start)
        self.assertEqual(self.nwbfile.lab, 'a test lab')
        self.assertEqual(self.nwbfile.experimenter, 'A test experimenter')
        self.assertEqual(self.nwbfile.institution, 'a test institution')
        self.assertEqual(self.nwbfile.experiment_description,
                         'a test experiment description')
        self.assertEqual(self.nwbfile.session_id, 'test1')

    def test_create_electrode_group(self):
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        d = self.nwbfile.create_device('a fake device', 'a fake source')
        elecgrp = self.nwbfile.create_electrode_group(name, 'a fake source',
                                                      desc, loc, d)
        self.assertEqual(elecgrp.description, desc)
        self.assertEqual(elecgrp.location, loc)
        self.assertIs(elecgrp.device, d)

    def test_epoch_tags(self):
        tags1 = ['t1', 't2']
        tags2 = ['t3', 't4']
        tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)
        ts = TimeSeries("test_ts",
                        "a hypothetical source",
                        list(range(len(tstamps))),
                        'unit',
                        timestamps=tstamps)
        expected_tags = tags1 + tags2
        self.nwbfile.create_epoch('a fake epoch', 0.0, 1.0, tags1, ts)
        self.nwbfile.create_epoch('a second fake epoch', 0.0, 1.0, tags2, ts)
        tags = self.nwbfile.epoch_tags
        six.assertCountEqual(self, expected_tags, tags)

    def test_add_acquisition(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.acquisition), 1)

    def test_add_stimulus(self):
        self.nwbfile.add_stimulus(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus), 1)

    def test_add_stimulus_template(self):
        self.nwbfile.add_stimulus_template(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus_template), 1)

    def test_add_acquisition_check_dups(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        with self.assertRaises(ValueError):
            self.nwbfile.add_acquisition(
                TimeSeries('test_ts',
                           'unit test test_add_acquisition',
                           [0, 1, 2, 3, 4, 5],
                           'grams',
                           timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))

    def test_get_acquisition_empty(self):
        with self.assertRaisesRegex(ValueError,
                                    "acquisition of NWBFile 'root' is empty"):
            self.nwbfile.get_acquisition()

    def test_get_acquisition_multiple_elements(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts1',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts2',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "more than one element in acquisition of NWBFile 'root' -- must specify a name"
        with self.assertRaisesRegex(ValueError, msg):
            self.nwbfile.get_acquisition()

    def test_add_acquisition_invalid_name(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "'TEST_TS' not found in acquisition of NWBFile 'root'"
        with self.assertRaisesRegex(KeyError, msg):
            self.nwbfile.get_acquisition("TEST_TS")

    def test_set_electrode_table(self):
        table = ElectrodeTable('test_table')  # noqa: F405
        dev1 = self.nwbfile.create_device('dev1',
                                          'a test source')  # noqa: F405
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'a test source',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        table.add_row(1, 1.0, 2.0, 3.0, -1.0, 'CA1', 'none',
                      'first channel of tetrode', group)
        table.add_row(2, 1.0, 2.0, 3.0, -2.0, 'CA1', 'none',
                      'second channel of tetrode', group)
        table.add_row(3, 1.0, 2.0, 3.0, -3.0, 'CA1', 'none',
                      'third channel of tetrode', group)
        table.add_row(4, 1.0, 2.0, 3.0, -4.0, 'CA1', 'none',
                      'fourth channel of tetrode', group)
        self.nwbfile.set_electrode_table(table)
        self.assertIs(self.nwbfile.ec_electrodes, table)
        self.assertIs(table.parent, self.nwbfile)

    def test_add_electrode(self):
        dev1 = self.nwbfile.create_device('dev1',
                                          'a test source')  # noqa: F405
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'a test source',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        self.nwbfile.add_electrode(1, 1.0, 2.0, 3.0, -1.0, 'CA1', 'none',
                                   'first channel of tetrode', group)
        self.assertEqual(self.nwbfile.ec_electrodes[0][0], 1)
        self.assertEqual(self.nwbfile.ec_electrodes[0][1], 1.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][2], 2.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][3], 3.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][4], -1.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][5], 'CA1')
        self.assertEqual(self.nwbfile.ec_electrodes[0][6], 'none')
        self.assertEqual(self.nwbfile.ec_electrodes[0][7],
                         'first channel of tetrode')
        self.assertEqual(self.nwbfile.ec_electrodes[0][8], group)

    def test_all_children(self):
        ts1 = TimeSeries('test_ts1',
                         'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = TimeSeries('test_ts2',
                         'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        device = self.nwbfile.create_device('a fake device', 'a fake source')
        elecgrp = self.nwbfile.create_electrode_group(name, 'a fake source',
                                                      desc, loc, device)
        children = self.nwbfile.all_children()
        self.assertIn(ts1, children)
        self.assertIn(ts2, children)
        self.assertIn(device, children)
        self.assertIn(elecgrp, children)
Example #6
class NWBFileTest(TestCase):
    def setUp(self):
        self.start = datetime(2017, 5, 1, 12, 0, 0, tzinfo=tzlocal())
        self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
        self.create = [
            datetime(2017, 5, 1, 12, tzinfo=tzlocal()),
            datetime(2017, 5, 2, 13, 0, 0, 1, tzinfo=tzutc()),
            datetime(2017, 5, 2, 14, tzinfo=tzutc())
        ]
        self.path = 'nwbfile_test.h5'
        self.nwbfile = NWBFile(
            'a test session description for a test NWBFile',
            'FILE123',
            self.start,
            file_create_date=self.create,
            timestamps_reference_time=self.ref_time,
            experimenter='A test experimenter',
            lab='a test lab',
            institution='a test institution',
            experiment_description='a test experiment description',
            session_id='test1',
            notes='my notes',
            pharmacology='drugs',
            protocol='protocol',
            related_publications='my pubs',
            slices='my slices',
            surgery='surgery',
            virus='a virus',
            source_script='noscript',
            source_script_file_name='nofilename',
            stimulus_notes='test stimulus notes',
            data_collection='test data collection notes',
            keywords=('these', 'are', 'keywords'))

    def test_constructor(self):
        self.assertEqual(self.nwbfile.session_description,
                         'a test session description for a test NWBFile')
        self.assertEqual(self.nwbfile.identifier, 'FILE123')
        self.assertEqual(self.nwbfile.session_start_time, self.start)
        self.assertEqual(self.nwbfile.file_create_date, self.create)
        self.assertEqual(self.nwbfile.lab, 'a test lab')
        self.assertEqual(self.nwbfile.experimenter, ('A test experimenter', ))
        self.assertEqual(self.nwbfile.institution, 'a test institution')
        self.assertEqual(self.nwbfile.experiment_description,
                         'a test experiment description')
        self.assertEqual(self.nwbfile.session_id, 'test1')
        self.assertEqual(self.nwbfile.stimulus_notes, 'test stimulus notes')
        self.assertEqual(self.nwbfile.data_collection,
                         'test data collection notes')
        self.assertEqual(self.nwbfile.related_publications, ('my pubs', ))
        self.assertEqual(self.nwbfile.source_script, 'noscript')
        self.assertEqual(self.nwbfile.source_script_file_name, 'nofilename')
        self.assertEqual(self.nwbfile.keywords, ('these', 'are', 'keywords'))
        self.assertEqual(self.nwbfile.timestamps_reference_time, self.ref_time)

    def test_create_electrode_group(self):
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        d = self.nwbfile.create_device('a fake device')
        elecgrp = self.nwbfile.create_electrode_group(name, desc, loc, d)
        self.assertEqual(elecgrp.description, desc)
        self.assertEqual(elecgrp.location, loc)
        self.assertIs(elecgrp.device, d)

    def test_create_custom_intervals(self):
        df_words = pd.DataFrame({
            'start_time': [.1, 2.],
            'stop_time': [.8, 2.3],
            'label': ['hello', 'there']
        })
        words = TimeIntervals.from_dataframe(df_words, name='words')
        self.nwbfile.add_time_intervals(words)
        self.assertEqual(self.nwbfile.intervals['words'], words)

    def test_create_electrode_group_invalid_index(self):
        """
        Test the case where the user creates an electrode table region with
        indexes that are out of range of the amount of electrodes added.
        """
        nwbfile = NWBFile('a', 'b', datetime.now(tzlocal()))
        device = nwbfile.create_device('a')
        elecgrp = nwbfile.create_electrode_group('a',
                                                 'b',
                                                 device=device,
                                                 location='a')
        for i in range(4):
            nwbfile.add_electrode(np.nan,
                                  np.nan,
                                  np.nan,
                                  np.nan,
                                  'a',
                                  'a',
                                  elecgrp,
                                  id=i)
        with self.assertRaises(IndexError):
            nwbfile.create_electrode_table_region(list(range(6)), 'test')

    def test_access_group_after_io(self):
        """
        Motivated by #739
        """
        nwbfile = NWBFile('a', 'b', datetime.now(tzlocal()))
        device = nwbfile.create_device('a')
        elecgrp = nwbfile.create_electrode_group('a',
                                                 'b',
                                                 device=device,
                                                 location='a')
        nwbfile.add_electrode(np.nan,
                              np.nan,
                              np.nan,
                              np.nan,
                              'a',
                              'a',
                              elecgrp,
                              id=0)

        with NWBHDF5IO('electrodes_mwe.nwb', 'w') as io:
            io.write(nwbfile)

        with NWBHDF5IO('electrodes_mwe.nwb', 'a') as io:
            nwbfile_i = io.read()
            for aa, bb in zip(nwbfile_i.electrodes['group'][:],
                              nwbfile.electrodes['group'][:]):
                self.assertEqual(aa.name, bb.name)

        for i in range(4):
            nwbfile.add_electrode(np.nan,
                                  np.nan,
                                  np.nan,
                                  np.nan,
                                  'a',
                                  'a',
                                  elecgrp,
                                  id=i + 1)

        with NWBHDF5IO('electrodes_mwe.nwb', 'w') as io:
            io.write(nwbfile)

        with NWBHDF5IO('electrodes_mwe.nwb', 'a') as io:
            nwbfile_i = io.read()
            for aa, bb in zip(nwbfile_i.electrodes['group'][:],
                              nwbfile.electrodes['group'][:]):
                self.assertEqual(aa.name, bb.name)

        remove_test_file("electrodes_mwe.nwb")

    def test_access_processing(self):
        self.nwbfile.create_processing_module('test_mod', 'test_description')
        # test deprecate .modules
        with self.assertWarnsWith(DeprecationWarning,
                                  'replaced by NWBFile.processing'):
            modules = self.nwbfile.modules['test_mod']
        self.assertIs(self.nwbfile.processing['test_mod'], modules)

    def test_epoch_tags(self):
        tags1 = ['t1', 't2']
        tags2 = ['t3', 't4']
        tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)
        ts = TimeSeries("test_ts",
                        list(range(len(tstamps))),
                        'unit',
                        timestamps=tstamps)
        expected_tags = tags1 + tags2
        self.nwbfile.add_epoch(0.0, 1.0, tags1, ts)
        self.nwbfile.add_epoch(0.0, 1.0, tags2, ts)
        tags = self.nwbfile.epoch_tags
        self.assertEqual(set(expected_tags), set(tags))

    def test_add_acquisition(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.acquisition), 1)

    def test_add_stimulus(self):
        self.nwbfile.add_stimulus(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus), 1)

    def test_add_stimulus_template(self):
        self.nwbfile.add_stimulus_template(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus_template), 1)

    def test_add_analysis(self):
        self.nwbfile.add_analysis(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.analysis), 1)

    def test_add_acquisition_check_dups(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        with self.assertRaises(ValueError):
            self.nwbfile.add_acquisition(
                TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                           'grams',
                           timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))

    def test_get_acquisition_empty(self):
        with self.assertRaisesWith(ValueError,
                                   "acquisition of NWBFile 'root' is empty"):
            self.nwbfile.get_acquisition()

    def test_get_acquisition_multiple_elements(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "more than one element in acquisition of NWBFile 'root' -- must specify a name"
        with self.assertRaisesWith(ValueError, msg):
            self.nwbfile.get_acquisition()

    def test_add_acquisition_invalid_name(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "\"'TEST_TS' not found in acquisition of NWBFile 'root'\""
        with self.assertRaisesWith(KeyError, msg):
            self.nwbfile.get_acquisition("TEST_TS")

    def test_set_electrode_table(self):
        table = ElectrodeTable()
        dev1 = self.nwbfile.create_device('dev1')
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-1.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-2.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-3.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-4.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        self.nwbfile.set_electrode_table(table)

        self.assertIs(self.nwbfile.electrodes, table)
        self.assertIs(table.parent, self.nwbfile)

    def test_add_unit_column(self):
        self.nwbfile.add_unit_column('unit_type', 'the type of unit')
        self.assertEqual(self.nwbfile.units.colnames, ('unit_type', ))

    def test_add_unit(self):
        self.nwbfile.add_unit(id=1)
        self.assertEqual(len(self.nwbfile.units), 1)
        self.nwbfile.add_unit(id=2)
        self.nwbfile.add_unit(id=3)
        self.assertEqual(len(self.nwbfile.units), 3)

    def test_add_trial_column(self):
        self.nwbfile.add_trial_column('trial_type', 'the type of trial')
        self.assertEqual(self.nwbfile.trials.colnames,
                         ('start_time', 'stop_time', 'trial_type'))

    def test_add_trial(self):
        self.nwbfile.add_trial(start_time=10.0, stop_time=20.0)
        self.assertEqual(len(self.nwbfile.trials), 1)
        self.nwbfile.add_trial(start_time=30.0, stop_time=40.0)
        self.nwbfile.add_trial(start_time=50.0, stop_time=70.0)
        self.assertEqual(len(self.nwbfile.trials), 3)

    def test_add_invalid_times_column(self):
        self.nwbfile.add_invalid_times_column(
            'comments', 'description of reason for omitting time')
        self.assertEqual(self.nwbfile.invalid_times.colnames,
                         ('start_time', 'stop_time', 'comments'))

    def test_add_invalid_time_interval(self):

        self.nwbfile.add_invalid_time_interval(start_time=0.0, stop_time=12.0)
        self.assertEqual(len(self.nwbfile.invalid_times), 1)
        self.nwbfile.add_invalid_time_interval(start_time=15.0, stop_time=16.0)
        self.nwbfile.add_invalid_time_interval(start_time=17.0, stop_time=20.5)
        self.assertEqual(len(self.nwbfile.invalid_times), 3)

    def test_add_invalid_time_w_ts(self):
        ts = TimeSeries(name='name', data=[1.2], rate=1.0, unit='na')
        self.nwbfile.add_invalid_time_interval(start_time=18.0,
                                               stop_time=20.6,
                                               timeseries=ts,
                                               tags=('hi', 'there'))

    def test_add_electrode(self):
        dev1 = self.nwbfile.create_device('dev1')
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        self.nwbfile.add_electrode(1.0,
                                   2.0,
                                   3.0,
                                   -1.0,
                                   'CA1',
                                   'none',
                                   group=group,
                                   id=1)
        elec = self.nwbfile.electrodes[0]
        self.assertEqual(elec.index[0], 1)
        self.assertEqual(elec.iloc[0]['x'], 1.0)
        self.assertEqual(elec.iloc[0]['y'], 2.0)
        self.assertEqual(elec.iloc[0]['z'], 3.0)
        self.assertEqual(elec.iloc[0]['location'], 'CA1')
        self.assertEqual(elec.iloc[0]['filtering'], 'none')
        self.assertEqual(elec.iloc[0]['group'], group)

    def test_all_children(self):
        ts1 = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        device = self.nwbfile.create_device('a fake device')
        elecgrp = self.nwbfile.create_electrode_group(name, desc, loc, device)
        children = self.nwbfile.all_children()
        self.assertIn(ts1, children)
        self.assertIn(ts2, children)
        self.assertIn(device, children)
        self.assertIn(elecgrp, children)

    def test_fail_if_source_script_file_name_without_source_script(self):
        with self.assertRaises(ValueError):
            # <-- source_script_file_name without source_script is not allowed
            NWBFile('a test session description for a test NWBFile',
                    'FILE123',
                    self.start,
                    source_script=None,
                    source_script_file_name='nofilename')

    def test_get_neurodata_type(self):
        ts1 = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        p1 = ts1.get_ancestor(neurodata_type='NWBFile')
        self.assertIs(p1, self.nwbfile)
        p2 = ts2.get_ancestor(neurodata_type='NWBFile')
        self.assertIs(p2, self.nwbfile)

    def test_print_units(self):
        self.nwbfile.add_unit(spike_times=[1., 2., 3.])
        expected = """units pynwb.misc.Units at 0x%d
Fields:
  colnames: ['spike_times']
  columns: (
    spike_times_index <class 'hdmf.common.table.VectorIndex'>,
    spike_times <class 'hdmf.common.table.VectorData'>
  )
  description: Autogenerated by NWBFile
  id: id <class 'hdmf.common.table.ElementIdentifiers'>
"""
        expected = expected % id(self.nwbfile.units)
        self.assertEqual(str(self.nwbfile.units), expected)

    def test_copy(self):
        self.nwbfile.add_unit(spike_times=[1., 2., 3.])
        device = self.nwbfile.create_device('a')
        elecgrp = self.nwbfile.create_electrode_group('a',
                                                      'b',
                                                      device=device,
                                                      location='a')
        self.nwbfile.add_electrode(np.nan,
                                   np.nan,
                                   np.nan,
                                   np.nan,
                                   'a',
                                   'a',
                                   elecgrp,
                                   id=0)
        self.nwbfile.add_electrode(np.nan, np.nan, np.nan, np.nan, 'b', 'b',
                                   elecgrp)
        elec_region = self.nwbfile.create_electrode_table_region([1], 'name')

        ts1 = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = ElectricalSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                               electrodes=elec_region,
                               timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        self.nwbfile.add_trial(start_time=50.0, stop_time=70.0)
        self.nwbfile.add_invalid_times_column(
            'comments', 'description of reason for omitting time')
        self.nwbfile.create_processing_module('test_mod', 'test_description')
        self.nwbfile.create_time_intervals('custom_interval',
                                           'a custom time interval')
        self.nwbfile.intervals['custom_interval'].add_interval(start_time=10.,
                                                               stop_time=20.)
        newfile = self.nwbfile.copy()

        # test dictionaries
        self.assertIs(self.nwbfile.devices['a'], newfile.devices['a'])
        self.assertIs(self.nwbfile.acquisition['test_ts1'],
                      newfile.acquisition['test_ts1'])
        self.assertIs(self.nwbfile.acquisition['test_ts2'],
                      newfile.acquisition['test_ts2'])
        self.assertIs(self.nwbfile.processing['test_mod'],
                      newfile.processing['test_mod'])

        # test dynamic tables
        self.assertIsNot(self.nwbfile.electrodes, newfile.electrodes)
        self.assertIs(self.nwbfile.electrodes['x'], newfile.electrodes['x'])
        self.assertIsNot(self.nwbfile.units, newfile.units)
        self.assertIs(self.nwbfile.units['spike_times'],
                      newfile.units['spike_times'])
        self.assertIsNot(self.nwbfile.trials, newfile.trials)
        self.assertIsNot(self.nwbfile.trials.parent, newfile.trials.parent)
        self.assertIs(self.nwbfile.trials.id, newfile.trials.id)
        self.assertIs(self.nwbfile.trials['start_time'],
                      newfile.trials['start_time'])
        self.assertIs(self.nwbfile.trials['stop_time'],
                      newfile.trials['stop_time'])
        self.assertIsNot(self.nwbfile.invalid_times, newfile.invalid_times)
        self.assertTupleEqual(self.nwbfile.invalid_times.colnames,
                              newfile.invalid_times.colnames)
        self.assertIsNot(self.nwbfile.intervals['custom_interval'],
                         newfile.intervals['custom_interval'])
        self.assertTupleEqual(
            self.nwbfile.intervals['custom_interval'].colnames,
            newfile.intervals['custom_interval'].colnames)
        self.assertIs(self.nwbfile.intervals['custom_interval']['start_time'],
                      newfile.intervals['custom_interval']['start_time'])
        self.assertIs(self.nwbfile.intervals['custom_interval']['stop_time'],
                      newfile.intervals['custom_interval']['stop_time'])

    def test_multi_experimenters(self):
        self.nwbfile = NWBFile('a test session description for a test NWBFile',
                               'FILE123',
                               self.start,
                               experimenter=('experimenter1', 'experimenter2'))
        self.assertTupleEqual(self.nwbfile.experimenter,
                              ('experimenter1', 'experimenter2'))

    def test_multi_publications(self):
        self.nwbfile = NWBFile('a test session description for a test NWBFile',
                               'FILE123',
                               self.start,
                               related_publications=('pub1', 'pub2'))
        self.assertTupleEqual(self.nwbfile.related_publications,
                              ('pub1', 'pub2'))
Example #7
    visual_stimulus_images = ImageSeries(
        name=stimulus,
        source='NA',
        data=dataset.get_stimulus_template(stimulus),
        unit='NA',
        format='raw',
        timestamps=[0.0])
    image_index = IndexSeries(
        name=stimulus,
        source='NA',
        data=dataset.get_stimulus_table(stimulus).frame.values,
        unit='NA',
        indexed_timeseries=visual_stimulus_images,
        timestamps=timestamps[dataset.get_stimulus_table(
            stimulus).start.values])
    nwbfile.add_stimulus_template(visual_stimulus_images)
    nwbfile.add_stimulus(image_index)

########################################
# 3) Besides the two-photon calcium image stack, the running speed of the animal was also recorded in this experiment.
# We can store this data as a TimeSeries, in the acquisition portion of the file.

running_speed = TimeSeries(name='running_speed',
                           source='Allen Brain Observatory: Visual Coding',
                           data=running_data,
                           timestamps=timestamps,
                           unit='cm/s')

nwbfile.add_acquisition(running_speed)

########################################
# 4) In NWB:N, an "epoch" is an interval of experiment time that can slice into a timeseries (for example, the running_speed series we just added).
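# A hedged sketch, not from the original tutorial, of an epoch that slices
# into the running_speed series added above. It assumes a recent pynwb where
# NWBFile.add_epoch takes start_time/stop_time/tags/timeseries; the times and
# tag are hypothetical placeholders.
nwbfile.add_epoch(start_time=2.0, stop_time=4.0,
                  tags=['running'],
                  timeseries=[running_speed])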
Example #8
def yuta2nwb(session_path='/Users/bendichter/Desktop/Buzsaki/SenzaiBuzsaki2017/YutaMouse41/YutaMouse41-150903',
             subject_xls=None, include_spike_waveforms=True, stub=True):

    subject_path, session_id = os.path.split(session_path)
    fpath_base = os.path.split(subject_path)[0]
    identifier = session_id
    mouse_number = session_id[9:11]
    if '-' in session_id:
        subject_id, date_text = session_id.split('-')
        b = False
    else:
        subject_id, date_text = session_id.split('b')
        b = True

    if subject_xls is None:
        subject_xls = os.path.join(subject_path, 'YM' + mouse_number + ' exp_sheet.xlsx')
    else:
        if not subject_xls[-4:] == 'xlsx':
            subject_xls = os.path.join(subject_xls, 'YM' + mouse_number + ' exp_sheet.xlsx')

    session_start_time = dateparse(date_text, yearfirst=True)

    df = pd.read_excel(subject_xls)

    subject_data = {}
    for key in ['genotype', 'DOB', 'implantation', 'Probe', 'Surgery', 'virus injection', 'mouseID']:
        names = df.iloc[:, 0]
        if key in names.values:
            subject_data[key] = df.iloc[np.argmax(names == key), 1]

    if isinstance(subject_data['DOB'], datetime):
        age = session_start_time - subject_data['DOB']
    else:
        age = None

    subject = Subject(subject_id=subject_id, age=str(age),
                      genotype=subject_data['genotype'],
                      species='mouse')

    nwbfile = NWBFile(session_description='mouse in open exploration and theta maze',
                      identifier=identifier,
                      session_start_time=session_start_time.astimezone(),
                      file_create_date=datetime.now().astimezone(),
                      experimenter='Yuta Senzai',
                      session_id=session_id,
                      institution='NYU',
                      lab='Buzsaki',
                      subject=subject,
                      related_publications='DOI:10.1016/j.neuron.2016.12.011')

    print('reading and writing raw position data...', end='', flush=True)
    ns.add_position_data(nwbfile, session_path)

    shank_channels = ns.get_shank_channels(session_path)[:8]
    all_shank_channels = np.concatenate(shank_channels)

    print('setting up electrodes...', end='', flush=True)
    hilus_csv_path = os.path.join(fpath_base, 'early_session_hilus_chans.csv')
    lfp_channel = get_reference_elec(subject_xls, hilus_csv_path, session_start_time, session_id, b=b)
    print(lfp_channel)
    custom_column = [{'name': 'theta_reference',
                      'description': 'this electrode was used to calculate LFP canonical bands',
                      'data': all_shank_channels == lfp_channel}]
    ns.write_electrode_table(nwbfile, session_path, custom_columns=custom_column, max_shanks=max_shanks)

    print('reading LFPs...', end='', flush=True)
    lfp_fs, all_channels_data = ns.read_lfp(session_path, stub=stub)

    lfp_data = all_channels_data[:, all_shank_channels]
    print('writing LFPs...', flush=True)
    # lfp_data[:int(len(lfp_data)/4)]
    lfp_ts = ns.write_lfp(nwbfile, lfp_data, lfp_fs, name='lfp',
                          description='lfp signal for all shank electrodes')

    for name, channel in special_electrode_dict.items():
        ts = TimeSeries(name=name, description='environmental electrode recorded inline with neural data',
                        data=all_channels_data[channel], rate=lfp_fs, unit='V', conversion=np.nan, resolution=np.nan)
        nwbfile.add_acquisition(ts)

    # compute filtered LFP
    print('filtering LFP...', end='', flush=True)
    all_lfp_phases = []
    for passband in ('theta', 'gamma'):
        lfp_fft = filter_lfp(lfp_data[:, all_shank_channels == lfp_channel].ravel(), lfp_fs, passband=passband)
        lfp_phase, _ = hilbert_lfp(lfp_fft)
        all_lfp_phases.append(lfp_phase[:, np.newaxis])
    data = np.dstack(all_lfp_phases)
    print('done.', flush=True)

    if include_spike_waveforms:
        print('writing waveforms...', end='', flush=True)
        for shankn in np.arange(1, 9, dtype=int):
            ns.write_spike_waveforms(nwbfile, session_path, shankn, stub=stub)
        print('done.', flush=True)

    decomp_series = DecompositionSeries(name='LFPDecompositionSeries',
                                        description='Theta and Gamma phase for reference LFP',
                                        data=data, rate=lfp_fs,
                                        source_timeseries=lfp_ts,
                                        metric='phase', unit='radians')
    decomp_series.add_band(band_name='theta', band_limits=(4, 10))
    decomp_series.add_band(band_name='gamma', band_limits=(30, 80))

    check_module(nwbfile, 'ecephys', 'contains processed extracellular electrophysiology data').add_data_interface(decomp_series)

    [nwbfile.add_stimulus(x) for x in ns.get_events(session_path)]

    # create epochs corresponding to experiments/environments for the mouse

    sleep_state_fpath = os.path.join(session_path, '{}--StatePeriod.mat'.format(session_id))

    exist_pos_data = any(os.path.isfile(os.path.join(session_path, '{}__{}.mat'.format(session_id, task_type['name'])))
                         for task_type in task_types)

    if exist_pos_data:
        nwbfile.add_epoch_column('label', 'name of epoch')

    for task_type in task_types:
        label = task_type['name']

        file = os.path.join(session_path, session_id + '__' + label + '.mat')
        if os.path.isfile(file):
            print('loading position for ' + label + '...', end='', flush=True)

            pos_obj = Position(name=label + '_position')

            matin = loadmat(file)
            tt = matin['twhl_norm'][:, 0]
            exp_times = find_discontinuities(tt)

            if 'conversion' in task_type:
                conversion = task_type['conversion']
            else:
                conversion = np.nan

            for pos_type in ('twhl_norm', 'twhl_linearized'):
                if pos_type in matin:
                    pos_data_norm = matin[pos_type][:, 1:]

                    spatial_series_object = SpatialSeries(
                        name=label + '_{}_spatial_series'.format(pos_type),
                        data=H5DataIO(pos_data_norm, compression='gzip'),
                        reference_frame='unknown', conversion=conversion,
                        resolution=np.nan,
                        timestamps=H5DataIO(tt, compression='gzip'))
                    pos_obj.add_spatial_series(spatial_series_object)

            check_module(nwbfile, 'behavior', 'contains processed behavioral data').add_data_interface(pos_obj)
            for i, window in enumerate(exp_times):
                nwbfile.add_epoch(start_time=window[0], stop_time=window[1],
                                  label=label + '_' + str(i))
            print('done.')

    # there are occasional mismatches between the matlab struct and the neuroscope files
    # regions: 3: 'CA3', 4: 'DG'

    df_unit_features = get_UnitFeatureCell_features(fpath_base, session_id, session_path)

    celltype_names = []
    for celltype_id, region_id in zip(df_unit_features['fineCellType'].values,
                                      df_unit_features['region'].values):
        if celltype_id == 1:
            if region_id == 3:
                celltype_names.append('pyramidal cell')
            elif region_id == 4:
                celltype_names.append('granule cell')
            else:
                raise Exception('unknown type')
        elif not np.isfinite(celltype_id):
            celltype_names.append('missing')
        else:
            celltype_names.append(celltype_dict[celltype_id])

    custom_unit_columns = [
        {
            'name': 'cell_type',
            'description': 'name of cell type',
            'data': celltype_names},
        {
            'name': 'global_id',
            'description': 'global id for cell for entire experiment',
            'data': df_unit_features['unitID'].values},
        {
            'name': 'max_electrode',
            'description': 'electrode that has the maximum amplitude of the waveform',
            'data': get_max_electrodes(nwbfile, session_path),
            'table': nwbfile.electrodes
        }]

    ns.add_units(nwbfile, session_path, custom_unit_columns, max_shanks=max_shanks)

    trialdata_path = os.path.join(session_path, session_id + '__EightMazeRun.mat')
    if os.path.isfile(trialdata_path):
        trials_data = loadmat(trialdata_path)['EightMazeRun']

        trialdatainfo_path = os.path.join(fpath_base, 'EightMazeRunInfo.mat')
        trialdatainfo = [x[0] for x in loadmat(trialdatainfo_path)['EightMazeRunInfo'][0]]

        features = trialdatainfo[:7]
        features[:2] = 'start_time', 'stop_time',
        [nwbfile.add_trial_column(x, 'description') for x in features[4:] + ['condition']]

        for trial_data in trials_data:
            if trial_data[3]:
                cond = 'run_left'
            else:
                cond = 'run_right'
            nwbfile.add_trial(start_time=trial_data[0], stop_time=trial_data[1], condition=cond,
                              error_run=trial_data[4], stim_run=trial_data[5], both_visit=trial_data[6])
    """
    mono_syn_fpath = os.path.join(session_path, session_id+'-MonoSynConvClick.mat')

    matin = loadmat(mono_syn_fpath)
    exc = matin['FinalExcMonoSynID']
    inh = matin['FinalInhMonoSynID']

    #exc_obj = CatCellInfo(name='excitatory_connections',
    #                      indices_values=[], cell_index=exc[:, 0] - 1, indices=exc[:, 1] - 1)
    #module_cellular.add_container(exc_obj)
    #inh_obj = CatCellInfo(name='inhibitory_connections',
    #                      indices_values=[], cell_index=inh[:, 0] - 1, indices=inh[:, 1] - 1)
    #module_cellular.add_container(inh_obj)
    """

    if os.path.isfile(sleep_state_fpath):
        matin = loadmat(sleep_state_fpath)['StatePeriod']

        table = TimeIntervals(name='states', description='sleep states of animal')
        table.add_column(name='label', description='sleep state')

        data = []
        for name in matin.dtype.names:
            for row in matin[name][0][0]:
                data.append({'start_time': row[0], 'stop_time': row[1], 'label': name})
        [table.add_row(**row) for row in sorted(data, key=lambda x: x['start_time'])]

        check_module(nwbfile, 'behavior', 'contains behavioral data').add_data_interface(table)

    if stub:
        out_fname = session_path + '_stub.nwb'
    else:
        out_fname = session_path + '.nwb'

    print('writing NWB file...', end='', flush=True)
    with NWBHDF5IO(out_fname, mode='w') as io:
        io.write(nwbfile)
    print('done.')

    print('testing read...', end='', flush=True)
    # test read
    with NWBHDF5IO(out_fname, mode='r') as io:
        io.read()
    print('done.')
Example #9
def no2nwb(NOData, session_use, subjects):

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject
    df_session = subjects[subjects['session_id'] == session_use]

    print('session_use')
    print(session_use)
    print('age')
    print(str(df_session['age'].values[0]))
    print('epilepsy_diagnosis')
    print(str(df_session['epilepsy_diagnosis'].values[0]))

    nwb_subject = Subject(
        age=str(df_session['age'].values[0]),
        description=df_session['epilepsy_diagnosis'].values[0],
        sex=df_session['sex'].values[0],
        subject_id=df_session['subject_id'].values[0])

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='RecogMemory dataset session use 5' + session['session'],
        identifier=session['session_id'],
        session_start_time=datetime.datetime.now(),# TODO: need to check out the time for session start
        file_create_date=datetime.datetime.now(),
        experiment_description="learning: " + str(experiment_id_learn) + ", " + \
                               "recognition: " + \
                               str(experiment_id_recog),
        subject=nwb_subject
    )

    # Add event and experiment_id acquisition
    # event_ts = TimeSeries(name='events', source='NA', unit='NA', data=np.asarray(events[1].values),
    #                       timestamps=np.asarray(events[0].values))

    event_ts = TimeSeries(name='events',
                          unit='NA',
                          data=np.asarray(events[1].values),
                          timestamps=np.asarray(events[0].values))
    # experiment_ids = TimeSeries(name='experiment_ids', source='NA', unit='NA', data=np.asarray(events[2]),
    #                             timestamps=np.asarray(events[0].values))
    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values))
    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use, NOData.ls_cells(session_use)[0])
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add stimuli recog
    counter = 1
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)
        name = 'stimuli_recog_' + str(counter)
        stimulus_recog = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_recog)
        counter += 1

    # Add stimuli learn
    counter = 1
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)

        name = 'stimuli_learn_' + str(counter)

        stimulus_learn = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_learn)

        counter += 1

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trial will be stored in a trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]

    # Create the trial tables
    nwbfile.add_trial_column('stim_on', 'the time when the stimulus is shown')
    nwbfile.add_trial_column('stim_off', 'the time when the stimulus is off')
    nwbfile.add_trial_column('delay1_off', 'the time when delay1 is off')
    nwbfile.add_trial_column('delay2_off', 'the time when delay2 is off')
    nwbfile.add_trial_column('stim_phase',
                             'learning/recognition phase during the trial')
    nwbfile.add_trial_column('category_id', 'the category id of the stimulus')
    nwbfile.add_trial_column('category_name',
                             'the category name of the stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'the file path to the stimulus')
    nwbfile.add_trial_column('new_old_labels_recog',
                             'labels for new or old stimulus')
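
    # Some sessions may end mid-trial, so iterate only over complete trials:
    # use the shortest of the four event streams in each phase as the range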

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate over the event lists and add each trial's information to the trial table
    for i in range(range_learn):
        # nwbfile.create_epoch(start_time=events_learn_stim_on.iloc[i][0],
        #                      stop_time=events_learn_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_learn',
        #                      description='learning phase stimulus')

        # nwbfile.add_trial({'start': events_learn_stim_on.iloc[i][0],
        #                    'end': events_learn_delay2_off.iloc[i][0],
        #                    'stim_on': events_learn_stim_on.iloc[i][0],
        #                    'stim_off': events_learn_stim_off.iloc[i][0],
        #                    'delay1_off': events_learn_delay1_off.iloc[i][0],
        #                    'delay2_off': events_learn_delay2_off.iloc[i][0],
        #                    'stim_phase': 'learn',
        #                    'category_id': cat_id_learn[i],
        #                    'category_name': cat_name_learn[i],
        #                    'external_image_file': stimuli_learn_path[i],
        #                    'new_old_labels_recog': -1})

        nwbfile.add_trial(start_time=events_learn_stim_on.iloc[i][0],
                          stop_time=events_learn_delay2_off.iloc[i][0],
                          stim_on=events_learn_stim_on.iloc[i][0],
                          stim_off=events_learn_stim_off.iloc[i][0],
                          delay1_off=events_learn_delay1_off.iloc[i][0],
                          delay2_off=events_learn_delay2_off.iloc[i][0],
                          stim_phase='learn',
                          category_id=cat_id_learn[i],
                          category_name=cat_name_learn[i],
                          external_image_file=stimuli_learn_path[i],
                          new_old_labels_recog='NA')

    for i in range(range_recog):
        # nwbfile.create_epoch(start_time=events_recog_stim_on.iloc[i][0],
        #                      stop_time=events_recog_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_recog',
        #                      description='recognition phase stimulus')

        nwbfile.add_trial(start_time=events_recog_stim_on.iloc[i][0],
                          stop_time=events_recog_delay2_off.iloc[i][0],
                          stim_on=events_recog_stim_on.iloc[i][0],
                          stim_off=events_recog_stim_off.iloc[i][0],
                          delay1_off=events_recog_delay1_off.iloc[i][0],
                          delay2_off=events_recog_delay2_off.iloc[i][0],
                          stim_phase='recog',
                          category_id=cat_id_recog[i],
                          category_name=cat_name_recog[i],
                          external_image_file=stimuli_recog_path[i],
                          new_old_labels_recog=new_old_recog[i])

    # Add the waveform clustering and the spike data.
    # Create necessary processing modules for different kinds of waveform data
    clustering_processing_module = ProcessingModule(
        'Spikes', 'The spike data detected on each channel')
    clusterWaveform_learn_processing_module = ProcessingModule(
        'MeanWaveforms_learn',
        'The mean waveforms for the clustered raw signal for learning phase')
    clusterWaveform_recog_processing_module = ProcessingModule(
        'MeanWaveforms_recog',
        'The mean waveforms for the clustered raw signal for recognition phase'
    )
    IsolDist_processing_module = ProcessingModule(
        'IsoDist', 'The isolation distance of each cluster')
    SNR_processing_module = ProcessingModule(
        'SNR', 'The signal-to-noise ratio (SNR) of each cluster')
    # Get the unique channel ids that we will iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # Iterate over the channel list
    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        file_path = os.path.join('RecogMemory_MTL_release_v2', 'Data',
                                 'sorted', session['session'], task_descr,
                                 cell_name)
        try:
            cell_mat = loadmat(file_path)
        except FileNotFoundError:
            print(f"File not found: {file_path}")
            continue
        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        spike_id = np.asarray([spike[0] for spike in spikes])
        spike_cluster_id = np.asarray([spike[1] for spike in spikes])
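        # spike times in the .mat file are in microseconds; convert to seconds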
        spike_timestamps = np.asarray([spike[2] / 1000000 for spike in spikes])
        clustering = Clustering(description='Spikes of the channel detected',
                                num=spike_id,
                                peak_over_rms=np.asarray([0]),
                                times=spike_timestamps,
                                name='channel' + str(channel_id))
        clustering_processing_module.add_data_interface(clustering)

        for i in range(len(meanWaveform_learn[0][0][0][0])):
            waveform_mean_learn = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_learn[0][0][1][i]]),
                name='waveform_learn_cluster_id_' +
                str(meanWaveform_learn[0][0][0][0][i]))
            try:
                clusterWaveform_learn_processing_module.add_data_interface(
                    waveform_mean_learn)
            except ValueError as e:
                print(
                    'Caught an error adding a waveform interface to the learn processing module: '
                    + str(e))
                continue

        # Adding mean waveform recognition into the processing module
        for i in range(len(meanWaveform_recog[0][0][0][0])):
            waveform_mean_recog = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_recog[0][0][1][i]]),
                name='waveform_recog_cluster_id_' +
                str(meanWaveform_recog[0][0][0][0][i]))
            try:
                clusterWaveform_recog_processing_module.add_data_interface(
                    waveform_mean_recog)
            except ValueError as e:
                print(
                    'Caught an error adding a waveform interface to the recog processing module: '
                    + str(e))
                continue

        # Adding IsolDist_SNR data into the processing modules.
        # The IsolDist and SNR values are stored as TimeSeries because they are
        # features extracted from the original signals.
        for i in range(len(IsolDist_SNR[0][0][1][0])):
            isoldist_data_interface = TimeSeries(
                data=[IsolDist_SNR[0][0][1][0][i]],
                unit='NA',
                timestamps=[0],
                name='IsolDist_' + str(IsolDist_SNR[0][0][0][0][i]))
            try:
                IsolDist_processing_module.add_data_interface(
                    isoldist_data_interface)
            except ValueError as e:
                print(
                    'Caught an error adding IsolDist to the processing module: '
                    + str(e))
                continue

            SNR_data_interface = TimeSeries(unit='NA',
                                            description='The SNR data',
                                            data=[IsolDist_SNR[0][0][2][0][i]],
                                            timestamps=[0],
                                            name='SNR_' +
                                            str(IsolDist_SNR[0][0][0][0][i]))

            try:
                SNR_processing_module.add_data_interface(SNR_data_interface)
            except ValueError as e:
                print(
                    'Caught an error adding SNR to the processing module: ' +
                    str(e))
                continue

    nwbfile.add_processing_module(clustering_processing_module)
    nwbfile.add_processing_module(clusterWaveform_learn_processing_module)
    nwbfile.add_processing_module(clusterWaveform_recog_processing_module)
    nwbfile.add_processing_module(IsolDist_processing_module)
    nwbfile.add_processing_module(SNR_processing_module)

    return nwbfile
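
# A minimal usage sketch (hypothetical caller; `convert_session`, `session` and
# `task_descr` are placeholder names, not from this source):
#
#     from pynwb import NWBHDF5IO
#
#     nwbfile = convert_session(session, task_descr)
#     with NWBHDF5IO('no_session.nwb', mode='w') as io:
#         io.write(nwbfile)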
Example #10
    def run_conversion(self,
                       nwbfile: NWBFile,
                       metadata_dict: dict,
                       stub_test: bool = False):
        session_path = Path(self.source_data["folder_path"])
        session_id = session_path.stem

        # Stimuli
        for event_ts in get_events(session_path):
            nwbfile.add_stimulus(event_ts)

        # States
        sleep_state_fpath = session_path / f"{session_id}.SleepState.states.mat"
        # label renaming specific to Peyrache
        state_label_names = dict(WAKEstate="Awake",
                                 NREMstate="Non-REM",
                                 REMstate="REM")
        if sleep_state_fpath.is_file():
            matin = loadmat(sleep_state_fpath)["SleepState"]["ints"][0][0]
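            # `matin` is assumed to be a MATLAB struct whose fields (e.g.
            # WAKEstate) each hold an N x 2 array of [start, stop] times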

            table = TimeIntervals(name="states",
                                  description="Sleep states of animal.")
            table.add_column(name="label", description="Sleep state.")

            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append(
                        dict(start_time=row[0],
                             stop_time=row[1],
                             label=state_label_names[name]))
            for row in sorted(data, key=lambda x: x["start_time"]):
                table.add_row(**row)
            check_module(nwbfile, "behavior",
                         "Contains behavioral data.").add(table)

        # Position
        pos_names = ["RedLED", "BlueLED"]
        pos_idx_from = [0, 2]
        pos_idx_to = [2, 4]
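        # Assumed .whl column layout: [red_x, red_y, blue_x, blue_y], one
        # consecutive pair of columns per LED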

        # Raw position
        whlfile_path = session_path / f"{session_id}.whl"
        whl_data = np.loadtxt(whlfile_path)
        for name, idx_from, idx_to in zip(pos_names, pos_idx_from, pos_idx_to):
            nwbfile.add_acquisition(
                peyrache_spatial_series(
                    name=name,
                    description=
                    "Raw sensor data. Values of -1 indicate that LED detection failed.",
                    data=whl_data[:, idx_from:idx_to],
                    conversion=np.nan,  # whl file is in arbitrary grid units
                ))

        # Processed position
        posfile_path = session_path / f"{session_id}.pos"
        # at least Mouse32-140820 was missing a .pos file
        if posfile_path.is_file():
            try:
                pos_data = np.loadtxt(posfile_path)
                pos_obj = Position(name="SubjectPosition")
                for name, idx_from, idx_to in zip(pos_names, pos_idx_from,
                                                  pos_idx_to):
                    pos_obj.add_spatial_series(
                        peyrache_spatial_series(
                            name=name,
                            description=
                            ("(x,y) coordinates tracking subject movement through the maze."
                             "Values of -1 indicate that LED detection failed."
                             ),
                            data=pos_data[:, idx_from:idx_to],
                            conversion=1e-2,  # from cm to m
                        ))
                check_module(nwbfile, "behavior",
                             "Contains behavioral data.").add(pos_obj)
            except ValueError:  # data issue present in at least Mouse17-170201
                warn(f"Skipping .pos file for session {session_id}!")

        # Epochs - only available for sessions with raw data
        epoch_file = session_path / "raw" / f"{session_id}-raw-info" / f"{session_id}-behaviors.txt"
        if epoch_file.is_file():
            epoch_data = pd.read_csv(epoch_file, header=1)[f"{session_id}:"]
            epoch_dat_inds = []
            epoch_names = []
            for epochs in epoch_data:
                inds, name = epochs.split(": ")
                epoch_dat_inds.append(inds.split(" "))
                epoch_names.append(name)

            epoch_windows = [0]
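            # Accumulate the durations of the raw .dat files for each epoch;
            # interior boundaries are duplicated so each serves as both a stop
            # and the next start, then the flat list is paired into (start, stop) rows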
            for epoch in epoch_dat_inds:
                exp_end_times = []
                for dat_ind in epoch:
                    recording_file = session_path / "raw" / f"{session_id}{dat_ind}.dat"
                    info_extractor = NeuroscopeRecordingExtractor(
                        recording_file)
                    dat_end_time = (info_extractor.get_num_frames() /
                                    info_extractor.get_sampling_frequency())  # seconds
                    exp_end_times.extend([dat_end_time])
                epoch_windows.extend([epoch_windows[-1] + sum(exp_end_times)] *
                                     2)
            epoch_windows = np.array(epoch_windows[:-1]).reshape(-1, 2)

            for j, epoch_name in enumerate(epoch_names):
                nwbfile.add_epoch(start_time=epoch_windows[j][0],
                                  stop_time=epoch_windows[j][1],
                                  tags=[epoch_name])
Example #11
    def run_conversion(self, nwbfile: NWBFile, metadata: dict):
        """
        Run conversion for this data interface.
        Reads labview experiment behavioral data and adds it to nwbfile.

        Parameters
        ----------
        nwbfile : NWBFile
            NWB file to which the behavioral, trial and optogenetics data are added.
        metadata : dict
            Metadata dictionary; must contain the 'Ogen' device, stimulus-site
            and series entries used below.
        """
        print("Converting Labview data...")
        # Get list of trial summary files
        dir_behavior_labview = self.source_data['dir_behavior_labview']
        all_files = os.listdir(dir_behavior_labview)
        trials_files = [f for f in all_files if '_sum.txt' in f]
        trials_files.sort()

        # Get the session reference time (t0) from the timestamps in the first file
        fpath = os.path.join(dir_behavior_labview, trials_files[0])
        colnames = [
            'Trial', 'StartT', 'EndT', 'Result', 'InitT', 'SpecificResults',
            'ProbLeft', 'OptoDur', 'LRew', 'RRew', 'InterT', 'LTrial',
            'ReactionTime', 'OptoCond', 'OptoTrial'
        ]
        df_0 = pd.read_csv(fpath, sep='\t', index_col=False, names=colnames)
        t0 = df_0['StartT'][0]  # initial time in Labview seconds
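        # t0 is subtracted from all Labview timestamps below so that the
        # session starts at 0 seconds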

        # Add trials
        print("Converting Labview trials data...")
        if nwbfile.trials is not None:
            print(
                'Trials already exist in current nwb file. Labview behavior trials not added.'
            )
        else:
            # Make dataframe
            frames = []
            for f in trials_files:
                fpath = os.path.join(dir_behavior_labview, f)
                frames.append(
                    pd.read_csv(fpath,
                                sep='\t',
                                index_col=False,
                                names=colnames))
            df_trials_summary = pd.concat(frames)

            nwbfile.add_trial_column(
                name='results',
                description=
                "0 means sucess (rewarded trial), 1 means licks during intitial "
                "period, which leads to a failed trial. 2 means early lick failure. 3 means "
                "wrong lick or no response.")
            nwbfile.add_trial_column(
                name='init_t', description="duration of initial delay period.")
            nwbfile.add_trial_column(
                name='specific_results',
                description=
                "Possible outcomes classified based on raw data & meta file (_tr.m)."
            )
            nwbfile.add_trial_column(
                name='prob_left',
                description=
                "probability for left trials in order to keep the number of "
                "left and right trials balanced within the session. ")
            nwbfile.add_trial_column(
                name='opto_dur',
                description="the duration of optical stimulation.")
            nwbfile.add_trial_column(
                name='l_rew_n',
                description="counting the number of left rewards.")
            nwbfile.add_trial_column(
                name='r_rew_n',
                description="counting the number of rightrewards.")
            nwbfile.add_trial_column(name='inter_t',
                                     description="inter-trial delay period.")
            nwbfile.add_trial_column(
                name='l_trial',
                description=
                "trial type (which side the air-puff is applied). 1 means "
                "left-trial, 0 means right-trial")
            nwbfile.add_trial_column(
                name='reaction_time',
                description=
                "if it is a successful trial or wrong lick during response "
                "period trial: ReactionTime = time between the first decision "
                "lick and the beginning of the response period. If it is a failed "
                "trial due to early licks: reaction time = the duration of "
                "the air-puff period (in other words, when the animal licks "
                "during the sample period).")
            nwbfile.add_trial_column(
                name='opto_cond',
                description="0: no opto. 1: opto is on during sample period. "
                "2: opto is on half way through the sample period (0.5s) "
                "and 0.5 during the response period. 3. opto is on during "
                "the response period.")
            nwbfile.add_trial_column(
                name='opto_trial',
                description="1: opto trials. 0: Non-opto trials.")
            for index, row in df_trials_summary.iterrows():
                nwbfile.add_trial(
                    start_time=row['StartT'] - t0,
                    stop_time=row['EndT'] - t0,
                    results=int(row['Result']),
                    init_t=row['InitT'],
                    specific_results=int(row['SpecificResults']),
                    prob_left=row['ProbLeft'],
                    opto_dur=row['OptoDur'],
                    l_rew_n=int(row['LRew']),
                    r_rew_n=int(row['RRew']),
                    inter_t=row['InterT'],
                    l_trial=int(row['LTrial']),
                    reaction_time=int(row['ReactionTime']),
                    opto_cond=int(row['OptoCond']),
                    opto_trial=int(row['OptoTrial']),
                )

        # Get list of files: continuous data
        continuous_files = [f.replace('_sum', '') for f in trials_files]

        # Adds continuous behavioral data
        frames = []
        for f in continuous_files:
            fpath_lick = os.path.join(dir_behavior_labview, f)
            frames.append(pd.read_csv(fpath_lick, sep='\t', index_col=False))
        df_continuous = pd.concat(frames)

        # Behavioral data
        print("Converting Labview behavior data...")
        l1_ts = TimeSeries(name="left_lick",
                           data=df_continuous['Lick 1'].to_numpy(),
                           timestamps=df_continuous['Time'].to_numpy() - t0,
                           description="no description")
        l2_ts = TimeSeries(name="right_lick",
                           data=df_continuous['Lick 2'].to_numpy(),
                           timestamps=df_continuous['Time'].to_numpy() - t0,
                           description="no description")

        nwbfile.add_acquisition(l1_ts)
        nwbfile.add_acquisition(l2_ts)

        # Optogenetics stimulation data
        print("Converting Labview optogenetics data...")
        ogen_device = nwbfile.create_device(
            name=metadata['Ogen']['Device']['name'],
            description=metadata['Ogen']['Device']['description'])

        meta_ogen_site = metadata['Ogen']['OptogeneticStimulusSite']
        ogen_stim_site = OptogeneticStimulusSite(
            name=meta_ogen_site['name'],
            device=ogen_device,
            description=meta_ogen_site['description'],
            excitation_lambda=float(meta_ogen_site['excitation_lambda']),
            location=meta_ogen_site['location'])
        nwbfile.add_ogen_site(ogen_stim_site)

        meta_ogen_series = metadata['Ogen']['OptogeneticSeries']
        ogen_series = OptogeneticSeries(
            name=meta_ogen_series['name'],
            data=df_continuous['Opto'].to_numpy(),
            site=ogen_stim_site,
            timestamps=df_continuous['Time'].to_numpy() - t0,
            description=meta_ogen_series['description'],
        )
        nwbfile.add_stimulus(ogen_series)
Example #12
    def convert_data(self,
                     nwbfile: NWBFile,
                     metadata_dict: dict,
                     stub_test: bool = False,
                     include_spike_waveforms: bool = False):
        session_path = self.input_args['folder_path']
        # TODO: check/enforce format?
        task_types = metadata_dict['task_types']

        subject_path, session_id = os.path.split(session_path)
        fpath_base = os.path.split(subject_path)[0]

        for event_ts in get_events(session_path):
            nwbfile.add_stimulus(event_ts)

        sleep_state_fpath = os.path.join(
            session_path, '{}--StatePeriod.mat'.format(session_id))

        exist_pos_data = any(
            os.path.isfile(
                os.path.join(
                    session_path, '{}__{}.mat'.format(session_id,
                                                      task_type['name'])))
            for task_type in task_types)

        if exist_pos_data:
            nwbfile.add_epoch_column('label', 'name of epoch')

        for task_type in task_types:
            label = task_type['name']

            file = os.path.join(session_path,
                                session_id + '__' + label + '.mat')
            if os.path.isfile(file):
                pos_obj = Position(name=label + '_position')

                matin = loadmat(file)
                tt = matin['twhl_norm'][:, 0]
                exp_times = find_discontinuities(tt)

                if 'conversion' in task_type:
                    conversion = task_type['conversion']
                else:
                    conversion = np.nan

                for pos_type in ('twhl_norm', 'twhl_linearized'):
                    if pos_type in matin:
                        pos_data_norm = matin[pos_type][:, 1:]

                        spatial_series_object = SpatialSeries(
                            name=label + '_{}_spatial_series'.format(pos_type),
                            data=H5DataIO(pos_data_norm, compression='gzip'),
                            reference_frame='unknown',
                            conversion=conversion,
                            resolution=np.nan,
                            timestamps=H5DataIO(tt, compression='gzip'))
                        pos_obj.add_spatial_series(spatial_series_object)

                check_module(
                    nwbfile, 'behavior',
                    'contains processed behavioral data').add_data_interface(
                        pos_obj)
                for i, window in enumerate(exp_times):
                    nwbfile.add_epoch(start_time=window[0],
                                      stop_time=window[1],
                                      label=label + '_' + str(i))

        trialdata_path = os.path.join(session_path,
                                      session_id + '__EightMazeRun.mat')
        if os.path.isfile(trialdata_path):
            trials_data = loadmat(trialdata_path)['EightMazeRun']

            trialdatainfo_path = os.path.join(fpath_base,
                                              'EightMazeRunInfo.mat')
            trialdatainfo = [
                x[0]
                for x in loadmat(trialdatainfo_path)['EightMazeRunInfo'][0]
            ]

            features = trialdatainfo[:7]
            features[:2] = 'start_time', 'stop_time',
            for x in features[4:] + ['condition']:
                nwbfile.add_trial_column(x, 'description')

            for trial_data in trials_data:
                if trial_data[3]:
                    cond = 'run_left'
                else:
                    cond = 'run_right'
                nwbfile.add_trial(start_time=trial_data[0],
                                  stop_time=trial_data[1],
                                  condition=cond,
                                  error_run=trial_data[4],
                                  stim_run=trial_data[5],
                                  both_visit=trial_data[6])

        if os.path.isfile(sleep_state_fpath):
            matin = loadmat(sleep_state_fpath)['StatePeriod']

            table = TimeIntervals(name='states',
                                  description='sleep states of animal')
            table.add_column(name='label', description='sleep state')

            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append({
                        'start_time': row[0],
                        'stop_time': row[1],
                        'label': name
                    })
            for row in sorted(data, key=lambda x: x['start_time']):
                table.add_row(**row)

            check_module(nwbfile, 'behavior',
                         'contains behavioral data').add_data_interface(table)
Example #13
def export_to_nwb(session_key,
                  nwb_output_dir=default_nwb_output_dir,
                  save=False,
                  overwrite=False):

    this_session = (experiment.Session & session_key).fetch1()
    print(f'Exporting to NWB 2.0 for session: {this_session}...')
    # ===============================================================================
    # ============================== META INFORMATION ===============================
    # ===============================================================================

    sess_desc = session_description_mapper[(
        experiment.ProjectSession & session_key).fetch1('project_name')]

    # -- NWB file - a NWB2.0 file for each session
    nwbfile = NWBFile(
        identifier='_'.join([
            'ANM' + str(this_session['subject_id']),
            this_session['session_date'].strftime('%Y-%m-%d'),
            str(this_session['session'])
        ]),
        session_description='',
        session_start_time=datetime.combine(this_session['session_date'],
                                            zero_zero_time),
        file_create_date=datetime.now(tzlocal()),
        experimenter=this_session['username'],
        institution=institution,
        experiment_description=sess_desc['experiment_description'],
        related_publications=sess_desc['related_publications'],
        keywords=sess_desc['keywords'])

    # -- subject
    subj = (lab.Subject & session_key).aggr(
        lab.Subject.Strain, ...,
        strains='GROUP_CONCAT(animal_strain)').fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=str(this_session['subject_id']),
        description=
        f'source: {subj["animal_source"]}; strains: {subj["strains"]}',
        genotype=' x '.join((lab.Subject.GeneModification
                             & subj).fetch('gene_modification')),
        sex=subj['sex'],
        species=subj['species'],
        date_of_birth=datetime.combine(subj['date_of_birth'], zero_zero_time)
        if subj['date_of_birth'] else None)
    # -- virus
    nwbfile.virus = json.dumps([{
        k: str(v)
        for k, v in virus_injection.items() if k not in subj
    } for virus_injection in virus.VirusInjection * virus.Virus & session_key])

    # ===============================================================================
    # ======================== EXTRACELLULAR & CLUSTERING ===========================
    # ===============================================================================
    """
    In the event of multiple probe recording (i.e. multiple probe insertions), the clustering results 
    (and the associated units) are associated with the corresponding probe. 
    Each probe insertion is associated with one ElectrodeConfiguration (which may define multiple electrode groups)
    """

    dj_insert_location = ephys.ProbeInsertion.InsertionLocation.aggr(
        ephys.ProbeInsertion.RecordableBrainRegion.proj(
            brain_region='CONCAT(hemisphere, " ", brain_area)'), ...,
        brain_regions='GROUP_CONCAT(brain_region)')
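    # (GROUP_CONCAT collapses each insertion's recordable brain regions into a
    # single comma-separated string)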

    for probe_insertion in ephys.ProbeInsertion & session_key:
        electrode_config = (lab.ElectrodeConfig & probe_insertion).fetch1()

        electrode_groups = {}
        for electrode_group in lab.ElectrodeConfig.ElectrodeGroup & electrode_config:
            electrode_groups[electrode_group[
                'electrode_group']] = nwbfile.create_electrode_group(
                    name=electrode_config['electrode_config_name'] + '_g' +
                    str(electrode_group['electrode_group']),
                    description='N/A',
                    device=nwbfile.create_device(
                        name=electrode_config['probe']),
                    location=json.dumps({
                        k: str(v)
                        for k, v in (dj_insert_location
                                     & session_key).fetch1().items()
                        if k not in dj_insert_location.primary_key
                    }))

        for chn in (lab.ElectrodeConfig.Electrode * lab.Probe.Electrode
                    & electrode_config).fetch(as_dict=True):
            nwbfile.add_electrode(
                id=chn['electrode'],
                group=electrode_groups[chn['electrode_group']],
                filtering=hardware_filter,
                imp=-1.,
                x=chn['x_coord'] if chn['x_coord'] else np.nan,
                y=chn['y_coord'] if chn['y_coord'] else np.nan,
                z=chn['z_coord'] if chn['z_coord'] else np.nan,
                location=electrode_groups[chn['electrode_group']].location)

        # --- unit spike times ---
        nwbfile.add_unit_column(
            name='sampling_rate',
            description='Sampling rate of the raw voltage traces (Hz)')
        nwbfile.add_unit_column(name='quality',
                                description='unit quality from clustering')
        nwbfile.add_unit_column(
            name='posx',
            description=
            'estimated x position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(
            name='posy',
            description=
            'estimated y position of the unit relative to probe (0,0) (um)')
        nwbfile.add_unit_column(
            name='cell_type',
            description='cell type (e.g. fast spiking or pyramidal)')

        for unit_key in (ephys.Unit * ephys.UnitCellType
                         & probe_insertion).fetch('KEY'):

            unit = (ephys.Unit * ephys.UnitCellType & probe_insertion
                    & unit_key).proj(..., '-spike_times').fetch1()
            if ephys.TrialSpikes & unit_key:
                obs_intervals = np.array(
                    list(
                        zip(*(ephys.TrialSpikes * experiment.SessionTrial
                              & unit_key).fetch('start_time',
                                                'stop_time')))).astype(float)
                tr_spike_times, tr_start, tr_go = (
                    ephys.TrialSpikes * experiment.SessionTrial *
                    (experiment.TrialEvent & 'trial_event_type = "go"')
                    & unit_key).fetch('spike_times', 'start_time',
                                      'trial_event_time')
                spike_times = np.hstack([
                    spks + float(t_go) + float(t_start) for spks, t_go, t_start
                    in zip(tr_spike_times, tr_start, tr_go)
                ])
            else:  # the case of unavailable `TrialSpikes`
                spike_times = (ephys.Unit & unit_key).fetch1('spike_times')
                obs_intervals = np.array(
                    list(
                        zip(*(experiment.SessionTrial & unit_key).fetch(
                            'start_time', 'stop_time')))).astype(float)
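                # keep only the trial intervals that contain at least one spike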
                obs_intervals = [
                    interval for interval in obs_intervals
                    if np.logical_and(spike_times >= interval[0],
                                      spike_times <= interval[-1]).any()
                ]

            # make an electrode table region (which electrode(s) is this unit coming from)
            nwbfile.add_unit(
                id=unit['unit'],
                electrodes=np.where(
                    np.array(nwbfile.electrodes.id.data) ==
                    unit['electrode'])[0],
                electrode_group=electrode_groups[unit['electrode_group']],
                obs_intervals=obs_intervals,
                sampling_rate=ecephys_fs,
                quality=unit['unit_quality'],
                posx=unit['unit_posx'],
                posy=unit['unit_posy'],
                cell_type=unit['cell_type'],
                spike_times=spike_times,
                waveform_mean=np.mean(unit['waveform'], axis=0),
                waveform_sd=np.std(unit['waveform'], axis=0))

    # ===============================================================================
    # ============================= BEHAVIOR TRACKING ===============================
    # ===============================================================================

    if tracking.LickTrace * experiment.SessionTrial & session_key:
        # re-concatenating trialized tracking traces
        lick_traces, time_vecs, trial_starts = (
            tracking.LickTrace * experiment.SessionTrial & session_key).fetch(
                'lick_trace', 'lick_trace_timestamps', 'start_time')
        behav_acq = pynwb.behavior.BehavioralTimeSeries(
            name='BehavioralTimeSeries')
        nwbfile.add_acquisition(behav_acq)
        behav_acq.create_timeseries(
            name='lick_trace',
            unit='a.u.',
            conversion=1.0,
            data=np.hstack(lick_traces),
            description=
            "Time-series of the animal's tongue movement when licking",
            timestamps=np.hstack(time_vecs + trial_starts.astype(float)))

    # ===============================================================================
    # ============================= PHOTO-STIMULATION ===============================
    # ===============================================================================
    stim_sites = {}
    for photostim in experiment.Photostim * experiment.PhotostimBrainRegion * lab.PhotostimDevice & session_key:

        stim_device = (nwbfile.get_device(photostim['photostim_device'])
                       if photostim['photostim_device'] in nwbfile.devices else
                       nwbfile.create_device(
                           name=photostim['photostim_device']))

        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name=photostim['stim_laterality'] + ' ' +
            photostim['stim_brain_area'],
            device=stim_device,
            excitation_lambda=float(photostim['excitation_wavelength']),
            location=json.dumps([{
                k: v
                for k, v in stim_locs.items()
                if k not in experiment.Photostim.primary_key
            } for stim_locs in (experiment.Photostim.PhotostimLocation.proj(
                ..., '-brain_area')
                                & photostim).fetch(as_dict=True)],
                                default=str),
            description='')
        nwbfile.add_ogen_site(stim_site)
        stim_sites[photostim['photo_stim']] = stim_site

    # re-concatenating trialized photostim traces
    dj_photostim = (experiment.PhotostimTrace * experiment.SessionTrial *
                    experiment.PhotostimEvent * experiment.Photostim
                    & session_key)

    for photo_stim, stim_site in stim_sites.items():
        if dj_photostim & {'photo_stim': photo_stim}:
            aom_input_trace, laser_power, time_vecs, trial_starts = (
                dj_photostim & {
                    'photo_stim': photo_stim
                }).fetch('aom_input_trace', 'laser_power',
                         'photostim_timestamps', 'start_time')

            aom_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_aom_input_trace',
                site=stim_site,
                conversion=1e-3,
                data=np.hstack(aom_input_trace),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))
            laser_series = pynwb.ogen.OptogeneticSeries(
                name=stim_site.name + '_laser_power',
                site=stim_site,
                conversion=1e-3,
                data=np.hstack(laser_power),
                timestamps=np.hstack(time_vecs + trial_starts.astype(float)))

            nwbfile.add_stimulus(aom_series)
            nwbfile.add_stimulus(laser_series)

    # ===============================================================================
    # =============================== BEHAVIOR TRIALS ===============================
    # ===============================================================================

    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes:
    # 'id', 'start_time' and 'stop_time'. Other trial-related information needs to be added
    # to the trial-table as additional columns (with column name and column description)
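    # For illustration (hypothetical column name 'outcome'), the pattern is:
    #     nwbfile.add_trial_column(name='outcome', description='hit or miss')
    #     nwbfile.add_trial(start_time=0.0, stop_time=1.0, outcome='hit')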

    dj_trial = experiment.SessionTrial * experiment.BehaviorTrial
    skip_adding_columns = experiment.Session.primary_key + [
        'trial_uid', 'trial'
    ]

    if experiment.SessionTrial & session_key:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo
        trial_columns = [{
            'name':
            tag,
            'description':
            re.sub(r'\s+:|\s+', ' ',
                   re.search(f'(?<={tag})(.*)',
                             str(dj_trial.heading)).group()).strip()
        } for tag in dj_trial.heading.names if tag not in skip_adding_columns +
                         ['start_time', 'stop_time']]
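        # (each column description is scraped from the DataJoint table heading
        # via a lookbehind on the attribute name)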

        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns:
            nwbfile.add_trial_column(**c)

        # Add entry to the trial-table
        for trial in (dj_trial & session_key).fetch(as_dict=True):
            trial['start_time'] = float(trial['start_time'])
            trial['stop_time'] = float(
                trial['stop_time']) if trial['stop_time'] else np.nan
            trial['id'] = trial['trial']  # rename 'trial_id' to 'id'
            for k in skip_adding_columns:
                trial.pop(k)
            nwbfile.add_trial(**trial)

    # ===============================================================================
    # =============================== BEHAVIOR TRIAL EVENTS ==========================
    # ===============================================================================

    behav_event = pynwb.behavior.BehavioralEvents(name='BehavioralEvents')
    nwbfile.add_acquisition(behav_event)

    for trial_event_type in (experiment.TrialEventType & experiment.TrialEvent
                             & session_key).fetch('trial_event_type'):
        event_times, trial_starts = (
            experiment.TrialEvent * experiment.SessionTrial
            & session_key & {
                'trial_event_type': trial_event_type
            }).fetch('trial_event_time', 'start_time')
        if len(event_times) > 0:
            event_times = np.hstack(
                event_times.astype(float) + trial_starts.astype(float))
            behav_event.create_timeseries(name=trial_event_type,
                                          unit='a.u.',
                                          conversion=1.0,
                                          data=np.full_like(event_times, 1),
                                          timestamps=event_times)

    photostim_event_time, trial_starts, photo_stim, power, duration = (
        experiment.PhotostimEvent * experiment.SessionTrial
        & session_key).fetch('photostim_event_time', 'start_time',
                             'photo_stim', 'power', 'duration')

    if len(photostim_event_time) > 0:
        behav_event.create_timeseries(
            name='photostim_start_time',
            unit='a.u.',
            conversion=1.0,
            data=power,
            timestamps=photostim_event_time.astype(float) +
            trial_starts.astype(float),
            control=photo_stim.astype('uint8'),
            control_description=stim_sites)
        behav_event.create_timeseries(
            name='photostim_stop_time',
            unit='a.u.',
            conversion=1.0,
            data=np.full_like(photostim_event_time, 0),
            timestamps=photostim_event_time.astype(float) +
            duration.astype(float) + trial_starts.astype(float),
            control=photo_stim.astype('uint8'),
            control_description=stim_sites)

    # =============== Write NWB 2.0 file ===============
    if save:
        save_file_name = ''.join([nwbfile.identifier, '.nwb'])
        if not os.path.exists(nwb_output_dir):
            os.makedirs(nwb_output_dir)
        if not overwrite and os.path.exists(
                os.path.join(nwb_output_dir, save_file_name)):
            return nwbfile
        with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name),
                       mode='w') as io:
            io.write(nwbfile)
            print(f'Write NWB 2.0 file: {save_file_name}')

    return nwbfile
Example #14
def export_to_nwb(session_key,
                  nwb_output_dir=default_nwb_output_dir,
                  save=False,
                  overwrite=True):
    this_session = (acquisition.Session & session_key).fetch1()

    identifier = '_'.join([
        this_session['subject_id'],
        this_session['session_time'].strftime('%Y-%m-%d'),
        this_session['session_id']
    ])

    # =============== General ====================
    # -- NWB file - a NWB2.0 file for each session
    nwbfile = NWBFile(session_description=this_session['session_note'],
                      identifier=identifier,
                      session_start_time=this_session['session_time'],
                      file_create_date=datetime.now(tzlocal()),
                      experimenter='; '.join(
                          (acquisition.Session.Experimenter
                           & session_key).fetch('experimenter')),
                      institution=institution,
                      experiment_description=experiment_description,
                      related_publications=related_publications,
                      keywords=keywords)
    # -- subject
    subj = (subject.Subject & session_key).fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=this_session['subject_id'],
        description=subj['subject_description'],
        genotype=' x '.join(
            (subject.Subject.Allele & session_key).fetch('allele')),
        sex=subj['sex'],
        species=subj['species'])

    # =============== Intracellular ====================
    cell = ((intracellular.Cell & session_key).fetch1()
            if len(intracellular.Cell & session_key) == 1 else None)
    if cell:
        # metadata
        whole_cell_device = nwbfile.create_device(name=cell['device_name'])
        ic_electrode = nwbfile.create_ic_electrode(
            name=cell['cell_id'],
            device=whole_cell_device,
            description='N/A',
            filtering='N/A',
            location='; '.join([
                f'{k}: {str(v)}'
                for k, v in dict((reference.BrainLocation & cell).fetch1(),
                                 depth=cell['cell_depth']).items()
            ]))
        # acquisition - membrane potential
        mp, mp_wo_spike, mp_start_time, mp_fs = (
            intracellular.MembranePotential & cell).fetch1(
                'membrane_potential', 'membrane_potential_wo_spike',
                'membrane_potential_start_time',
                'membrane_potential_sampling_rate')
        nwbfile.add_acquisition(
            pynwb.icephys.PatchClampSeries(name='PatchClampSeries',
                                           electrode=ic_electrode,
                                           unit='mV',
                                           conversion=1e-3,
                                           gain=1.0,
                                           data=mp,
                                           starting_time=mp_start_time,
                                           rate=mp_fs))
        # acquisition - current injection
        if (intracellular.CurrentInjection & cell):
            current_injection, ci_start_time, ci_fs = (
                intracellular.CurrentInjection & cell).fetch1(
                    'current_injection', 'current_injection_start_time',
                    'current_injection_sampling_rate')
            nwbfile.add_stimulus(
                pynwb.icephys.CurrentClampStimulusSeries(
                    name='CurrentClampStimulus',
                    electrode=ic_electrode,
                    conversion=1e-9,
                    gain=1.0,
                    data=current_injection,
                    starting_time=ci_start_time,
                    rate=ci_fs))

        # analysis - membrane potential without spike
        mp_rmv_spike = nwbfile.create_processing_module(
            name='icephys', description='Spike removal')
        mp_rmv_spike.add_data_interface(
            pynwb.icephys.PatchClampSeries(name='icephys',
                                           electrode=ic_electrode,
                                           unit='mV',
                                           conversion=1e-3,
                                           gain=1.0,
                                           data=mp_wo_spike,
                                           starting_time=mp_start_time,
                                           rate=mp_fs))

    # =============== Extracellular ====================
    probe_insertion = ((extracellular.ProbeInsertion
                        & session_key).fetch1() if extracellular.ProbeInsertion
                       & session_key else None)
    if probe_insertion:
        probe = nwbfile.create_device(name=probe_insertion['probe_name'])
        electrode_group = nwbfile.create_electrode_group(
            name=f'{probe_insertion["probe_name"]}: {str(probe_insertion["channel_counts"])}',
            description='N/A',
            device=probe,
            location='; '.join([
                f'{k}: {str(v)}'
                for k, v in (reference.BrainLocation
                             & probe_insertion).fetch1().items()
            ]))

        for chn in (reference.Probe.Channel
                    & probe_insertion).fetch(as_dict=True):
            nwbfile.add_electrode(
                id=chn['channel_id'],
                group=electrode_group,
                filtering=hardware_filter,
                imp=-1.,
                x=0.0,  # not available from data
                y=0.0,  # not available from data
                z=0.0,  # not available from data
                location=electrode_group.location)

        # --- unit spike times ---
        nwbfile.add_unit_column(
            name='sampling_rate',
            description='Sampling rate of the raw voltage traces (Hz)')
        nwbfile.add_unit_column(name='depth',
                                description='depth of this unit (mm)')
        nwbfile.add_unit_column(name='spike_width',
                                description='spike width of this unit (ms)')
        nwbfile.add_unit_column(
            name='cell_type',
            description='cell type (e.g. wide width, narrow width spiking)')

        for unit in (extracellular.UnitSpikeTimes
                     & probe_insertion).fetch(as_dict=True):
            # make an electrode table region (which electrode(s) is this unit coming from)
            nwbfile.add_unit(
                id=unit['unit_id'],
                electrodes=(unit['channel_id'] if isinstance(
                    unit['channel_id'], np.ndarray) else [unit['channel_id']]),
                depth=unit['unit_depth'],
                sampling_rate=ecephys_fs,
                spike_width=unit['unit_spike_width'],
                cell_type=unit['unit_cell_type'],
                spike_times=unit['spike_times'],
                waveform_mean=unit['spike_waveform'])

    # =============== Behavior ====================
    # Note: for this study, raw behavioral data were not available; only trialized data were provided.
    # Here, we reconstruct the raw behavioral data by concatenation.
    trial_seg_setting = (analysis.TrialSegmentationSetting
                         & 'trial_seg_setting=0').fetch1()
    seg_behav_query = (
        behavior.TrialSegmentedLickTrace * acquisition.TrialSet.Trial *
        (analysis.RealignedEvent.RealignedEventTime
         & 'trial_event="trial_start"')
        & session_key & trial_seg_setting)

    if seg_behav_query:
        behav_acq = pynwb.behavior.BehavioralTimeSeries(name='lick_times')
        nwbfile.add_acquisition(behav_acq)
        seg_behav = pd.DataFrame(
            seg_behav_query.fetch('start_time', 'realigned_event_time',
                                  'segmented_lick_left_on',
                                  'segmented_lick_left_off',
                                  'segmented_lick_right_on',
                                  'segmented_lick_right_off')).T
        seg_behav.columns = [
            'start_time', 'realigned_event_time', 'segmented_lick_left_on',
            'segmented_lick_left_off', 'segmented_lick_right_on',
            'segmented_lick_right_off'
        ]
        for behav_name in [
                'lick_left_on', 'lick_left_off', 'lick_right_on',
                'lick_right_off'
        ]:
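            # Shift each trial's segmented lick times back onto the session clock:
            # subtract the realigned event time and add the trial start time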
            lick_times = np.hstack([r['segmented_' + behav_name] -
                                    r.realigned_event_time + r.start_time
                                    for _, r in seg_behav.iterrows()])
            behav_acq.create_timeseries(name=behav_name,
                                        unit='a.u.',
                                        conversion=1.0,
                                        data=np.full_like(lick_times, 1),
                                        timestamps=lick_times)

    # =============== Photostimulation ====================
    photostim = ((stimulation.PhotoStimulation
                  & session_key).fetch1() if stimulation.PhotoStimulation
                 & session_key else None)
    if photostim:
        photostim_device = (stimulation.PhotoStimDevice & photostim).fetch1()
        stim_device = nwbfile.create_device(
            name=photostim_device['device_name'])
        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name='-'.join([photostim['hemisphere'],
                           photostim['brain_region']]),
            device=stim_device,
            excitation_lambda=float(
                (stimulation.PhotoStimProtocol
                 & photostim).fetch1('photo_stim_excitation_lambda')),
            location='; '.join([
                f'{k}: {str(v)}' for k, v in (reference.ActionLocation
                                              & photostim).fetch1().items()
            ]),
            description=(stimulation.PhotoStimProtocol
                         & photostim).fetch1('photo_stim_notes'))
        nwbfile.add_ogen_site(stim_site)

        if photostim['photostim_timeseries'] is not None:
            nwbfile.add_stimulus(
                pynwb.ogen.OptogeneticSeries(
                    name='_'.join([
                        'photostim_on',
                        photostim['photostim_datetime'].strftime(
                            '%Y-%m-%d_%H-%M-%S')
                    ]),
                    site=stim_site,
                    resolution=0.0,
                    conversion=1e-3,
                    data=photostim['photostim_timeseries'],
                    starting_time=photostim['photostim_start_time'],
                    rate=photostim['photostim_sampling_rate']))

    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes:
    #                                                                       'id', 'start_time' and 'stop_time'.
    # Other trial-related information needs to be added in to the trial-table as additional columns (with column name
    # and column description)
    if acquisition.TrialSet & session_key:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo - remove 'trial_' prefix (if any)
        trial_columns = [
            {
                'name':
                tag.replace('trial_', ''),
                'description':
                re.search(
                    f'(?<={tag})(.*)#(.*)',
                    str((acquisition.TrialSet.Trial *
                         stimulation.TrialPhotoStimParam
                         ).heading)).groups()[-1].strip()
            } for tag in (acquisition.TrialSet.Trial *
                          stimulation.TrialPhotoStimParam).heading.names
            if tag not in (acquisition.TrialSet.Trial
                           & stimulation.TrialPhotoStimParam).primary_key +
            ['start_time', 'stop_time']
        ]

        # Trial Events - discard 'trial_start' and 'trial_stop' as we already have start_time and stop_time
        # also add `_time` suffix to all events
        trial_events = set(((acquisition.TrialSet.EventTime & session_key) -
                            [{
                                'trial_event': 'trial_start'
                            }, {
                                'trial_event': 'trial_stop'
                            }]).fetch('trial_event'))
        event_names = [{
            'name': e + '_time',
            'description': d + ' - (s) relative to trial start time'
        } for e, d in zip(*(reference.ExperimentalEvent & [{
            'event': k
        } for k in trial_events]).fetch('event', 'description'))]
        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns + event_names:
            nwbfile.add_trial_column(**c)

        photostim_tag_default = {
            tag: ''
            for tag in stimulation.TrialPhotoStimParam.heading.names
            if tag not in stimulation.TrialPhotoStimParam.primary_key
        }

        # Add entry to the trial-table
        for trial in (acquisition.TrialSet.Trial
                      & session_key).fetch(as_dict=True):
            events = dict(
                zip(*(acquisition.TrialSet.EventTime & trial
                      & [{
                          'trial_event': e
                      } for e in trial_events]
                      ).fetch('trial_event', 'event_time')))

            trial_tag_value = ({
                **trial,
                **events,
                **(stimulation.TrialPhotoStimParam & trial).fetch1()
            } if (stimulation.TrialPhotoStimParam & trial) else {
                **trial,
                **events,
                **photostim_tag_default
            })

            trial_tag_value['id'] = trial_tag_value[
                'trial_id']  # rename 'trial_id' to 'id'
            for k in acquisition.TrialSet.Trial.primary_key:
                trial_tag_value.pop(k)

            # convert None to np.nan since NWB fields do not take None
            for k, v in trial_tag_value.items():
                trial_tag_value[k] = v if v is not None else np.nan

            trial_tag_value['delay_duration'] = float(
                trial_tag_value['delay_duration'])  # convert Decimal to float

            # Final tweaks: i) add '_time' suffix and ii) remove 'trial_' prefix
            events = {k + '_time': trial_tag_value.pop(k) for k in events}
            trial_attrs = {
                k.replace('trial_', ''): trial_tag_value.pop(k)
                for k in
                [n for n in trial_tag_value if n.startswith('trial_')]
            }

            nwbfile.add_trial(**trial_tag_value, **events, **trial_attrs)

    # =============== Write NWB 2.0 file ===============
    if save:
        save_file_name = ''.join([nwbfile.identifier, '.nwb'])
        if not os.path.exists(nwb_output_dir):
            os.makedirs(nwb_output_dir)
        if not overwrite and os.path.exists(
                os.path.join(nwb_output_dir, save_file_name)):
            return nwbfile
        with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name),
                       mode='w') as io:
            io.write(nwbfile)
            print(f'Wrote NWB 2.0 file: {save_file_name}')

    return nwbfile
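
The trial table assembled above can be checked after writing; a minimal sketch
(the output path is hypothetical) that reads the file back and views the trials
as a pandas DataFrame:

import os
from pynwb import NWBHDF5IO

nwb_path = os.path.join('path', 'to', 'output.nwb')  # hypothetical path
with NWBHDF5IO(nwb_path, mode='r') as io:
    nwbfile = io.read()
    # to_dataframe() flattens the trial table, including the custom columns
    # and per-event '_time' columns added during conversion
    print(nwbfile.trials.to_dataframe().head())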
Example #15
class NWBFileTest(unittest.TestCase):
    def setUp(self):
        self.start = datetime(2017, 5, 1, 12, 0, 0, tzinfo=tzlocal())
        self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
        self.create = [
            datetime(2017, 5, 1, 12, tzinfo=tzlocal()),
            datetime(2017, 5, 2, 13, 0, 0, 1, tzinfo=tzutc()),
            datetime(2017, 5, 2, 14, tzinfo=tzutc())
        ]
        self.path = 'nwbfile_test.h5'
        self.nwbfile = NWBFile(
            'a test session description for a test NWBFile',
            'FILE123',
            self.start,
            file_create_date=self.create,
            timestamps_reference_time=self.ref_time,
            experimenter='A test experimenter',
            lab='a test lab',
            institution='a test institution',
            experiment_description='a test experiment description',
            session_id='test1',
            notes='my notes',
            pharmacology='drugs',
            protocol='protocol',
            related_publications='my pubs',
            slices='my slices',
            surgery='surgery',
            virus='a virus',
            source_script='noscript',
            source_script_file_name='nofilename',
            stimulus_notes='test stimulus notes',
            data_collection='test data collection notes',
            keywords=('these', 'are', 'keywords'))

    def test_constructor(self):
        self.assertEqual(self.nwbfile.session_description,
                         'a test session description for a test NWBFile')
        self.assertEqual(self.nwbfile.identifier, 'FILE123')
        self.assertEqual(self.nwbfile.session_start_time, self.start)
        self.assertEqual(self.nwbfile.file_create_date, self.create)
        self.assertEqual(self.nwbfile.lab, 'a test lab')
        self.assertEqual(self.nwbfile.experimenter, 'A test experimenter')
        self.assertEqual(self.nwbfile.institution, 'a test institution')
        self.assertEqual(self.nwbfile.experiment_description,
                         'a test experiment description')
        self.assertEqual(self.nwbfile.session_id, 'test1')
        self.assertEqual(self.nwbfile.stimulus_notes, 'test stimulus notes')
        self.assertEqual(self.nwbfile.data_collection,
                         'test data collection notes')
        self.assertEqual(self.nwbfile.source_script, 'noscript')
        self.assertEqual(self.nwbfile.source_script_file_name, 'nofilename')
        self.assertEqual(self.nwbfile.keywords, ('these', 'are', 'keywords'))
        self.assertEqual(self.nwbfile.timestamps_reference_time, self.ref_time)

    def test_create_electrode_group(self):
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        d = self.nwbfile.create_device('a fake device')
        elecgrp = self.nwbfile.create_electrode_group(name, desc, loc, d)
        self.assertEqual(elecgrp.description, desc)
        self.assertEqual(elecgrp.location, loc)
        self.assertIs(elecgrp.device, d)

    def test_create_electrode_group_invalid_index(self):
        """
        Test the case where the user creates an electrode table region with
        indexes that are out of range of the amount of electrodes added.
        """
        nwbfile = NWBFile('a', 'b', datetime.now(tzlocal()))
        device = nwbfile.create_device('a')
        elecgrp = nwbfile.create_electrode_group('a',
                                                 'b',
                                                 device=device,
                                                 location='a')
        for i in range(4):
            nwbfile.add_electrode(np.nan,
                                  np.nan,
                                  np.nan,
                                  np.nan,
                                  'a',
                                  'a',
                                  elecgrp,
                                  id=i)
        with self.assertRaises(IndexError):
            nwbfile.create_electrode_table_region(list(range(6)), 'test')

    def test_access_group_after_io(self):
        """
        Motivated by #739
        """
        nwbfile = NWBFile('a', 'b', datetime.now(tzlocal()))
        device = nwbfile.create_device('a')
        elecgrp = nwbfile.create_electrode_group('a',
                                                 'b',
                                                 device=device,
                                                 location='a')
        nwbfile.add_electrode(np.nan,
                              np.nan,
                              np.nan,
                              np.nan,
                              'a',
                              'a',
                              elecgrp,
                              id=0)

        with NWBHDF5IO('electrodes_mwe.nwb', 'w') as io:
            io.write(nwbfile)

        with NWBHDF5IO('electrodes_mwe.nwb', 'a') as io:
            nwbfile_i = io.read()
            for aa, bb in zip(nwbfile_i.electrodes['group'][:],
                              nwbfile.electrodes['group'][:]):
                self.assertEqual(aa.name, bb.name)

        for i in range(4):
            nwbfile.add_electrode(np.nan,
                                  np.nan,
                                  np.nan,
                                  np.nan,
                                  'a',
                                  'a',
                                  elecgrp,
                                  id=i + 1)

        with NWBHDF5IO('electrodes_mwe.nwb', 'w') as io:
            io.write(nwbfile)

        with NWBHDF5IO('electrodes_mwe.nwb', 'a') as io:
            nwbfile_i = io.read()
            for aa, bb in zip(nwbfile_i.electrodes['group'][:],
                              nwbfile.electrodes['group'][:]):
                self.assertEqual(aa.name, bb.name)

        os.remove("electrodes_mwe.nwb")

    def test_epoch_tags(self):
        tags1 = ['t1', 't2']
        tags2 = ['t3', 't4']
        tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)  # np.float is removed in modern NumPy
        ts = TimeSeries("test_ts",
                        list(range(len(tstamps))),
                        'unit',
                        timestamps=tstamps)
        expected_tags = tags1 + tags2
        self.nwbfile.add_epoch(0.0, 1.0, tags1, ts)
        self.nwbfile.add_epoch(0.0, 1.0, tags2, ts)
        tags = self.nwbfile.epoch_tags
        self.assertCountEqual(expected_tags, tags)

    def test_add_acquisition(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.acquisition), 1)

    def test_add_stimulus(self):
        self.nwbfile.add_stimulus(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus), 1)

    def test_add_stimulus_template(self):
        self.nwbfile.add_stimulus_template(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus_template), 1)

    def test_add_acquisition_check_dups(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        with self.assertRaises(ValueError):
            self.nwbfile.add_acquisition(
                TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                           'grams',
                           timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))

    def test_get_acquisition_empty(self):
        with self.assertRaisesRegex(ValueError,
                                    "acquisition of NWBFile 'root' is empty"):
            self.nwbfile.get_acquisition()

    def test_get_acquisition_multiple_elements(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "more than one element in acquisition of NWBFile 'root' -- must specify a name"
        with self.assertRaisesRegex(ValueError, msg):
            self.nwbfile.get_acquisition()

    def test_add_acquisition_invalid_name(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        msg = "'TEST_TS' not found in acquisition of NWBFile 'root'"
        with self.assertRaisesRegex(KeyError, msg):
            self.nwbfile.get_acquisition("TEST_TS")

    def test_set_electrode_table(self):
        table = ElectrodeTable()  # noqa: F405
        dev1 = self.nwbfile.create_device('dev1')  # noqa: F405
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-1.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-2.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-3.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        table.add_row(x=1.0,
                      y=2.0,
                      z=3.0,
                      imp=-4.0,
                      location='CA1',
                      filtering='none',
                      group=group,
                      group_name='tetrode1')
        self.nwbfile.set_electrode_table(table)
        self.assertIs(self.nwbfile.electrodes, table)
        self.assertIs(table.parent, self.nwbfile)

    def test_add_unit_column(self):
        self.nwbfile.add_unit_column('unit_type', 'the type of unit')
        self.assertEqual(self.nwbfile.units.colnames, ('unit_type', ))

    def test_add_unit(self):
        self.nwbfile.add_unit(id=1)
        self.assertEqual(len(self.nwbfile.units), 1)
        self.nwbfile.add_unit(id=2)
        self.nwbfile.add_unit(id=3)
        self.assertEqual(len(self.nwbfile.units), 3)

    def test_add_trial_column(self):
        self.nwbfile.add_trial_column('trial_type', 'the type of trial')
        self.assertEqual(self.nwbfile.trials.colnames,
                         ('start_time', 'stop_time', 'trial_type'))

    def test_add_trial(self):
        self.nwbfile.add_trial(start_time=10.0, stop_time=20.0)
        self.assertEqual(len(self.nwbfile.trials), 1)
        self.nwbfile.add_trial(start_time=30.0, stop_time=40.0)
        self.nwbfile.add_trial(start_time=50.0, stop_time=70.0)
        self.assertEqual(len(self.nwbfile.trials), 3)

    def test_add_invalid_times_column(self):
        self.nwbfile.add_invalid_times_column(
            'comments', 'description of reason for omitting time')
        self.assertEqual(self.nwbfile.invalid_times.colnames,
                         ('start_time', 'stop_time', 'comments'))

    def test_add_invalid_time_interval(self):

        self.nwbfile.add_invalid_time_interval(start_time=0.0, stop_time=12.0)
        self.assertEqual(len(self.nwbfile.invalid_times), 1)
        self.nwbfile.add_invalid_time_interval(start_time=15.0, stop_time=16.0)
        self.nwbfile.add_invalid_time_interval(start_time=17.0, stop_time=20.5)
        self.assertEqual(len(self.nwbfile.invalid_times), 3)

    def test_add_invalid_time_w_ts(self):
        ts = TimeSeries(name='name', data=[1.2], rate=1.0, unit='na')
        self.nwbfile.add_invalid_time_interval(start_time=18.0,
                                               stop_time=20.6,
                                               timeseries=ts,
                                               tags=('hi', 'there'))

    def test_add_electrode(self):
        dev1 = self.nwbfile.create_device('dev1')  # noqa: F405
        group = self.nwbfile.create_electrode_group('tetrode1',
                                                    'tetrode description',
                                                    'tetrode location', dev1)
        self.nwbfile.add_electrode(1.0,
                                   2.0,
                                   3.0,
                                   -1.0,
                                   'CA1',
                                   'none',
                                   group=group,
                                   id=1)
        self.assertEqual(self.nwbfile.ec_electrodes[0][0], 1)
        self.assertEqual(self.nwbfile.ec_electrodes[0][1], 1.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][2], 2.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][3], 3.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][4], -1.0)
        self.assertEqual(self.nwbfile.ec_electrodes[0][5], 'CA1')
        self.assertEqual(self.nwbfile.ec_electrodes[0][6], 'none')
        self.assertEqual(self.nwbfile.ec_electrodes[0][7], group)

    def test_all_children(self):
        ts1 = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        device = self.nwbfile.create_device('a fake device')
        elecgrp = self.nwbfile.create_electrode_group(name, desc, loc, device)
        children = self.nwbfile.all_children()
        self.assertIn(ts1, children)
        self.assertIn(ts2, children)
        self.assertIn(device, children)
        self.assertIn(elecgrp, children)

    def test_fail_if_source_script_file_name_without_source_script(self):
        with self.assertRaises(ValueError):
            # <-- source_script_file_name without source_script is not allowed
            NWBFile('a test session description for a test NWBFile',
                    'FILE123',
                    self.start,
                    source_script=None,
                    source_script_file_name='nofilename')

    def test_get_neurodata_type(self):
        ts1 = TimeSeries('test_ts1', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        ts2 = TimeSeries('test_ts2', [0, 1, 2, 3, 4, 5],
                         'grams',
                         timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
        self.nwbfile.add_acquisition(ts1)
        self.nwbfile.add_acquisition(ts2)
        p1 = ts1.get_ancestor(neurodata_type='NWBFile')
        self.assertIs(p1, self.nwbfile)
        p2 = ts2.get_ancestor(neurodata_type='NWBFile')
        self.assertIs(p2, self.nwbfile)
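
To run this test case directly, the standard unittest entry point applies:

if __name__ == '__main__':
    unittest.main()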
Example #16
class ABF1Converter:
    """
    Converts Neuron2BrainLab's ABF1 files from a single cell (collected without amplifier settings from the
    multi-clamp commander) to a collective NeurodataWithoutBorders v2 file.

    Modeled after ABFConverter created by the Allen Institute.

    Parameters
    ----------
    inputPath: path to ABF file or a folder of ABF files to be converted
    outputFilePath: path to the output NWB file
    clampMode: 0 if voltage clamp 
               1 if current clamp
    gain: user-input value

    """
    def __init__(self, inputPath, outputFilePath, clampMode=None, gain=None):

        self.inputPath = inputPath

        if os.path.isfile(self.inputPath):

            abf = pyabf.ABF(self.inputPath)
            if abf.abfVersion["major"] != 1:
                raise ValueError(
                    f"The ABF version for the file {abf} is not supported.")

            self.fileNames = [os.path.basename(self.inputPath)]
            self.abfFiles = [abf]

        elif os.path.isdir(self.inputPath):
            check = 0
            abfFiles = []
            for dirpath, dirnames, filenames in os.walk(self.inputPath):

                # Find all .abf files in the directory
                if len(dirnames) == 0 and len(
                        glob.glob(dirpath + "/*.abf")) != 0:
                    check += 1
                    abfFiles += glob.glob(dirpath + "/*.abf")

            if check == 0:
                raise ValueError(f"{inputPath} contains no ABF Files.")

            # Arrange the ABF files in ascending order
            abfFiles.sort(key=lambda x: os.path.basename(x))

            # Collect file names for description
            self.fileNames = []
            for file in abfFiles:
                self.fileNames += [os.path.basename(file)]

            self.abfFiles = []
            for abfFile in abfFiles:
                # Load each ABF file using pyabf
                abf = pyabf.ABF(abfFile)

                # Check for ABF version
                if abf.abfVersion["major"] != 1:
                    raise ValueError(
                        f"The ABF version for the file {abf} is not supported."
                    )

                self.abfFiles += [abf]

        self.outputPath = outputFilePath

        # Take metadata input, falling back to hard-coded defaults for None

        V_CLAMP_MODE = 0
        I_CLAMP_MODE = 1

        # Use 'is not None' so that clampMode=0 (voltage clamp) is not
        # mistaken for an unspecified value
        if clampMode is not None:
            if clampMode == 0:
                self.clampMode = V_CLAMP_MODE
            else:
                self.clampMode = I_CLAMP_MODE
        else:
            self.clampMode = I_CLAMP_MODE

        if gain is not None:
            self.gain = gain
        else:
            self.gain = 1.0

    # def _getHeader(self):
    #
    #     """
    #     Refer to "Unofficial Guide to the ABF File Format" by Scott Harden for bytestring values
    #     """
    #
    #     self.headerText = self.abf.headerText
    #
    #     return self.headerText

    def _getComments(self, abf):
        """
        Accesses the tag comments created in Clampfit
        """

        return abf.tagComments

    def _createNWBFile(self):
        """
        Creates the NWB file for the cell, as defined by PyNWB
        """

        # abfDateTime is not used for ordering sweeps due to inconsistency
        self.start_time = self.abfFiles[0].abfDateTime
        self.inputCellName = os.path.basename(self.inputPath)

        self.NWBFile = NWBFile(session_description="",
                               session_start_time=self.start_time,
                               identifier=self.inputCellName,
                               file_create_date=datetime.now(tzlocal()),
                               experimenter=None,
                               notes="")
        return self.NWBFile

    def _createDevice(self):

        self.device = self.NWBFile.create_device(name='Clampfit')

    def _createElectrode(self):

        self.electrode = self.NWBFile.create_ic_electrode(
            name='elec0', device=self.device, description='PLACEHOLDER')
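        # Note: newer PyNWB versions rename create_ic_electrode to
        # create_icephys_electrode.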

    def _unitConversion(self, unit):

        # Returns a (conversion factor, base unit) tuple

        if unit == 'V':
            return 1.0, 'V'
        elif unit == 'mV':
            return 1e-3, 'V'
        elif unit == 'A':
            return 1.0, 'A'
        elif unit == 'pA':
            return 1e-12, 'A'
        else:
            # raise ValueError(f"{unit} is not a valid unit.")
            return 1.0, 'V'  # hard coded for units stored as '?'

    def _getClampMode(self):
        """
        Returns the clamp mode of the experiment.

        Voltage Clamp Mode = 0
        Current Clamp Mode = 1
        """

        return self.clampMode

    def _addStimulus(self):
        """
        Adds a stimulus class as defined by PyNWB to the NWB File.

        Written for experiments conducted from a single channel.
        For multiple channels, refer to https://github.com/AllenInstitute/ipfx/blob/master/ipfx/x_to_nwb/ABFConverter.py
        """

        sweepGlobal = 0
        for idx, abfFile in enumerate(self.abfFiles):

            for i in range(abfFile.sweepCount):

                # Collect data from pyABF
                abfFile.setSweep(i)
                seriesName = "Index_" + str(i + sweepGlobal)
                data = abfFile.sweepC
                conversion, unit = self._unitConversion(abfFile.sweepUnitsC)
                electrode = self.electrode
                gain = 1.0  # hard coded for White Noise data
                resolution = np.nan
                starting_time = 0.0
                rate = float(abfFile.dataRate)

                # Create a JSON file for the description field
                description = json.dumps(
                    {
                        "file_name": os.path.basename(self.fileNames[idx]),
                        "file_version": abfFile.abfVersionString,
                        "sweep_number": i,
                        "protocol": abfFile.protocol,
                        "protocol_path": abfFile.protocolPath,
                        "comments": self._getComments(abfFile)
                    },
                    sort_keys=True,
                    indent=4)

                # Determine the clamp mode
                if self.clampMode == 0:
                    stimulusClass = VoltageClampStimulusSeries
                elif self.clampMode == 1:
                    stimulusClass = CurrentClampStimulusSeries

                # Create a stimulus class
                stimulus = stimulusClass(name=seriesName,
                                         data=data,
                                         sweep_number=i,
                                         unit=unit,
                                         electrode=electrode,
                                         gain=gain,
                                         resolution=resolution,
                                         conversion=conversion,
                                         starting_time=starting_time,
                                         rate=rate,
                                         description=description)

                self.NWBFile.add_stimulus(stimulus)

            sweepGlobal += abfFile.sweepCount

        return True

    def _addAcquisition(self):
        """
        Adds an acquisition class as defined by PyNWB to the NWB File.

        Written for experiments conducted from a single channel.
        For multiple channels, refer to https://github.com/AllenInstitute/ipfx/blob/master/ipfx/x_to_nwb/ABFConverter.py
        """

        sweepGlobal = 0
        for idx, abfFile in enumerate(self.abfFiles):

            for i in range(abfFile.sweepCount):

                # Collect data from pyABF
                abfFile.setSweep(i)
                seriesName = "Index_" + str(i + sweepGlobal)
                data = abfFile.sweepY
                conversion, unit = self._unitConversion(abfFile.sweepUnitsY)
                electrode = self.electrode
                gain = 1.0  # hard coded for White Noise data
                resolution = np.nan
                starting_time = 0.0
                rate = float(abfFile.dataRate)

                # Create a JSON file for the description field
                description = json.dumps(
                    {
                        "file_name": os.path.basename(self.fileNames[idx]),
                        "file_version": abfFile.abfVersionString,
                        "sweep_number": i,
                        "protocol": abfFile.protocol,
                        "protocol_path": abfFile.protocolPath,
                        "comments": self._getComments(abfFile)
                    },
                    sort_keys=True,
                    indent=4)

                # Create an acquisition class
                # Note: voltage clamp records current and current clamp records
                # voltage. In PyNWB, VoltageClampSeries holds the current trace
                # acquired in voltage clamp, and CurrentClampSeries holds the
                # voltage trace acquired in current clamp, so the acquisition
                # class follows the clamp mode.

                if self.clampMode == 0:
                    acquisition = VoltageClampSeries(
                        name=seriesName,
                        data=data,
                        sweep_number=i,
                        unit=unit,
                        electrode=electrode,
                        gain=gain,
                        resolution=resolution,
                        conversion=conversion,
                        starting_time=starting_time,
                        rate=rate,
                        description=description,
                        capacitance_fast=np.nan,
                        capacitance_slow=np.nan,
                        resistance_comp_bandwidth=np.nan,
                        resistance_comp_correction=np.nan,
                        resistance_comp_prediction=np.nan,
                        whole_cell_capacitance_comp=np.nan,
                        whole_cell_series_resistance_comp=np.nan)

                elif self.clampMode == 1:
                    acquisition = CurrentClampSeries(
                        name=seriesName,
                        data=data,
                        sweep_number=i,
                        unit=unit,
                        electrode=electrode,
                        gain=gain,
                        resolution=resolution,
                        conversion=conversion,
                        starting_time=starting_time,
                        rate=rate,
                        description=description,
                        bias_current=np.nan,
                        bridge_balance=np.nan,
                        capacitance_compensation=np.nan)

                self.NWBFile.add_acquisition(acquisition)

            sweepGlobal += abfFile.sweepCount

        return True

    def convert(self):
        """
        Iterates through the functions in the specified order.
        :return: True (for success)
        """

        # self._getHeader()
        self._createNWBFile()
        self._createDevice()
        self._createElectrode()
        self._getClampMode()
        self._addStimulus()
        self._addAcquisition()

        with NWBHDF5IO(self.outputPath, "w") as io:
            io.write(self.NWBFile, cache_spec=True)

        print(f"Successfully converted to {self.outputPath}.")

        return True
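
A usage sketch for the converter above; the paths and argument values are
hypothetical:

converter = ABF1Converter(inputPath='path/to/abf_folder',
                          outputFilePath='path/to/cell.nwb',
                          clampMode=1,  # current clamp
                          gain=1.0)
converter.convert()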
Example #17
def nwb_copy_file(old_file, new_file, cp_objs=None):
    """
    Copy fields defined in 'obj', from existing NWB file to new NWB file.

    Parameters
    ----------
    old_file : str, path
        String such as '/path/to/old_file.nwb'.
    new_file : str, path
        String such as '/path/to/new_file.nwb'.
    cp_objs : dict
        Name:Value pairs (Group:Children) listing the groups and respective
        children from the current NWB file to be copied. Children can be:
        - Boolean, indicating an attribute (e.g. for institution, lab)
        - List of strings, containing several children names
        Example:
        {'institution':True,
         'lab':True,
         'acquisition':['microphone'],
         'ecephys':['LFP','DecompositionSeries']}
    """

    # Avoid a mutable default argument
    if cp_objs is None:
        cp_objs = {}

    manager = get_manager()

    # Open original signal file
    with NWBHDF5IO(old_file, 'r', manager=manager,
                   load_namespaces=True) as io1:
        nwb_old = io1.read()

        # Creates new file
        nwb_new = NWBFile(session_description=str(nwb_old.session_description),
                          identifier='',
                          session_start_time=datetime.now(tzlocal()))
        with NWBHDF5IO(new_file, mode='w', manager=manager,
                       load_namespaces=False) as io2:
            # Institution name ------------------------------------------------
            if 'institution' in cp_objs:
                nwb_new.institution = str(nwb_old.institution)

            # Lab name --------------------------------------------------------
            if 'lab' in cp_objs:
                nwb_new.lab = str(nwb_old.lab)

            # Session id ------------------------------------------------------
            if 'session' in cp_objs:
                nwb_new.session_id = nwb_old.session_id

            # Devices ---------------------------------------------------------
            if 'devices' in cp_objs:
                for aux in list(nwb_old.devices.keys()):
                    dev = Device(nwb_old.devices[aux].name)
                    nwb_new.add_device(dev)

            # Electrode groups ------------------------------------------------
            if 'electrode_groups' in cp_objs:
                for aux in list(nwb_old.electrode_groups.keys()):
                    nwb_new.create_electrode_group(
                        name=str(nwb_old.electrode_groups[aux].name),
                        description=str(nwb_old.electrode_groups[aux].description),
                        location=str(nwb_old.electrode_groups[aux].location),
                        device=nwb_new.get_device(
                            nwb_old.electrode_groups[aux].device.name)
                    )

            # Electrodes ------------------------------------------------------
            if 'electrodes' in cp_objs:
                nElec = len(nwb_old.electrodes['x'].data[:])
                for aux in np.arange(nElec):
                    nwb_new.add_electrode(
                        x=nwb_old.electrodes['x'][aux],
                        y=nwb_old.electrodes['y'][aux],
                        z=nwb_old.electrodes['z'][aux],
                        imp=nwb_old.electrodes['imp'][aux],
                        location=str(nwb_old.electrodes['location'][aux]),
                        filtering=str(nwb_old.electrodes['filtering'][aux]),
                        group=nwb_new.get_electrode_group(
                            nwb_old.electrodes['group'][aux].name),
                        group_name=str(nwb_old.electrodes['group_name'][aux])
                    )
                # if there are custom variables
                new_vars = list(nwb_old.electrodes.colnames)
                default_vars = ['x', 'y', 'z', 'imp', 'location', 'filtering',
                                'group', 'group_name']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:
                    if var == 'label':
                        var_data = [str(elem) for elem in
                                    nwb_old.electrodes[var].data[:]]
                    else:
                        var_data = np.array(nwb_old.electrodes[var].data[:])

                    nwb_new.add_electrode_column(
                        name=str(var),
                        description=str(nwb_old.electrodes[var].description),
                        data=var_data)

            # Epochs ----------------------------------------------------------
            if 'epochs' in cp_objs:
                nEpochs = len(nwb_old.epochs['start_time'].data[:])
                for i in np.arange(nEpochs):
                    nwb_new.add_epoch(
                        start_time=nwb_old.epochs['start_time'].data[i],
                        stop_time=nwb_old.epochs['stop_time'].data[i])
                # if there are custom variables
                new_vars = list(nwb_old.epochs.colnames)
                default_vars = ['start_time', 'stop_time', 'tags',
                                'timeseries']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:
                    nwb_new.add_epoch_column(
                        name=var,
                        description=nwb_old.epochs[var].description,
                        data=nwb_old.epochs[var].data[:])

            # Invalid times ---------------------------------------------------
            if 'invalid_times' in cp_objs:
                nInvalid = len(nwb_old.invalid_times['start_time'][:])
                for aux in np.arange(nInvalid):
                    nwb_new.add_invalid_time_interval(
                        start_time=nwb_old.invalid_times['start_time'][aux],
                        stop_time=nwb_old.invalid_times['stop_time'][aux])

            # Trials ----------------------------------------------------------
            if 'trials' in cp_objs:
                nTrials = len(nwb_old.trials['start_time'])
                for aux in np.arange(nTrials):
                    nwb_new.add_trial(
                        start_time=nwb_old.trials['start_time'][aux],
                        stop_time=nwb_old.trials['stop_time'][aux])
                # if there are custom variables
                new_vars = list(nwb_old.trials.colnames)
                default_vars = ['start_time', 'stop_time']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:
                    nwb_new.add_trial_column(
                        name=var,
                        description=nwb_old.trials[var].description,
                        data=nwb_old.trials[var].data[:])

            # Intervals -------------------------------------------------------
            if 'intervals' in cp_objs:
                all_objs_names = list(nwb_old.intervals.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.intervals[obj_name]
                    # create and add TimeIntervals
                    obj = TimeIntervals(name=obj_old.name,
                                        description=obj_old.description)
                    nInt = len(obj_old['start_time'])
                    for ind in np.arange(nInt):
                        obj.add_interval(start_time=obj_old['start_time'][ind],
                                         stop_time=obj_old['stop_time'][ind])
                    # Add to file
                    nwb_new.add_time_intervals(obj)

            # Stimulus --------------------------------------------------------
            if 'stimulus' in cp_objs:
                all_objs_names = list(nwb_old.stimulus.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.stimulus[obj_name]
                    obj = TimeSeries(name=obj_old.name,
                                     description=obj_old.description,
                                     data=obj_old.data[:],
                                     rate=obj_old.rate,
                                     resolution=obj_old.resolution,
                                     conversion=obj_old.conversion,
                                     starting_time=obj_old.starting_time,
                                     unit=obj_old.unit)
                    nwb_new.add_stimulus(obj)

            # Processing modules ----------------------------------------------
            if 'ecephys' in cp_objs:
                if cp_objs['ecephys'] is True:
                    interfaces = nwb_old.processing[
                        'ecephys'].data_interfaces.keys()
                else:  # list of items
                    interfaces = [
                        nwb_old.processing['ecephys'].data_interfaces[key]
                        for key in cp_objs['ecephys']
                    ]
                # Add ecephys module to NWB file
                ecephys_module = ProcessingModule(
                    name='ecephys',
                    description='Extracellular electrophysiology data.'
                )
                nwb_new.add_processing_module(ecephys_module)
                for interface_old in interfaces:
                    obj = copy_obj(interface_old, nwb_old, nwb_new)
                    if obj is not None:
                        ecephys_module.add_data_interface(obj)

            # Acquisition -----------------------------------------------------
            if 'acquisition' in cp_objs:
                if cp_objs['acquisition'] is True:
                    all_acq_names = list(nwb_old.acquisition.keys())
                else:  # list of items
                    all_acq_names = cp_objs['acquisition']
                for acq_name in all_acq_names:
                    obj_old = nwb_old.acquisition[acq_name]
                    obj = copy_obj(obj_old, nwb_old, nwb_new)
                    if obj is not None:
                        nwb_new.add_acquisition(obj)

            # Subject ---------------------------------------------------------
            if 'subject' in cp_objs:
                try:
                    cortical_surfaces = CorticalSurfaces()
                    surfaces = nwb_old.subject.cortical_surfaces.surfaces
                    for sfc in list(surfaces.keys()):
                        cortical_surfaces.create_surface(
                            name=surfaces[sfc].name,
                            faces=surfaces[sfc].faces,
                            vertices=surfaces[sfc].vertices)
                    nwb_new.subject = ECoGSubject(
                        cortical_surfaces=cortical_surfaces,
                        subject_id=nwb_old.subject.subject_id,
                        age=nwb_old.subject.age,
                        description=nwb_old.subject.description,
                        genotype=nwb_old.subject.genotype,
                        sex=nwb_old.subject.sex,
                        species=nwb_old.subject.species,
                        weight=nwb_old.subject.weight,
                        date_of_birth=nwb_old.subject.date_of_birth)
                except Exception:
                    # Fall back to a plain Subject when the file has no
                    # cortical surfaces extension
                    nwb_new.subject = Subject(
                        age=nwb_old.subject.age,
                        description=nwb_old.subject.description,
                        genotype=nwb_old.subject.genotype,
                        sex=nwb_old.subject.sex,
                        species=nwb_old.subject.species,
                        subject_id=nwb_old.subject.subject_id,
                        weight=nwb_old.subject.weight,
                        date_of_birth=nwb_old.subject.date_of_birth)

            # Write new file with copied fields
            io2.write(nwb_new, link_data=False)
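
A usage sketch mirroring the cp_objs example from the docstring; the file paths
are hypothetical:

nwb_copy_file('/path/to/old_file.nwb', '/path/to/new_file.nwb',
              cp_objs={'institution': True,
                       'lab': True,
                       'acquisition': ['microphone'],
                       'ecephys': ['LFP', 'DecompositionSeries']})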
Example #18
    def run_conversion(self,
                       nwbfile: NWBFile,
                       metadata: dict,
                       stub_test: bool = False):
        session_path = Path(self.source_data["folder_path"])
        task_types = [
            dict(name="OpenFieldPosition_ExtraLarge"),
            dict(name="OpenFieldPosition_New_Curtain", conversion=0.46),
            dict(name="OpenFieldPosition_New", conversion=0.46),
            dict(name="OpenFieldPosition_Old_Curtain", conversion=0.46),
            dict(name="OpenFieldPosition_Old", conversion=0.46),
            dict(name="OpenFieldPosition_Oldlast", conversion=0.46),
            dict(name="EightMazePosition", conversion=0.65 / 2),
        ]

        subject_path = session_path.parent
        session_id = session_path.stem

        for stim_event in get_events(session_path):
            nwbfile.add_stimulus(stim_event)

        sleep_state_fpath = session_path / f"{session_id}--StatePeriod.mat"

        exist_pos_data = any(
            # the f-string is required here; the literal braces in the original
            # string could never match a real file
            (session_path / f"{session_id}__{task_type['name']}.mat").is_file()
            for task_type in task_types
        )
        if exist_pos_data:
            nwbfile.add_epoch_column("label", "Name of epoch.")

        # Epoch intervals
        for task_type in task_types:
            label = task_type["name"]

            file = session_path / f"{session_id}__{label}.mat"
            if file.is_file():
                pos_obj = Position(name=f"{label}_position")

                matin = loadmat(file)
                tt = matin["twhl_norm"][:, 0]
                exp_times = find_discontinuities(tt)

                if "conversion" in task_type:
                    conversion = task_type["conversion"]
                else:
                    conversion = np.nan

                for pos_type in ("twhl_norm", "twhl_linearized"):
                    if pos_type in matin:
                        pos_data_norm = matin[pos_type][:, 1:]

                        spatial_series_object = SpatialSeries(
                            name=f"{label}_{pos_type}_spatial_series",
                            data=H5DataIO(pos_data_norm, compression="gzip"),
                            reference_frame="unknown",
                            conversion=conversion,
                            resolution=np.nan,
                            timestamps=H5DataIO(tt, compression="gzip"),
                        )
                        pos_obj.add_spatial_series(spatial_series_object)

                check_module(
                    nwbfile, "behavior",
                    "Contains processed behavioral data.").add_data_interface(
                        pos_obj)
                for i, window in enumerate(exp_times):
                    nwbfile.add_epoch(
                        start_time=window[0],
                        stop_time=window[1],
                        tags=f"{label}_{str(i)}",
                    )

        # Trial intervals
        trialdata_path = session_path / f"{session_id}__EightMazeRun.mat"
        if trialdata_path.is_file():
            trials_data = loadmat(trialdata_path)["EightMazeRun"]

            trialdatainfo_path = subject_path / "EightMazeRunInfo.mat"
            trialdatainfo = [
                x[0]
                for x in loadmat(trialdatainfo_path)["EightMazeRunInfo"][0]
            ]

            features = trialdatainfo[:7]
            features[:2] = (
                "start_time",
                "stop_time",
            )
            for x in features[4:] + ["condition"]:
                nwbfile.add_trial_column(x, "description")

            for trial_data in trials_data:
                if trial_data[3]:
                    cond = "run_left"
                else:
                    cond = "run_right"
                nwbfile.add_trial(
                    start_time=trial_data[0],
                    stop_time=trial_data[1],
                    condition=cond,
                    error_run=trial_data[4],
                    stim_run=trial_data[5],
                    both_visit=trial_data[6],
                )

        # Sleep states
        if sleep_state_fpath.is_file():
            matin = loadmat(sleep_state_fpath)["StatePeriod"]
            table = TimeIntervals(name="states",
                                  description="sleep states of animal")
            table.add_column(name="label", description="sleep state")
            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append(
                        dict(start_time=row[0], stop_time=row[1], label=name))
            for row in sorted(data, key=lambda x: x["start_time"]):
                table.add_row(**row)
            check_module(nwbfile, "behavior",
                         "Contains behavioral data.").add_data_interface(table)
Example #19
    def convert_data(
        self,
        nwbfile: NWBFile,
        metadata_dict: dict,
        stub_test: bool = False,
        include_spike_waveforms: bool = False,
    ):
        """Convert the behavioral portion of a particular session of the GrosmarkAD dataset."""
        session_path = self.input_args["folder_path"]
        subject_path, session_id = os.path.split(session_path)

        # Stimuli
        for stim_event in get_events(session_path):
            nwbfile.add_stimulus(stim_event)

        # States
        sleep_state_fpath = os.path.join(session_path,
                                         f"{session_id}.SleepState.states.mat")
        # label renaming specific to Watson
        state_label_names = dict(WAKEstate="Awake",
                                 NREMstate="Non-REM",
                                 REMstate="REM")
        if os.path.isfile(sleep_state_fpath):
            matin = loadmat(sleep_state_fpath)["SleepState"]["ints"][0][0]

            table = TimeIntervals(name="states",
                                  description="Sleep states of animal.")
            table.add_column(name="label", description="Sleep state.")

            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append(
                        dict(
                            start_time=row[0],
                            stop_time=row[1],
                            label=state_label_names[name],
                        ))
            for row in sorted(data, key=lambda x: x["start_time"]):
                table.add_row(**row)
            check_module(nwbfile, "behavior",
                         "contains behavioral data").add_data_interface(table)

        # Position
        pos_filepath = Path(
            session_path) / f"{session_id}.position.behavior.mat"
        pos_mat = loadmat(str(pos_filepath.absolute()))
        starting_time = float(
            pos_mat["position"]["timestamps"][0][0]
            [0])  # confirmed to be a regularly sampled series
        sampling_period = float(
            pos_mat["position"]["timestamps"][0][0][1]) - starting_time
        # pynwb expects 'rate' in Hz, not the sampling interval
        rate = 1.0 / sampling_period
        if pos_mat["position"]["units"][0][0][0] == "m":
            conversion = 1.0
        else:
            warnings.warn(
                f"Spatial units ({pos_mat['position']['units'][0][0][0]}) not listed in meters; "
                "setting conversion to nan.")
            conversion = np.nan
        pos_data = [[x[0], y[0]] for x, y in zip(
            pos_mat["position"]["position"][0][0]["x"][0][0],
            pos_mat["position"]["position"][0][0]["y"][0][0],
        )]
        linearized_data = [[
            lin[0]
        ] for lin in pos_mat["position"]["position"][0][0]["lin"][0][0]]

        label = pos_mat["position"]["behaviorinfo"][0][0]["MazeType"][0][0][
            0].replace(" ", "")
        pos_obj = Position(name=f"{label}Position")
        spatial_series_object = SpatialSeries(
            name=f"{label}SpatialSeries",
            description=
            "(x,y) coordinates tracking subject movement through the maze.",
            data=H5DataIO(pos_data, compression="gzip"),
            reference_frame="unknown",
            conversion=conversion,
            starting_time=starting_time,
            rate=rate,
            resolution=np.nan,
        )
        pos_obj.add_spatial_series(spatial_series_object)
        check_module(
            nwbfile, "behavior",
            "contains processed behavioral data").add_data_interface(pos_obj)

        lin_pos_obj = Position(name=f"{label}LinearizedPosition")
        lin_spatial_series_object = SpatialSeries(
            name=f"{label}LinearizedTimeSeries",
            description=
            "Linearized position, defined as starting at the edge of reward area, "
            "and increasing clockwise, terminating at the opposing edge of the reward area.",
            data=H5DataIO(linearized_data, compression="gzip"),
            reference_frame="unknown",
            conversion=conversion,
            starting_time=starting_time,
            rate=rate,
            resolution=np.nan,
        )
        lin_pos_obj.add_spatial_series(lin_spatial_series_object)
        check_module(nwbfile, "behavior",
                     "contains processed behavioral data").add_data_interface(
                         lin_pos_obj)

        # Epochs
        epoch_names = list(pos_mat["position"]["Epochs"][0][0].dtype.names)
        epoch_windows = [[float(start), float(stop)]
                         for x in pos_mat["position"]["Epochs"][0][0][0][0]
                         for start, stop in x]
        nwbfile.add_epoch_column("label", "name of epoch")
        for j, epoch_name in enumerate(epoch_names):
            nwbfile.add_epoch(
                start_time=epoch_windows[j][0],
                stop_time=epoch_windows[j][1],
                label=epoch_name,
            )
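
check_module is another repository helper; a plausible sketch of the
get-or-create behavior the calls above rely on:

def check_module_sketch(nwbfile, name, description):
    # Return the named processing module, creating it if it does not exist yet
    if name in nwbfile.processing:
        return nwbfile.processing[name]
    return nwbfile.create_processing_module(name, description)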
Example #20
    def convert_data(
        self, nwbfile: NWBFile, metadata_dict: dict, stub_test: bool = False, include_spike_waveforms: bool = False
    ):
        session_path = self.input_args["folder_path"]
        # TODO: check/enforce format?
        task_types = metadata_dict.get("task_types", [])

        subject_path, session_id = os.path.split(session_path)
        fpath_base = os.path.split(subject_path)[0]

        for stim_event in get_events(session_path):
            nwbfile.add_stimulus(stim_event)

        exist_pos_data = any(
            os.path.isfile(os.path.join(session_path, "{}__{}.mat".format(session_id, task_type["name"])))
            for task_type in task_types
        )

        if exist_pos_data:
            nwbfile.add_epoch_column("label", "name of epoch")

        for task_type in task_types:
            label = task_type["name"]

            file = os.path.join(session_path, session_id + "__" + label + ".mat")
            if os.path.isfile(file):
                pos_obj = Position(name=label + "_position")

                matin = loadmat(file)
                tt = matin["twhl_norm"][:, 0]
                exp_times = find_discontinuities(tt)

                if "conversion" in task_type:
                    conversion = task_type["conversion"]
                else:
                    conversion = np.nan

                for pos_type in ("twhl_norm", "twhl_linearized"):
                    if pos_type in matin:
                        pos_data_norm = matin[pos_type][:, 1:]

                        spatial_series_object = SpatialSeries(
                            name=label + "_{}_spatial_series".format(pos_type),
                            data=H5DataIO(pos_data_norm, compression="gzip"),
                            reference_frame="unknown",
                            conversion=conversion,
                            resolution=np.nan,
                            timestamps=H5DataIO(tt, compression="gzip"),
                        )
                        pos_obj.add_spatial_series(spatial_series_object)

                check_module(nwbfile, "behavior", "contains processed behavioral data").add_data_interface(pos_obj)
                for i, window in enumerate(exp_times):
                    nwbfile.add_epoch(start_time=window[0], stop_time=window[1], label=label + "_" + str(i))

        trialdata_path = os.path.join(session_path, session_id + "__EightMazeRun.mat")
        if os.path.isfile(trialdata_path):
            trials_data = loadmat(trialdata_path)["EightMazeRun"]

            trialdatainfo_path = os.path.join(fpath_base, "EightMazeRunInfo.mat")
            trialdatainfo = [x[0] for x in loadmat(trialdatainfo_path)["EightMazeRunInfo"][0]]

            features = trialdatainfo[:7]
            features[:2] = (
                "start_time",
                "stop_time",
            )
            for x in features[4:] + ["condition"]:
                nwbfile.add_trial_column(x, "description")

            for trial_data in trials_data:
                if trial_data[3]:
                    cond = "run_left"
                else:
                    cond = "run_right"
                nwbfile.add_trial(
                    start_time=trial_data[0],
                    stop_time=trial_data[1],
                    condition=cond,
                    error_run=trial_data[4],
                    stim_run=trial_data[5],
                    both_visit=trial_data[6],
                )

        sleep_state_fpath = os.path.join(session_path, "{}.SleepState.states.mat".format(session_id))
        # label renaming specific to Watson
        state_label_names = {"WAKEstate": "Awake", "NREMstate": "Non-REM", "REMstate": "REM"}
        if os.path.isfile(sleep_state_fpath):
            matin = loadmat(sleep_state_fpath)["SleepState"]["ints"][0][0]

            table = TimeIntervals(name="states", description="Sleep states of animal.")
            table.add_column(name="label", description="Sleep state.")

            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append({"start_time": row[0], "stop_time": row[1], "label": state_label_names[name]})
            for row in sorted(data, key=lambda x: x["start_time"]):
                table.add_row(**row)

            check_module(nwbfile, "behavior", "contains behavioral data").add_data_interface(table)
Example #21
def no2nwb(NOData, session_use, subjects_ini, path_to_data):
    '''
    Purpose:
        Import the data and associated meta-data from the new/old recognition
        dataset into an NWB file. Each of the features of the dataset, such as
        the events (i.e., TTLs) or the mean waveform, is compartmentalized to
        the appropriate component of the NWB file.
    '''

    # Time scaling (convert us -> s for the NWB file)
    TIME_SCALING = 10**6
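    # e.g., time_in_seconds = time_in_microseconds / TIME_SCALING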

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject
    # ============ Read Config File ============
    # load config file (subjects == config file)

    #  Check config file path
    filename = subjects_ini
    if not os.path.exists(filename):
        print('This file does not exist: {}'.format(filename))
        print("Check filename/and or directory")

    # Read the config file
    try:
        # initialze the ConfigParser() class
        config = configparser.ConfigParser()
        # read .ini file
        config.read(filename)
    except:
        print('Failed to read the config file..')
        print('Does this file exist: {}'.format(os.path.exists(filename)))

    # Read the metadata from the INI file
    for section in config.sections():
        if session_use == int(section):
            session_id = int(section)  # the New/Old ID for the session
            # Read the subject metadata values for this session
            for value in config[section]:
                if value.lower() == 'nosessions.age':
                    age = int(config[section][value])
                if value.lower() == 'nosessions.diagnosiscode':
                    epilepsyDxCode = config[section][value]
                    epilepsyDx = getEpilepsyDx(int(epilepsyDxCode))
                if value.lower() == 'nosessions.sex':
                    sex = config[section][value].strip("'")
                if value.lower() == 'nosessions.id':
                    ID = config[section][value].strip("'")
                if value.lower() == 'nosessions.session':
                    pt_session = config[section][value].strip("'")
                if value.lower() == 'nosessions.date':
                    unformattedDate = config[section][value].strip("'")
                    date = datetime.strptime(unformattedDate, '%Y-%m-%d')
                    finaldate = date.replace(hour=0, minute=0)
                if value.lower() == 'nosessions.institution':
                    institution = config[section][value].strip("'")
                if value.lower() == 'nosessions.la':
                    LA = config[section][value].strip("'").split(',')
                    if LA[0] == 'NaN':
                        LA_x = np.nan
                        LA_y = np.nan
                        LA_z = np.nan
                    else:
                        LA_x = float(LA[0])
                        LA_y = float(LA[1])
                        LA_z = float(LA[2])
                if value.lower() == 'nosessions.ra':
                    RA = config[section][value].strip("'").split(',')
                    if RA[0] == 'NaN':
                        RA_x = np.nan
                        RA_y = np.nan
                        RA_z = np.nan
                    else:
                        RA_x = float(RA[0])
                        RA_y = float(RA[1])
                        RA_z = float(RA[2])
                if value.lower() == 'nosessions.lh':
                    LH = config[section][value].strip("'").split(',')
                    if LH[0] == 'NaN':
                        LH_x = np.nan
                        LH_y = np.nan
                        LH_z = np.nan
                    else:
                        LH_x = float(LH[0])
                        LH_y = float(LH[1])
                        LH_z = float(LH[2])
                if value.lower() == 'nosessions.rh':
                    RH = config[section][value].strip("'").split(',')
                    if RH[0] == 'NaN':
                        RH_x = np.nan
                        RH_y = np.nan
                        RH_z = np.nan
                    else:
                        RH_x = float(RH[0])
                        RH_y = float(RH[1])
                        RH_z = float(RH[2])
                if value.lower() == 'nosessions.system':
                    signalSystem = config[section][value].strip("'")
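
    # For reference, a hypothetical minimal INI section that the loop above
    # would parse (section name = session number; configparser lowercases the
    # option names; every value shown here is made up for illustration):
    #
    #   [132]
    #   NOsessions.age = 34
    #   NOsessions.diagnosisCode = 1
    #   NOsessions.sex = 'F'
    #   NOsessions.ID = 'P42CS'
    #   NOsessions.session = 'P42CS_100116'
    #   NOsessions.date = '2016-10-01'
    #   NOsessions.institution = 'Cedars-Sinai Medical Center'
    #   NOsessions.LA = 'NaN'
    #   NOsessions.RA = '25.2,-7.1,-17.5'
    #   NOsessions.LH = 'NaN'
    #   NOsessions.RH = '28.0,-23.9,-11.6'
    #   NOsessions.system = 'Neuralynx'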

    # =================================================================

    print(
        '======================================================================='
    )
    print('session use: {}'.format(session_id))
    print('age: {}'.format(age))
    print('epilepsy_diagnosis: {}'.format(epilepsyDx))

    nwb_subject = Subject(age=str(age),
                          description=epilepsyDx,
                          sex=sex,
                          species='Human',
                          subject_id=pt_session[:pt_session.find('_')])

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='New/Old recognition task for ID: {}. '.format(
            session_id),
        identifier='{}_{}'.format(ID, session_use),
        session_start_time=finaldate,  #default session start time
        file_create_date=datetime.now(),
        experiment_description=
        'The data contained within this file describes a new/old recognition task performed in '
        'patients with intractable epilepsy implanted with depth electrodes and Behnke-Fried '
        'microwires in the human Medial Temporal Lobe (MTL).',
        institution=institution,
        keywords=[
            'Intracranial Recordings', 'Intractable Epilepsy',
            'Single-Unit Recordings', 'Cognitive Neuroscience', 'Learning',
            'Memory', 'Neurosurgery'
        ],
        related_publications=
        'Faraut et al. 2018, Scientific Data; Rutishauser et al. 2015, Nat Neurosci;',
        lab='Rutishauser',
        subject=nwb_subject,
        data_collection='learning: {}, recognition: {}'.format(
            session['experiment_id_learn'], session['experiment_id_recog']))

    # Add events and experiment_id acquisition
    events_description = (
        """ The events coorespond to the TTL markers for each trial. For the learning trials, the TTL markers 
            are the following: 55 = start of the experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Is this an animal?”], 
            20 = Yes (21 = NO) during learning, 6 = End of Delay after Response, 66 = End of Experiment. For the recognition trials, 
            the TTL markers are the following: 55 = start of experiment, 1 = stimulus ON, 2 = stimulus OFF, 3 = Question Screen Onset [“Have you seen this image before?”], 
            31:36 = Confidence (Yes vs. No) response [31 (new, confident), 32 (new, probably), 33 (new, guess), 34 (old, guess), 
            35 (old, probably), 36 (old, confident)], 66 = End of Experiment"""
    )

    event_ts = AnnotationSeries(name='events',
                                data=np.asarray(events[1].values).astype(str),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=events_description)

    experiment_ids_description = (
        """The experiment_ids correspond to the encoding (i.e., learning) or recognition trials. The learning trials are demarcated by: {}. The recognition trials are demarcated by: {}. """
        .format(experiment_id_learn, experiment_id_recog))

    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values) /
                                TIME_SCALING,
                                description=experiment_ids_description)

    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use,
                           NOData.ls_cells(session_use)[0], path_to_data)
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trial will be stored in the trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]
    events_learn = events[(events[2] == experiment_id_learn)]
    events_learn_response = []
    events_learn_response_time = []
    for i in range(len(events_learn[0])):
        if (events_learn.iloc[i, 1]
                == NOData.markers['response_learning_animal']) or (
                    events_learn.iloc[i, 1]
                    == NOData.markers['response_learning_non_animal']):
            events_learn_response.append(events_learn.iloc[i, 1] - 20)
            events_learn_response_time.append(events_learn.iloc[i, 0])

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]
    events_recog = events[(events[2] == experiment_id_recog)]
    events_recog_response = []
    events_recog_response_time = []
    for i in range(len(events_recog[0])):
        if ((events_recog.iloc[i, 1] == NOData.markers['response_1'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_2'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_3'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_4'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_5'])
                or (events_recog.iloc[i, 1] == NOData.markers['response_6'])):
            events_recog_response.append(events_recog.iloc[i, 1])
            events_recog_response_time.append(events_recog.iloc[i, 0])

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]
    # Create the trial tables

    nwbfile.add_trial_column('stim_on_time',
                             'The Time when the Stimulus is Shown')
    nwbfile.add_trial_column('stim_off_time',
                             'The Time when the Stimulus is Off')
    nwbfile.add_trial_column('delay1_time', 'The Time when Delay1 is Off')
    nwbfile.add_trial_column('delay2_time', 'The Time when Delay2 is Off')
    nwbfile.add_trial_column('stim_phase',
                             'Learning/Recognition Phase During the Trial')
    nwbfile.add_trial_column('stimCategory', 'The Category ID of the Stimulus')
    nwbfile.add_trial_column('category_name',
                             'The Category Name of the Stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'The File Path to the Stimulus')
    nwbfile.add_trial_column(
        'new_old_labels_recog',
        '''The ground-truth labels for New or Old stimuli. 0 = Old stimuli
                            (presented during the learning phase), 1 = New stimuli (not seen during the learning phase)'''
    )
    nwbfile.add_trial_column('response_value',
                             'The Response for Each Stimulus')
    nwbfile.add_trial_column('response_time',
                             'The Response Time for each Stimulus')

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate the event list and add information into each epoch and trial table
    for i in range(range_learn):

        nwbfile.add_trial(
            start_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stop_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_on_time=(events_learn_stim_on.iloc[i][0]) / (TIME_SCALING),
            stim_off_time=(events_learn_stim_off.iloc[i][0]) / (TIME_SCALING),
            delay1_time=(events_learn_delay1_off.iloc[i][0]) / (TIME_SCALING),
            delay2_time=(events_learn_delay2_off.iloc[i][0]) / (TIME_SCALING),
            stim_phase='learn',
            stimCategory=cat_id_learn[i],
            category_name=cat_name_learn[i],
            external_image_file=stimuli_learn_path[i],
            new_old_labels_recog='NA',
            response_value=events_learn_response[i],
            response_time=(events_learn_response_time[i]) / (TIME_SCALING))

    for i in range(range_recog):

        nwbfile.add_trial(
            start_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stop_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_on_time=events_recog_stim_on.iloc[i][0] / (TIME_SCALING),
            stim_off_time=events_recog_stim_off.iloc[i][0] / (TIME_SCALING),
            delay1_time=events_recog_delay1_off.iloc[i][0] / (TIME_SCALING),
            delay2_time=events_recog_delay2_off.iloc[i][0] / (TIME_SCALING),
            stim_phase='recog',
            stimCategory=cat_id_recog[i],
            category_name=cat_name_recog[i],
            external_image_file=stimuli_recog_path[i],
            new_old_labels_recog=new_old_recog[i],
            response_value=events_recog_response[i],
            response_time=events_recog_response_time[i] / (TIME_SCALING))

    # Add the waveform clustering and the spike data.
    # Get the unique channel ids that we will iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # unique unit id
    unit_id = 0

    # Create unit columns
    nwbfile.add_unit_column('origClusterID', 'The original cluster id')
    nwbfile.add_unit_column('waveform_mean_encoding',
                            'The mean waveform for encoding phase.')
    nwbfile.add_unit_column('waveform_mean_recognition',
                            'The mean waveform for the recognition phase.')
    nwbfile.add_unit_column('IsolationDist', 'IsolDist')
    nwbfile.add_unit_column('SNR', 'SNR')
    nwbfile.add_unit_column('waveform_mean_sampling_rate',
                            'The Sampling Rate of Waveform')

    # Add stimuli
    stimuli_presentation = []

    # Add stimuli learn
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
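        # cv2.resize takes (width, height), so the resized arrays have shape
        # (400, 300, 3): 400 rows by 300 columns by 3 color channels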
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)

    # Add stimuli recog
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join(path_to_data, 'Stimuli', folders[0], folders[1],
                            folders[2])
        img = cv2.imread(path)
        resized_image = cv2.resize(img, (300, 400))
        stimuli_presentation.append(resized_image)

    # Add stimuli to OpticalSeries
    stimulus_presentation_on_time = []

    for n in range(0, len(events_learn_stim_on)):
        stimulus_presentation_on_time.append(events_learn_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    for n in range(0, len(events_recog_stim_on)):
        stimulus_presentation_on_time.append(events_recog_stim_on.iloc[n][0] /
                                             (TIME_SCALING))

    name = 'StimulusPresentation'
    stimulus = OpticalSeries(name=name,
                             data=stimuli_presentation,
                             timestamps=stimulus_presentation_on_time[:],
                             orientation='lower left',
                             format='raw',
                             unit='meters',
                             field_of_view=[.2, .3, .7],
                             distance=0.7,
                             dimension=[300, 400, 3])

    nwbfile.add_stimulus(stimulus)

    # Get Unit data
    all_spike_cluster_ids = []
    all_selected_time_stamps = []
    all_IsolDist = []
    all_SNR = []
    all_selected_mean_waveform_learn = []
    all_selected_mean_waveform_recog = []
    all_mean_waveform = []
    all_channel_id = []
    all_oriClusterIDs = []
    all_channel_numbers = []
    all_brain_area = []
    # Iterate the channel list

    # load brain area file
    brain_area_file_path = os.path.join(path_to_data, 'Data', 'events',
                                        session['session'], task_descr,
                                        'brainArea.mat')

    try:
        brain_area_mat = loadmat(brain_area_file_path)
    except FileNotFoundError:
        print("brain_area_mat file not found")
        raise  # brain_area_mat is required by extra_brain_area() below

    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        cell_file_path = os.path.join(path_to_data, 'Data', 'sorted',
                                      session['session'], task_descr,
                                      cell_name)

        try:
            cell_mat = loadmat(cell_file_path)
        except FileNotFoundError:
            print("cell mat file not found")
            continue

        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        spike_cluster_id = np.asarray([spike[1] for spike in spikes
                                       ])  # Each Cluster ID of the spike
        spike_timestamps = (np.asarray([spike[2] for spike in spikes])) / (
            TIME_SCALING)  # Timestamps of spikes for each ClusterID
        unique_cluster_ids = np.unique(spike_cluster_id)

        # Iterate over the clusters (there may be more than one per channel)
        for id in unique_cluster_ids:

            # Grab brain area
            brain_area = extra_brain_area(brain_area_mat, channel_id)

            selected_spike_timestamps = spike_timestamps[spike_cluster_id ==
                                                         id]
            IsolDist, SNR = extract_IsolDist_SNR_by_cluster_id(
                IsolDist_SNR, id)
            selected_mean_waveform_learn = extra_mean_waveform(
                meanWaveform_learn, id)
            selected_mean_waveform_recog = extra_mean_waveform(
                meanWaveform_recog, id)

            # If the mean waveform does not have 256 elements, set it to all zeros
            if len(selected_mean_waveform_learn) != 256:
                selected_mean_waveform_learn = np.zeros(256)
            if len(selected_mean_waveform_recog) != 256:
                selected_mean_waveform_recog = np.zeros(256)

            mean_waveform = np.hstack(
                [selected_mean_waveform_learn, selected_mean_waveform_recog])

            # Append unit data
            all_spike_cluster_ids.append(id)
            all_selected_time_stamps.append(selected_spike_timestamps)
            all_IsolDist.append(IsolDist)
            all_SNR.append(SNR)
            all_selected_mean_waveform_learn.append(
                selected_mean_waveform_learn)
            all_selected_mean_waveform_recog.append(
                selected_mean_waveform_recog)
            all_mean_waveform.append(mean_waveform)
            all_channel_id.append(channel_id)
            all_oriClusterIDs.append(int(id))
            all_channel_numbers.append(channel_id)
            all_brain_area.append(brain_area)

            unit_id += 1

    nwbfile.add_electrode_column(
        name='origChannel',
        description='The original channel ID for the channel')

    # Add device
    device = nwbfile.create_device(name=signalSystem)

    # Add Electrodes (brain Area Locations, MNI coordinates for microwires)
    length_all_spike_cluster_ids = len(all_spike_cluster_ids)
    for electrodeNumber in range(0, len(channel_ids)):

        brainArea_location = extra_brain_area(brain_area_mat,
                                              channel_ids[electrodeNumber])

        if brainArea_location == 'RH':  #  Right Hippocampus
            full_brainArea_Location = 'Right Hippocampus'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            # Add electrode
            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=RH_x,
                                  y=RH_y,
                                  z=RH_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])

        if brainArea_location == 'LH':
            full_brainArea_Location = 'Left Hippocampus'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],  # index by channel, as in the RH branch
                                  x=LH_x,
                                  y=LH_y,
                                  z=LH_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])
        if brainArea_location == 'RA':
            full_brainArea_Location = 'Right Amygdala'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=RA_x,
                                  y=RA_y,
                                  z=RA_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])
        if brainArea_location == 'LA':
            full_brainArea_Location = 'Left Amygdala'

            electrode_name = '{}-microwires-{}'.format(
                signalSystem, channel_ids[electrodeNumber])
            description = "Behnke Fried/Micro Inner Wire Bundle (Behnke-Fried BF08R-SP05X-000 and WB09R-SP00X-0B6; Ad-Tech Medical)"
            location = full_brainArea_Location

            # Add electrode group
            electrode_group = nwbfile.create_electrode_group(
                electrode_name,
                description=description,
                location=location,
                device=device)

            nwbfile.add_electrode([channel_ids[electrodeNumber]],
                                  x=LA_x,
                                  y=LA_y,
                                  z=LA_z,
                                  imp=np.nan,
                                  location=full_brainArea_Location,
                                  filtering='300-3000Hz',
                                  group=electrode_group,
                                  origChannel=channel_ids[electrodeNumber])

    # Create the channel list index: map each unit to the position of its
    # channel within the unique channel ids
    unique_channel_ids = np.unique(all_channel_id)
    channel_list = [
        int(np.where(unique_channel_ids == ch)[0][0]) for ch in all_channel_id
    ]

    # Add waveform sampling rate
    waveform_mean_sampling_rate = [98.4 * 10**3]
    waveform_mean_sampling_rate_matrix = [waveform_mean_sampling_rate
                                          ] * (length_all_spike_cluster_ids)

    # Add Units to NWB file
    for index_id in range(0, length_all_spike_cluster_ids):
        nwbfile.add_unit(
            id=index_id,
            spike_times=all_selected_time_stamps[index_id],
            origClusterID=all_oriClusterIDs[index_id],
            IsolationDist=all_IsolDist[index_id],
            SNR=all_SNR[index_id],
            waveform_mean_encoding=all_selected_mean_waveform_learn[index_id],
            waveform_mean_recognition=all_selected_mean_waveform_recog[
                index_id],
            electrodes=[channel_list[index_id]],
            waveform_mean_sampling_rate=waveform_mean_sampling_rate_matrix[
                index_id])

    return nwbfile
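
# Hedged usage sketch for the function above: `NOData`, the session number,
# the INI path and the data path are all placeholders here, and writing the
# file uses the standard pynwb NWBHDF5IO API.
#
#     from pynwb import NWBHDF5IO
#
#     nwbfile = no2nwb(NOData, 132, 'subjects.ini', '/path/to/data')
#     with NWBHDF5IO('session_132.nwb', mode='w') as io:
#         io.write(nwbfile)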
Beispiel #22
def chang2nwb(blockpath, out_file_path=None, save_to_file=False, htk_config=None):
    """
    Parameters
    ----------
    blockpath: str
    out_file_path: None | str
        if None, output = [blockpath]/[blockname].nwb
    save_to_file : bool
        If True, saves to file. If False, just returns nwbfile object
    htk_config : dict
        Dictionary containing HTK conversion paths and options. Example:
        {
            ecephys_path: 'path_to/ecephys_htk_files',
            ecephys_type: 'raw', 'preprocessed' or 'high_gamma',
            analog_path: 'path_to/analog_htk_files',
            anin1: {present: True, name: 'microphone', type: 'acquisition'},
            anin2: {present: True, name: 'speaker1', type: 'stimulus'},
            anin3: {present: False, name: 'speaker2', type: 'stimulus'},
            anin4: {present: False, name: 'custom', type: 'acquisition'},
            metadata: metadata,
            electrodes_file: electrodes_file,
            bipolar_file: bipolar_file
        }

    Returns
    -------
    tuple
        (nwbfile, out_file_path, subject_id, blockname)
    """

    metadata = {}

    if htk_config is None:
        blockpath = Path(blockpath)
    else:
        blockpath = Path(htk_config['ecephys_path'])
        metadata = htk_config['metadata']
    blockname = blockpath.parent.name
    subject_id = blockpath.parent.parent.name[2:]

    if out_file_path is None:
        out_file_path = blockpath.resolve().parent / ''.join(['EC', subject_id, '_', blockname, '.nwb'])

    # file paths (the analog path and the anin blocks below assume htk_config was given)
    ecog_path = blockpath
    anin_path = htk_config['analog_path']
    bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')

    # Create the NWB file object
    nwbfile_dict = {
        'session_description': blockname,
        'identifier': blockname,
        'session_start_time': datetime.now().astimezone(),
        'institution': 'University of California, San Francisco',
        'lab': 'Chang Lab'
    }
    if 'NWBFile' in metadata:
        nwbfile_dict.update(metadata['NWBFile'])
    nwbfile = NWBFile(**nwbfile_dict)

    # Read electrophysiology data from HTK files
    print('reading htk acquisition...', flush=True)
    ecog_rate, data = readhtks(ecog_path)
    data = data.squeeze()
    print('done', flush=True)

    # Get electrodes info from mat file
    if htk_config['electrodes_file'] is not None:
        nwbfile = elecs_to_electrode_table(
            nwbfile=nwbfile,
            elecspath=htk_config['electrodes_file'],
        )
        n_electrodes = nwbfile.electrodes[:].shape[0]
        all_elecs = list(range(n_electrodes))
        elecs_region = nwbfile.create_electrode_table_region(
            region=all_elecs,
            description='ECoG electrodes on brain'
        )
    else:
        ecephys_dict = {
            'Device': [{'name': 'auto_device'}],
            'ElectricalSeries': [{'name': 'ECoG', 'description': 'description'}],
            'ElectrodeGroup': [{'name': 'auto_group', 'description': 'auto_group',
                                'location': 'location', 'device': 'auto_device'}]
        }
        if 'Ecephys' in metadata:
            ecephys_dict.update(metadata['Ecephys'])

        # Create devices
        for dev in ecephys_dict['Device']:
            device = nwbfile.create_device(dev['name'])

        # Electrode groups
        for el_grp in ecephys_dict['ElectrodeGroup']:
            device = nwbfile.devices[el_grp['device']]
            electrode_group = nwbfile.create_electrode_group(
                name=el_grp['name'],
                description=el_grp['description'],
                location=el_grp['location'],
                device=device
            )

        # Electrodes table
        n_electrodes = data.shape[1]
        nwbfile.add_electrode_column('label', 'label of electrode')
        nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')
        nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')
        nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')
        nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')
        nwbfile.add_electrode_column('null', 'if not connected to real electrode')
        bad_elecs_inds = get_bad_elecs(blockpath)
        for elec_counter in range(n_electrodes):
            bad = elec_counter in bad_elecs_inds
            nwbfile.add_electrode(
                id=elec_counter,
                x=np.nan,
                y=np.nan,
                z=np.nan,
                imp=np.nan,
                x_warped=np.nan,
                y_warped=np.nan,
                z_warped=np.nan,
                location='',
                filtering='none',
                group=electrode_group,
                label='',
                bad=bad,
                null=False,
            )

        all_elecs = list(range(n_electrodes))
        elecs_region = nwbfile.create_electrode_table_region(
            region=all_elecs,
            description='ECoG electrodes on brain'
        )

    # Get Bipolar table from file
    if htk_config['bipolar_file'] is not None:
        df = pd.read_csv(htk_config['bipolar_file'], index_col='id', sep='\t')

        # Create bipolar scheme table
        bipolar_scheme_table = BipolarSchemeTable(
            name='bipolar_scheme_table',
            description='desc'
        )

        # Columns for bipolar scheme - all anodes and cathodes within the same
        # bipolar row are considered to have the same group and location
        bipolar_scheme_table.add_column(
            name='group_name',
            description='electrode group name'
        )
        bipolar_scheme_table.add_column(
            name='location',
            description='electrode location'
        )

        # Iterate over anode / cathode rows
        for i, r in df.iterrows():
            if isinstance(r['anodes'], str):
                anodes = [int(a) for a in r['anodes'].split(',')]
            else:
                anodes = [int(r['anodes'])]
            if isinstance(r['cathodes'], str):
                cathodes = [int(a) for a in r['cathodes'].split(',')]
            else:
                cathodes = [int(r['cathodes'])]
            bipolar_scheme_table.add_row(
                anodes=anodes,
                cathodes=cathodes,
                group_name=nwbfile.electrodes['group_name'][anodes[0]],
                location=nwbfile.electrodes['location'][anodes[0]]
            )

        bipolar_scheme_table.anodes.table = nwbfile.electrodes
        bipolar_scheme_table.cathodes.table = nwbfile.electrodes
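        # The anodes/cathodes columns are DynamicTableRegions of row indices;
        # pointing their .table at nwbfile.electrodes resolves those indices
        # against the electrodes table built above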

        # Creates bipolar table region
        elecs_region = DynamicTableRegion(
            name='electrodes',
            data=np.arange(0, df.shape[0]),
            description='desc',
            table=bipolar_scheme_table
        )

        ecephys_ext = EcephysExt(name='ecephys_ext')
        ecephys_ext.bipolar_scheme_table = bipolar_scheme_table
        nwbfile.add_lab_meta_data(ecephys_ext)

    # Stores HTK electrophysiology data as raw, preprocessed or high gamma
    if htk_config['ecephys_type'] == 'raw':
        ecog_es = ElectricalSeries(name='ECoG',
                                   data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
                                   electrodes=elecs_region,
                                   rate=ecog_rate,
                                   description='all Wav data')
        nwbfile.add_acquisition(ecog_es)
    elif htk_config['ecephys_type'] == 'preprocessed':
        lfp = LFP()
        ecog_es = ElectricalSeries(name='preprocessed',
                                   data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
                                   electrodes=elecs_region,
                                   rate=ecog_rate,
                                   description='all Wav data')
        lfp.add_electrical_series(ecog_es)
        # Creates the ecephys processing module
        ecephys_module = nwbfile.create_processing_module(
            name='ecephys',
            description='preprocessed electrophysiology data'
        )
        ecephys_module.add_data_interface(lfp)
    elif htk_config['ecephys_type'] == 'high_gamma':
        ecog_es = ElectricalSeries(name='high_gamma',
                                   data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
                                   electrodes=elecs_region,
                                   rate=ecog_rate,
                                   description='all Wav data')
        # Creates the ecephys processing module
        ecephys_module = nwbfile.create_processing_module(
            name='ecephys',
            description='preprocessed electrophysiology data'
        )
        ecephys_module.add_data_interface(ecog_es)

    # Add ANIN 1
    if htk_config['anin1']['present']:
        fs, data = get_analog(anin_path, 1)
        ts = TimeSeries(
            name=htk_config['anin1']['name'],
            data=data,
            unit='NA',
            rate=fs,
        )
        if htk_config['anin1']['type'] == 'acquisition':
            nwbfile.add_acquisition(ts)
        else:
            nwbfile.add_stimulus(ts)
        print('ANIN1 saved with name "', htk_config['anin1']['name'], '" in ',
              htk_config['anin1']['type'])

    # Add ANIN 2
    if htk_config['anin2']['present']:
        fs, data = get_analog(anin_path, 2)
        ts = TimeSeries(
            name=htk_config['anin2']['name'],
            data=data,
            unit='NA',
            rate=fs,
        )
        if htk_config['anin2']['type'] == 'acquisition':
            nwbfile.add_acquisition(ts)
        else:
            nwbfile.add_stimulus(ts)
        print('ANIN2 saved with name "', htk_config['anin2']['name'], '" in ',
              htk_config['anin2']['type'])

    # Add ANIN 3
    if htk_config['anin3']['present']:
        fs, data = get_analog(anin_path, 3)
        ts = TimeSeries(
            name=htk_config['anin3']['name'],
            data=data,
            unit='NA',
            rate=fs,
        )
        if htk_config['anin3']['type'] == 'acquisition':
            nwbfile.add_acquisition(ts)
        else:
            nwbfile.add_stimulus(ts)
        print('ANIN3 saved with name "', htk_config['anin3']['name'], '" in ',
              htk_config['anin3']['type'])

    # Add ANIN 4
    if htk_config['anin4']['present']:
        fs, data = get_analog(anin_path, 4)
        ts = TimeSeries(
            name=htk_config['anin4']['name'],
            data=data,
            unit='NA',
            rate=fs,
        )
        if htk_config['anin4']['type'] == 'acquisition':
            nwbfile.add_acquisition(ts)
        else:
            nwbfile.add_stimulus(ts)
        print('ANIN4 saved with name "', htk_config['anin4']['name'], '" in ',
              htk_config['anin4']['type'])

    # Add bad time segments
    if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:
        bad_time = sio.loadmat(bad_time_file)['badTimeSegments']
        for row in bad_time:
            nwbfile.add_invalid_time_interval(start_time=row[0],
                                              stop_time=row[1],
                                              tags=('ECoG artifact',),
                                              timeseries=ecog_es)

    # Subject
    subject_dict = {'subject_id': subject_id}
    if 'Subject' in metadata:
        subject_dict.update(metadata['Subject'])
    subject = ECoGSubject(**subject_dict)
    nwbfile.subject = subject

    if save_to_file:
        print('Saving HTK content to NWB file...')
        # Export the NWB file
        with NWBHDF5IO(str(out_file_path), manager=manager, mode='w') as io:
            io.write(nwbfile)

        # read check
        with NWBHDF5IO(str(out_file_path), manager=manager, mode='r') as io:
            io.read()
        print('NWB file saved: ', str(out_file_path))

    return nwbfile, out_file_path, subject_id, blockname
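
# Hedged usage sketch for chang2nwb: every path and option below is a
# placeholder, and the keys mirror the htk_config example in the docstring.
#
#     htk_config = {
#         'ecephys_path': '/path_to/ecephys_htk_files',
#         'ecephys_type': 'raw',
#         'analog_path': '/path_to/analog_htk_files',
#         'anin1': {'present': True, 'name': 'microphone', 'type': 'acquisition'},
#         'anin2': {'present': True, 'name': 'speaker1', 'type': 'stimulus'},
#         'anin3': {'present': False, 'name': 'speaker2', 'type': 'stimulus'},
#         'anin4': {'present': False, 'name': 'custom', 'type': 'acquisition'},
#         'metadata': {},
#         'electrodes_file': None,
#         'bipolar_file': None,
#     }
#     nwbfile, out_path, subject_id, blockname = chang2nwb(
#         '/path_to/EC000/B1', htk_config=htk_config, save_to_file=False)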
Beispiel #23
# :py:class:`~pynwb.icephys.CurrentClampStimulusSeries`--, and three classes for representing response
# data--:py:class:`~pynwb.icephys.VoltageClampSeries`,
# :py:class:`~pynwb.icephys.CurrentClampSeries`, and
# :py:class:`~pynwb.icephys.IZeroClampSeries`.
#
# Here, we will use :py:class:`~pynwb.icephys.CurrentClampStimulusSeries` to store current clamp stimulus
# data and then add it to our NWBFile as stimulus data using the :py:class:`~pynwb.file.NWBFile` method
# :py:meth:`~pynwb.file.NWBFile.add_stimulus`.

from pynwb.icephys import CurrentClampStimulusSeries

ccss = CurrentClampStimulusSeries(
    name="ccss", source="command", data=[1, 2, 3, 4, 5], unit='A',
    starting_time=123.6, rate=10e3, electrode=elec, gain=0.02)

nwbfile.add_stimulus(ccss)

# Here, we will use :py:class:`~pynwb.icephys.VoltageClampSeries` to store voltage clamp
# data and then add it to our NWBFile as acquired data using the :py:class:`~pynwb.file.NWBFile` method
# :py:meth:`~pynwb.file.NWBFile.add_acquisition`.

from pynwb.icephys import VoltageClampSeries

vcs = VoltageClampSeries(
    name='vcs', source="command", data=[0.1, 0.2, 0.3, 0.4, 0.5],
    unit='A', conversion=1e-12, resolution=np.nan, starting_time=123.6, rate=20e3,
    electrode=elec, gain=0.02, capacitance_slow=100e-12, resistance_comp_correction=70.0,
    capacitance_fast=np.nan, resistance_comp_bandwidth=np.nan, resistance_comp_prediction=np.nan,
    whole_cell_capacitance_comp=np.nan, whole_cell_series_resistance_comp=np.nan)

nwbfile.add_acquisition(vcs)
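
# Once added, the stored series can be retrieved from the in-memory file
# object by name (a standard pynwb access pattern):

vcs_back = nwbfile.acquisition['vcs']
ccss_back = nwbfile.stimulus['ccss']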
Beispiel #24
    def run_conversion(
        self,
        nwbfile: NWBFile,
        metadata: dict,
        stub_test: bool = False,
    ):
        session_path = Path(self.source_data["folder_path"])
        session_id = session_path.name

        # Stimuli
        [
            nwbfile.add_stimulus(x) for x in get_events(
                session_path=session_path,
                suffixes=[".lrw.evt", ".puf.evt", ".rip.evt", ".rrw.evt"])
        ]

        # Epochs
        df = pd.read_csv(session_path / f"{session_id}.cat.evt",
                         sep=" ",
                         names=("time", "begin_or_end", "of", "epoch_name"))
        epoch_starts = []
        for j in range(int(len(df) / 2)):
            epoch_starts.append(df["time"][2 * j])
            nwbfile.add_epoch(start_time=epoch_starts[j],
                              stop_time=df["time"][2 * j + 1],
                              tags=[df["epoch_name"][2 * j][18:]])

        # Trials
        trialdata_path = session_path / f"{session_id}-TrackRunTimes.mat"
        if trialdata_path.is_file():
            trials_data = loadmat(trialdata_path)["trackruntimes"]
            for trial_data in trials_data:
                nwbfile.add_trial(start_time=trial_data[0],
                                  stop_time=trial_data[1])

        # .whl position
        whl_files = []
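        # NOTE: whl_files is an empty placeholder here; when populated, the
        # starting_time below reuses j, i.e. the index of the last epoch
        # parsed from the .cat.evt file above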
        for whl_file in whl_files:
            add_position_data(nwbfile=nwbfile,
                              session_path=session_path,
                              whl_file_path=whl_file,
                              starting_time=epoch_starts[j])

        # States
        sleep_state_fpath = session_path / f"{session_id}.SleepState.states.mat"
        # label renaming
        state_label_names = dict(WAKEstate="Awake",
                                 NREMstate="Non-REM",
                                 REMstate="REM")
        if sleep_state_fpath.is_file():
            matin = loadmat(sleep_state_fpath)["SleepState"]["ints"][0][0]

            table = TimeIntervals(name="states",
                                  description="Sleep states of animal.")
            table.add_column(name="label", description="Sleep state.")

            data = []
            for name in matin.dtype.names:
                for row in matin[name][0][0]:
                    data.append(
                        dict(start_time=row[0],
                             stop_time=row[1],
                             label=state_label_names[name]))
            [
                table.add_row(**row)
                for row in sorted(data, key=lambda x: x["start_time"])
            ]
            check_module(nwbfile, "behavior",
                         "Contains behavioral data.").add(table)
Beispiel #25
class Alyx2NWBConverter:
    def __init__(self,
                 saveloc=None,
                 nwb_metadata_file=None,
                 metadata_obj: Alyx2NWBMetadata = None,
                 one_object: ONE = None,
                 save_raw=False,
                 save_camera_raw=False,
                 complevel=4,
                 shuffle=False,
                 buffer_size=1):
        """
        Retrieve all Alyx session and subject metadata and raw data for the given eid
        using the ONE API's load method, map them to NWB-supported datatypes, and
        create an NWB file.
        Parameters
        ----------
        saveloc: str, Path
            save location of nwbfile
        nwb_metadata_file: [dict, str]
            output of Alyx2NWBMetadata as a dict/json location str
        metadata_obj: Alyx2NWBMetadata
        one_object: ONE()
        save_raw: bool
            will load and save large raw files: ecephys.raw.ap/lf.cbin to nwb
        save_camera_raw: bool
            will load and save mice camera movie .mp4: _iblrig_Camera.raw
        complevel: int
            level of compression to apply to raw datasets
            (0 = lowest, 9 = highest). https://docs.h5py.org/en/latest/high/dataset.html
        shuffle: bool
            Enable the shuffle I/O filter. http://docs.h5py.org/en/latest/high/dataset.html#dataset-shuffle
        """
        self.buffer_size = buffer_size
        self.complevel = complevel
        self.shuffle = shuffle
        if nwb_metadata_file is not None:
            if isinstance(nwb_metadata_file, dict):
                self.nwb_metadata = nwb_metadata_file
            elif isinstance(nwb_metadata_file, str):
                with open(nwb_metadata_file, 'r') as f:
                    self.nwb_metadata = json.load(f)
        elif metadata_obj is not None:
            self.nwb_metadata = metadata_obj.complete_metadata
        else:
            raise Exception(
                'one of the arguments nwb_metadata_file or metadata_obj is required')
        if one_object is not None:
            self.one_object = one_object
        elif metadata_obj is not None:
            self.one_object = metadata_obj.one_obj
        else:
            warnings.warn('creating a ONE object and continuing')
            self.one_object = ONE()
        if saveloc is None:
            warnings.warn('saving nwb file in current working directory')
            self.saveloc = str(Path.cwd())
        else:
            self.saveloc = str(saveloc)
        self.eid = self.nwb_metadata["eid"]
        if not isinstance(self.nwb_metadata['NWBFile']['session_start_time'],
                          datetime):
            self.nwb_metadata['NWBFile']['session_start_time'] = \
                datetime.strptime(self.nwb_metadata['NWBFile']['session_start_time'], '%Y-%m-%dT%X').replace(
                    tzinfo=pytz.utc)
            self.nwb_metadata['IBLSubject']['date_of_birth'] = \
                datetime.strptime(self.nwb_metadata['IBLSubject']['date_of_birth'], '%Y-%m-%dT%X').replace(
                    tzinfo=pytz.utc)
        # create nwbfile:
        self.initialize_nwbfile()
        self.no_probes = len(self.nwb_metadata['Probes'])
        if self.no_probes == 0:
            warnings.warn(
                'could not find probe information, will create trials, behavior, acquisition'
            )
        self.electrode_table_exist = False
        self._one_data = _OneData(self.one_object,
                                  self.eid,
                                  self.no_probes,
                                  self.nwb_metadata,
                                  save_raw=save_raw,
                                  save_camera_raw=save_camera_raw)

    def initialize_nwbfile(self):
        """
        Creates self.nwbfile, devices and electrode group of nwb file.
        """
        nwbfile_args = dict(identifier=str(uuid.uuid4()), )
        nwbfile_args.update(**self.nwb_metadata['NWBFile'])
        self.nwbfile = NWBFile(**nwbfile_args)
        # create devices
        [
            self.nwbfile.create_device(**idevice_meta)
            for idevice_meta in self.nwb_metadata['Ecephys']['Device']
        ]
        if 'ElectrodeGroup' in self.nwb_metadata['Ecephys']:
            self.create_electrode_groups(self.nwb_metadata['Ecephys'])

    def create_electrode_groups(self, metadata_ecephys):
        """
        This method is called at __init__.
        Use metadata to create ElectrodeGroup object(s) in the NWBFile

        Parameters
        ----------
        metadata_ecephys : dict
            Dict with key:value pairs for defining the Ecephys group from where this
            ElectrodeGroup belongs. This should contain keys for required groups
            such as 'Device', 'ElectrodeGroup', etc.
        """
        for metadata_elec_group in metadata_ecephys['ElectrodeGroup']:
            eg_name = metadata_elec_group['name']
            # Tests if ElectrodeGroup already exists
            aux = [i.name == eg_name for i in self.nwbfile.children]
            if any(aux):
                print(eg_name + ' already exists in current NWBFile.')
            else:
                device_name = metadata_elec_group['device']
                if device_name in self.nwbfile.devices:
                    device = self.nwbfile.devices[device_name]
                else:
                    print('Device ', device_name, ' for ElectrodeGroup ',
                          eg_name, ' does not exist.')
                    print('Make sure ', device_name,
                          ' is defined in metadata.')
                    continue  # skip group creation when the device is missing

                eg_description = metadata_elec_group['description']
                eg_location = metadata_elec_group['location']
                self.nwbfile.create_electrode_group(name=eg_name,
                                                    location=eg_location,
                                                    device=device,
                                                    description=eg_description)

    def check_module(self, name, description=None):
        """
        Check if processing module exists. If not, create it. Then return module

        Parameters
        ----------
        name: str
        description: str | None (optional)

        Returns
        -------
        pynwb.module

        """

        if name in self.nwbfile.processing:
            return self.nwbfile.processing[name]
        else:
            if description is None:
                description = name
            return self.nwbfile.create_processing_module(name, description)
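
    # Hedged usage note: e.g. self.check_module('behavior') returns the
    # existing 'behavior' processing module, or creates it with the
    # description defaulting to the module name.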

    def create_stimulus(self):
        """
        Creates stimulus data in nwbfile
        """
        stimulus_list = self._get_data(
            self.nwb_metadata['Stimulus'].get('time_series'))
        for i in stimulus_list:
            self.nwbfile.add_stimulus(pynwb.TimeSeries(**i))

    def create_units(self):
        """
        Creates the units table in the nwbfile
        """
        if self.no_probes == 0:
            return
        if not self.electrode_table_exist:
            self.create_electrode_table_ecephys()
        unit_table_list = self._get_data(self.nwb_metadata['Units'])
        # no required arguments for units table. Below are default columns in the table.
        default_args = [
            'id', 'waveform_mean', 'electrodes', 'electrode_group',
            'spike_times', 'obs_intervals'
        ]
        default_ids = _get_default_column_ids(
            default_args, [i['name'] for i in unit_table_list])
        if len(default_ids) != len(default_args):
            warnings.warn(f'could not find all of {default_args} in the units table')
        non_default_ids = list(
            set(range(len(unit_table_list))).difference(set(default_ids)))
        default_dict = {
            unit_table_list[id]['name']: unit_table_list[id]['data']
            for id in default_ids
        }
        for cluster_no in range(len(unit_table_list[0]['data'])):
            add_dict = dict()
            for ibl_dataset_name in default_dict:
                if ibl_dataset_name == 'electrodes':
                    add_dict.update({
                        ibl_dataset_name:
                        [default_dict[ibl_dataset_name][cluster_no]]
                    })
                elif ibl_dataset_name == 'spike_times':
                    add_dict.update({
                        ibl_dataset_name:
                        default_dict[ibl_dataset_name][cluster_no]
                    })
                elif ibl_dataset_name == 'obs_intervals':  # common across all clusters
                    add_dict.update(
                        {ibl_dataset_name: default_dict[ibl_dataset_name]})
                elif ibl_dataset_name == 'electrode_group':
                    add_dict.update({
                        ibl_dataset_name:
                        self.nwbfile.electrode_groups[self.nwb_metadata[
                            'Probes'][default_dict[ibl_dataset_name]
                                      [cluster_no]]['name']]
                    })
                elif ibl_dataset_name == 'id':
                    if cluster_no >= self._one_data.data_attrs_dump[
                            'unit_table_length'][0]:
                        add_dict.update({
                            ibl_dataset_name:
                            default_dict[ibl_dataset_name][cluster_no] +
                            self._one_data.data_attrs_dump['unit_table_length']
                            [0]
                        })
                    else:
                        add_dict.update({
                            ibl_dataset_name:
                            default_dict[ibl_dataset_name][cluster_no]
                        })
                elif ibl_dataset_name == 'waveform_mean':
                    add_dict.update({
                        ibl_dataset_name:
                        np.mean(default_dict[ibl_dataset_name][cluster_no],
                                axis=1)
                    })  # take the mean across all the channels of the cluster
            self.nwbfile.add_unit(**add_dict)

        for id in non_default_ids:
            if isinstance(unit_table_list[id]['data'], np.ndarray):
                unit_table_list[id]['data'] = unit_table_list[id][
                    'data'].tolist()  # convert numpy (possibly of strings) to list
            self.nwbfile.add_unit_column(
                name=unit_table_list[id]['name'],
                description=unit_table_list[id]['description'],
                data=unit_table_list[id]['data'])

    def create_electrode_table_ecephys(self):
        """
        Creates electrode table
        """
        if self.no_probes == 0:
            return
        if self.electrode_table_exist:
            return
        electrode_table_list = self._get_data(
            self.nwb_metadata['ElectrodeTable'])
        # electrode table has required arguments:
        required_args = ['group', 'x', 'y']
        default_ids = _get_default_column_ids(
            required_args, [i['name'] for i in electrode_table_list])
        non_default_ids = list(
            set(range(len(electrode_table_list))).difference(set(default_ids)))
        default_dict = {
            electrode_table_list[id]['name']: electrode_table_list[id]['data']
            for id in default_ids
        }
        if 'group' in default_dict:
            group_labels = default_dict['group']
        else:  # else fill with probe zero data.
            group_labels = np.concatenate([
                np.ones(self._one_data.
                        data_attrs_dump['electrode_table_length'][i],
                        dtype=int) * i for i in range(self.no_probes)
            ])
        for electrode_no in range(len(electrode_table_list[0]['data'])):
            if 'x' in default_dict:
                x = default_dict['x'][electrode_no][0]
                y = default_dict['y'][electrode_no][1]
            else:
                x = float('NaN')
                y = float('NaN')
            group_data = self.nwbfile.electrode_groups[self.nwb_metadata[
                'Probes'][group_labels[electrode_no]]['name']]
            self.nwbfile.add_electrode(x=x,
                                       y=y,
                                       z=float('NaN'),
                                       imp=float('NaN'),
                                       location='None',
                                       group=group_data,
                                       filtering='none')
        for id in non_default_ids:
            self.nwbfile.add_electrode_column(
                name=electrode_table_list[id]['name'],
                description=electrode_table_list[id]['description'],
                data=electrode_table_list[id]['data'])
        # create probes specific DynamicTableRegion:
        self.probe_dt_region = [
            self.nwbfile.create_electrode_table_region(region=list(
                range(self._one_data.data_attrs_dump['electrode_table_length']
                      [j])),
                                                       description=i['name'])
            for j, i in enumerate(self.nwb_metadata['Probes'])
        ]
        self.probe_dt_region_all = self.nwbfile.create_electrode_table_region(
            region=list(
                range(
                    sum(self._one_data.
                        data_attrs_dump['electrode_table_length']))),
            description='AllProbes')
        self.electrode_table_exist = True

    def create_timeseries_ecephys(self):
        """
        create SpikeEventSeries, ElectricalSeries, Spectrum datatypes within nwbfile>processing>ecephys
        """
        if self.no_probes == 0:
            return
        if not self.electrode_table_exist:
            self.create_electrode_table_ecephys()
        if 'ecephys' not in self.nwbfile.processing:
            mod = self.nwbfile.create_processing_module(
                'ecephys', 'Processed electrophysiology data of IBL')
        else:
            mod = self.nwbfile.get_processing_module('ecephys')
        for neurodata_type_name, neurodata_type_args_list in self.nwb_metadata[
                'Ecephys']['Ecephys'].items():
            data_retrieved_args_list = self._get_data(
                neurodata_type_args_list
            )  # list of dicts with keys as argument names
            for no, neurodata_type_args in enumerate(data_retrieved_args_list):
                ibl_dataset_name = neurodata_type_args_list[no]['data']
                if 'ElectricalSeries' in neurodata_type_name:
                    timestamps_names = self._one_data.data_attrs_dump[
                        '_iblqc_ephysTimeRms.timestamps']
                    data_names = self._one_data.data_attrs_dump[
                        '_iblqc_ephysTimeRms.rms']
                    for data_idx, data in enumerate(
                            neurodata_type_args['data']):
                        probe_no = [
                            j for j in range(self.no_probes)
                            if self.nwb_metadata['Probes'][j]['name'] in
                            data_names[data_idx]
                        ][0]
                        if data.shape[1] > self._one_data.data_attrs_dump[
                                'electrode_table_length'][probe_no]:
                            if 'channels.rawInd' in self._one_data.loaded_datasets:
                                channel_idx = self._one_data.loaded_datasets[
                                    'channels.rawInd'][probe_no].data.astype(
                                        'int')
                            else:
                                warnings.warn('could not find channels.rawInd')
                                break
                        else:
                            channel_idx = slice(None)
                        mod.add(
                            ElectricalSeries(
                                name=data_names[data_idx],
                                description=neurodata_type_args['description'],
                                timestamps=neurodata_type_args['timestamps']
                                [timestamps_names.index(data_names[data_idx])],
                                data=data[:, channel_idx],
                                electrodes=self.probe_dt_region[probe_no]))
                elif 'Spectrum' in neurodata_type_name:
                    if ibl_dataset_name == '_iblqc_ephysSpectralDensity.power':
                        freqs_names = self._one_data.data_attrs_dump[
                            '_iblqc_ephysSpectralDensity.freqs']
                        data_names = self._one_data.data_attrs_dump[
                            '_iblqc_ephysSpectralDensity.power']
                        for data_idx, data in enumerate(
                                neurodata_type_args['data']):
                            mod.add(
                                Spectrum(name=data_names[data_idx],
                                         frequencies=neurodata_type_args[
                                             'frequencies'][freqs_names.index(
                                                 data_names[data_idx])],
                                         power=data))
                elif 'SpikeEventSeries' in neurodata_type_name:
                    neurodata_type_args.update(
                        dict(electrodes=self.probe_dt_region_all))
                    mod.add(
                        pynwb.ecephys.SpikeEventSeries(**neurodata_type_args))

    def create_behavior(self):
        """
        Create behavior processing module
        """
        self.check_module('behavior')
        for behavior_datatype in self.nwb_metadata['Behavior']:
            if behavior_datatype == 'Position':
                position_cont = pynwb.behavior.Position()
                time_series_list_details = self._get_data(
                    self.nwb_metadata['Behavior'][behavior_datatype]
                    ['spatial_series'])
                if len(time_series_list_details) == 0:
                    continue
                # rate_list = [150.0,60.0,60.0] # based on the google doc for _iblrig_body/left/rightCamera.raw,
                dataname_list = self._one_data.data_attrs_dump['camera.dlc']
                data_list = time_series_list_details[0]['data']
                timestamps_list = time_series_list_details[0]['timestamps']
                for dataname, data, timestamps in zip(dataname_list, data_list,
                                                      timestamps_list):
                    colnames = data.columns
                    data_np = data.to_numpy()
                    x_column_ids = [
                        n for n, k in enumerate(colnames) if 'x' in k
                    ]
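                    # assumes the matching y column immediately follows each
                    # x column, so each 2-column slice below is an (x, y) pair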
                    for x_column_id in x_column_ids:
                        data_loop = data_np[:, x_column_id:x_column_id + 2]
                        position_cont.create_spatial_series(
                            name=dataname + colnames[x_column_id][:-2],
                            data=data_loop,
                            reference_frame='none',
                            timestamps=timestamps,
                            conversion=1e-3)
                self.nwbfile.processing['behavior'].add(position_cont)
            elif not (behavior_datatype == 'BehavioralEpochs'):
                time_series_func = pynwb.TimeSeries
                time_series_list_details = self._get_data(
                    self.nwb_metadata['Behavior'][behavior_datatype]
                    ['time_series'])
                if len(time_series_list_details) == 0:
                    continue
                time_series_list_obj = []
                for i in time_series_list_details:
                    unit = 'radians/sec' if 'velocity' in i[
                        'name'] else 'radians'
                    time_series_list_obj.append(
                        time_series_func(**i, unit=unit))
                func = getattr(pynwb.behavior, behavior_datatype)
                self.nwbfile.processing['behavior'].add(
                    func(time_series=time_series_list_obj))
            else:
                time_series_func = pynwb.epoch.TimeIntervals
                time_series_list_details = self._get_data(
                    self.nwb_metadata['Behavior'][behavior_datatype]
                    ['time_intervals'])
                if len(time_series_list_details) == 0:
                    continue
                for k in time_series_list_details:
                    time_intervals = time_series_func('BehavioralEpochs')
                    for time_interval in k['timestamps']:
                        time_intervals.add_interval(
                            start_time=time_interval[0],
                            stop_time=time_interval[1])
                    time_intervals.add_column(k['name'],
                                              k['description'],
                                              data=k['data'])
                    self.nwbfile.processing['behavior'].add(time_intervals)

    def create_acquisition(self):
        """
        Adds acquisition data such as the audio spectrogram (raw behavioral data), nidq (raw ephys data) and raw camera data.
        These are independent of probe type.
        """
        for neurodata_type_name, neurodata_type_args_list in self.nwb_metadata[
                'Acquisition'].items():
            data_retrieved_args_list = self._get_data(neurodata_type_args_list)
            for neurodata_type_args in data_retrieved_args_list:
                if neurodata_type_name == 'ImageSeries':
                    for types, times in zip(neurodata_type_args['data'],
                                            neurodata_type_args['timestamps']):
                        customargs = dict(name='camera_raw',
                                          external_file=[str(types)],
                                          format='external',
                                          timestamps=times,
                                          unit='n.a.')
                        self.nwbfile.add_acquisition(ImageSeries(**customargs))
                elif neurodata_type_name == 'DecompositionSeries':
                    neurodata_type_args['bands'] = np.squeeze(
                        neurodata_type_args['bands'])
                    freqs = DynamicTable(
                        'bands',
                        'spectrogram frequencies',
                        id=np.arange(neurodata_type_args['bands'].shape[0]))
                    freqs.add_column('freq',
                                     'frequency value',
                                     data=neurodata_type_args['bands'])
                    neurodata_type_args.update(dict(bands=freqs))
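                    # reshape data from (time, bands) to (time, channels=1,
                    # bands), the axis order DecompositionSeries expects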
                    temp = neurodata_type_args['data'][:, :, np.newaxis]
                    neurodata_type_args['data'] = np.moveaxis(
                        temp, [0, 1, 2], [0, 2, 1])
                    ts = neurodata_type_args.pop('timestamps')
                    starting_time = ts[0][0] if isinstance(
                        ts[0], np.ndarray) else ts[0]
                    neurodata_type_args.update(
                        dict(starting_time=np.float64(starting_time),
                             rate=1 / np.mean(np.diff(ts.squeeze())),
                             unit='sec'))
                    self.nwbfile.add_acquisition(
                        DecompositionSeries(**neurodata_type_args))
                elif neurodata_type_name == 'ElectricalSeries':
                    if not self.electrode_table_exist:
                        self.create_electrode_table_ecephys()
                    if neurodata_type_args['name'] in ['raw.lf', 'raw.ap']:
                        for probe_no in range(self.no_probes):
                            if neurodata_type_args['data'][probe_no].shape[
                                    1] > self._one_data.data_attrs_dump[
                                        'electrode_table_length'][probe_no]:
                                if 'channels.rawInd' in self._one_data.loaded_datasets:
                                    channel_idx = self._one_data.loaded_datasets[
                                        'channels.rawInd'][
                                            probe_no].data.astype('int')
                                else:
                                    warnings.warn(
                                        'could not find channels.rawInd')
                                    break
                            else:
                                channel_idx = slice(None)
                            self.nwbfile.add_acquisition(
                                ElectricalSeries(
                                    name=neurodata_type_args['name'] + '_' +
                                    self.nwb_metadata['Probes'][probe_no]
                                    ['name'],
                                    starting_time=np.abs(
                                        np.round(
                                            neurodata_type_args['timestamps']
                                            [probe_no][0, 1], 2)
                                    ),  # round starting times of the order of 1e-5
                                    rate=neurodata_type_args['data']
                                    [probe_no].fs,
                                    data=H5DataIO(
                                        DataChunkIterator(
                                            _iter_datasetview(
                                                neurodata_type_args['data']
                                                [probe_no],
                                                channel_ids=channel_idx),
                                            buffer_size=self.buffer_size),
                                        compression=True,
                                        shuffle=self.shuffle,
                                        compression_opts=self.complevel),
                                    electrodes=self.probe_dt_region[probe_no],
                                    channel_conversion=neurodata_type_args[
                                        'data']
                                    [probe_no].channel_conversion_sample2v[
                                        neurodata_type_args['data']
                                        [probe_no].type][channel_idx]))
                    elif neurodata_type_args['name'] in ['raw.nidq']:
                        self.nwbfile.add_acquisition(
                            ElectricalSeries(**neurodata_type_args))

    def create_probes(self):
        """
        Fills in all probe metadata using the custom NeuroPixels extension.
        """
        for i in self.nwb_metadata['Probes']:
            self.nwbfile.add_device(IblProbes(**i))

    def create_iblsubject(self):
        """
        Populates the custom subject extension for IBL mice data
        """
        self.nwbfile.subject = IblSubject(**self.nwb_metadata['IBLSubject'])

    def create_lab_meta_data(self):
        """
        Populates the custom lab_meta_data extension for IBL session data
        """
        self.nwbfile.add_lab_meta_data(
            IblSessionData(**self.nwb_metadata['IBLSessionsData']))

    def create_trials(self):
        table_data = self._get_data(self.nwb_metadata['Trials'])
        required_fields = ['start_time', 'stop_time']
        required_data = [i for i in table_data if i['name'] in required_fields]
        optional_data = [
            i for i in table_data if i['name'] not in required_fields
        ]
        if len(required_fields) != len(required_data):
            warnings.warn(
                'could not find required datasets: trials.start_time, trials.stop_time, '
                'skipping trials table')
            return
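        # start/stop times appear to come from a 2-column intervals array:
        # column 0 holds trial start times and column 1 stop times, and the
        # code assumes required_data preserves the [start_time, stop_time]
        # order of the metadata (both inferred from the indexing below).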
        for start_time, stop_time in zip(required_data[0]['data'][:, 0],
                                         required_data[1]['data'][:, 1]):
            self.nwbfile.add_trial(start_time=start_time, stop_time=stop_time)
        for op_data in optional_data:
            if op_data['data'].shape[0] == required_data[0]['data'].shape[0]:
                self.nwbfile.add_trial_column(
                    name=op_data['name'],
                    description=op_data['description'],
                    data=op_data['data'])
            else:
                warnings.warn(
                    f'shape of trials.{op_data["name"]} does not match other trials.* datasets'
                )

    def _get_data(self, sub_metadata):
        """
        Uses the OneData class to query ONE datasets on the server and download them locally
        Parameters
        ----------
        sub_metadata: [list, dict]
            list of metadata dicts containing a 'data' key whose value is a dataset-type string used to retrieve the data (npy, tsv, etc.)

        Returns
        -------
        out_dict_trim: list
            list of metadata dicts with the actual data loaded into the 'data' field
        """
        include_idx = []
        out_dict_trim = []
        alt_datatypes = ['bands', 'power', 'frequencies', 'timestamps']
        if isinstance(sub_metadata, list):
            out_dict = deepcopy(sub_metadata)
        elif isinstance(sub_metadata, dict):
            out_dict = deepcopy(list(sub_metadata))
        else:
            return []
        req_datatypes = ['data']
        for count, neurodata_type_args in enumerate(out_dict):
            for alt_names in alt_datatypes:
                if neurodata_type_args.get(
                        alt_names
                ):  # in case of DecompositionSeries, Spectrum
                    neurodata_type_args[
                        alt_names] = self._one_data.download_dataset(
                            neurodata_type_args[alt_names],
                            neurodata_type_args['name'])
                    req_datatypes.append(alt_names)
            if neurodata_type_args[
                    'name'] == 'id':  # valid in case of units table.
                neurodata_type_args['data'] = self._one_data.download_dataset(
                    neurodata_type_args['data'], 'cluster_id')
            else:
                out_dict[count]['data'] = self._one_data.download_dataset(
                    neurodata_type_args['data'], neurodata_type_args['name'])
            if all([out_dict[count][i] is not None for i in req_datatypes]):
                include_idx.extend([count])
        out_dict_trim.extend([out_dict[j0] for j0 in include_idx])
        return out_dict_trim

    def run_conversion(self):
        """
        Single method to create all datasets and metadata in nwbfile in one go
        Returns
        -------

        """
        execute_list = [
            self.create_stimulus, self.create_trials,
            self.create_electrode_table_ecephys,
            self.create_timeseries_ecephys, self.create_units,
            self.create_behavior, self.create_probes, self.create_iblsubject,
            self.create_lab_meta_data, self.create_acquisition
        ]
        t = tqdm(execute_list)
        for i in t:
            t.set_postfix(current='creating nwb ' + i.__name__.split('_')[-1])
            i()
        print('done converting')

    def write_nwb(self, read_check=True):
        """
        After run_conversion(), write nwbfile to disk with the loaded nwbfile
        Parameters
        ----------
        read_check: bool
            If True, re-open the written file to verify it reads back (round-trip check)
        """
        print('Saving to file, please wait...')
        with NWBHDF5IO(self.saveloc, 'w') as io:
            io.write(self.nwbfile)
            print('File successfully saved at: ', str(self.saveloc))

        if read_check:
            with NWBHDF5IO(self.saveloc, 'r') as io:
                io.read()
                print('Read check: OK')
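
A minimal standalone sketch of the electrode-table pattern used by create_electrode_table_ecephys() and create_timeseries_ecephys() above: add electrodes bound to an ElectrodeGroup, build a DynamicTableRegion over them, and attach an ElectricalSeries to that region. All names here (probe00, the 4-channel count, the 30 kHz rate) are illustrative assumptions, not values from the converter.

from datetime import datetime

import numpy as np
from dateutil.tz import tzlocal
from pynwb import NWBFile
from pynwb.ecephys import ElectricalSeries

nwbfile = NWBFile(session_description='electrode table sketch',
                  identifier='demo_electrodes',
                  session_start_time=datetime.now(tzlocal()))
device = nwbfile.create_device(name='probe00')
group = nwbfile.create_electrode_group(name='probe00',
                                       description='demo electrode group',
                                       location='unknown',
                                       device=device)
# one electrode-table row per channel, mirroring the add_electrode() loop above
for _ in range(4):
    nwbfile.add_electrode(x=float('NaN'), y=float('NaN'), z=float('NaN'),
                          imp=float('NaN'), location='None',
                          group=group, filtering='none')
# the series references its channels through a DynamicTableRegion
region = nwbfile.create_electrode_table_region(region=list(range(4)),
                                               description='probe00')
series = ElectricalSeries(name='demo_series',
                          data=np.random.randn(100, 4).astype('float32'),
                          starting_time=0.0, rate=30000.0,
                          electrodes=region)
nwbfile.add_acquisition(series)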
Example #26
def export_to_nwb(session_key,
                  nwb_output_dir=default_nwb_output_dir,
                  save=False,
                  overwrite=True):
    this_session = (acquisition.Session & session_key).fetch1()
    # =============== General ====================
    # -- NWB file - a NWB2.0 file for each session
    nwbfile = NWBFile(session_description=this_session['session_note'],
                      identifier='_'.join([
                          this_session['subject_id'],
                          this_session['session_time'].strftime('%Y-%m-%d'),
                          this_session['session_id']
                      ]),
                      session_start_time=this_session['session_time'],
                      file_create_date=datetime.now(tzlocal()),
                      experimenter='; '.join(
                          (acquisition.Session.Experimenter
                           & session_key).fetch('experimenter')),
                      institution=institution,
                      experiment_description=experiment_description,
                      related_publications=related_publications,
                      keywords=keywords)
    # -- subject
    subj = (subject.Subject & session_key).fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=this_session['subject_id'],
        description=subj['subject_description'],
        genotype=' x '.join(
            (subject.Subject.Allele & session_key).fetch('allele')),
        sex=subj['sex'],
        species=subj['species'])
    # =============== Intracellular ====================
    cell = ((intracellular.Cell
             & session_key).fetch1() if len(intracellular.Cell
                                            & session_key) == 1 else None)
    if cell:
        # metadata
        whole_cell_device = nwbfile.create_device(name=cell['device_name'])
        ic_electrode = nwbfile.create_ic_electrode(
            name=cell['cell_id'],
            device=whole_cell_device,
            description='N/A',
            filtering='N/A',
            location='; '.join([
                f'{k}: {str(v)}'
                for k, v in dict((reference.BrainLocation & cell).fetch1(),
                                 depth=cell['recording_depth']).items()
            ]))
        # acquisition - membrane potential
        mp, mp_timestamps = (intracellular.MembranePotential & cell).fetch1(
            'membrane_potential', 'membrane_potential_timestamps')
        nwbfile.add_acquisition(
            pynwb.icephys.PatchClampSeries(name='PatchClampSeries',
                                           electrode=ic_electrode,
                                           unit='mV',
                                           conversion=1e-3,
                                           gain=1.0,
                                           data=mp,
                                           timestamps=mp_timestamps))

        # acquisition - spike train
        spk, spk_timestamps = (intracellular.SpikeTrain & cell).fetch1(
            'spike_train', 'spike_timestamps')
        nwbfile.add_acquisition(
            pynwb.icephys.PatchClampSeries(name='SpikeTrain',
                                           electrode=ic_electrode,
                                           unit='a.u.',
                                           conversion=1e1,
                                           gain=1.0,
                                           data=spk,
                                           timestamps=spk_timestamps))

    # =============== Behavior ====================
    behavior_data = ((behavior.Behavior & session_key).fetch1()
                     if len(behavior.Behavior & session_key) == 1 else None)
    if behavior_data:
        behav_acq = pynwb.behavior.BehavioralTimeSeries(name='behavior')
        nwbfile.add_acquisition(behav_acq)
        [behavior_data.pop(k) for k in behavior.Behavior.primary_key]
        timestamps = behavior_data.pop('behavior_timestamps')

        # get behavior data description from the comments of table definition
        behavior_descriptions = {
            attr:
            re.search(f'(?<={attr})(.*)#(.*)',
                      str(behavior.Behavior.heading)).groups()[-1].strip()
            for attr in behavior_data
        }

        for b_k, b_v in behavior_data.items():
            behav_acq.create_timeseries(name=b_k,
                                        description=behavior_descriptions[b_k],
                                        unit='a.u.',
                                        conversion=1.0,
                                        data=b_v,
                                        timestamps=timestamps)

    # =============== Photostimulation ====================
    photostim = ((stimulation.PhotoStimulation
                  & session_key).fetch1() if len(stimulation.PhotoStimulation
                                                 & session_key) == 1 else None)
    if photostim:
        photostim_device = (stimulation.PhotoStimDevice & photostim).fetch1()
        stim_device = nwbfile.create_device(
            name=photostim_device['device_name'])
        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name='-'.join([photostim['hemisphere'],
                           photostim['brain_region']]),
            device=stim_device,
            excitation_lambda=float(
                (stimulation.PhotoStimulationProtocol
                 & photostim).fetch1('photo_stim_excitation_lambda')),
            location='; '.join([
                f'{k}: {str(v)}' for k, v in (reference.ActionLocation
                                              & photostim).fetch1().items()
            ]),
            description=(stimulation.PhotoStimulationProtocol
                         & photostim).fetch1('photo_stim_notes'))
        nwbfile.add_ogen_site(stim_site)

        if photostim['photostim_timeseries'] is not None:
            nwbfile.add_stimulus(
                pynwb.ogen.OptogeneticSeries(
                    name='_'.join([
                        'photostim_on',
                        photostim['photostim_datetime'].strftime(
                            '%Y-%m-%d_%H-%M-%S')
                    ]),
                    site=stim_site,
                    resolution=0.0,
                    conversion=1e-3,
                    data=photostim['photostim_timeseries'],
                    starting_time=photostim['photostim_start_time'],
                    rate=photostim['photostim_sampling_rate']))

    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes:
    #                                                                       'id', 'start_time' and 'stop_time'.
    # Other trial-related information needs to be added in to the trial-table as additional columns (with column name
    # and column description)
    if len((acquisition.TrialSet & session_key).fetch()) == 1:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo - remove '_trial' prefix (if any)
        trial_columns = [{
            'name':
            tag.replace('trial_', ''),
            'description':
            re.search(
                f'(?<={tag})(.*)#(.*)',
                str((acquisition.TrialSet.Trial *
                     stimulation.TrialPhotoStimInfo
                     ).heading)).groups()[-1].strip()
        } for tag in acquisition.TrialSet.Trial.heading.names
                         if tag not in acquisition.TrialSet.Trial.primary_key +
                         ['start_time', 'stop_time']]

        # Trial Events - discard 'trial_start' and 'trial_stop' as we already have start_time and stop_time
        # also add `_time` suffix to all events
        trial_events = set(((acquisition.TrialSet.EventTime & session_key) -
                            [{
                                'trial_event': 'trial_start'
                            }, {
                                'trial_event': 'trial_stop'
                            }]).fetch('trial_event'))
        event_names = [{
            'name': e + '_time',
            'description': d
        } for e, d in zip(*(reference.ExperimentalEvent & [{
            'event': k
        } for k in trial_events]).fetch('event', 'description'))]
        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns + event_names:
            nwbfile.add_trial_column(**c)

        # Add entry to the trial-table
        for trial in (acquisition.TrialSet.Trial
                      & session_key).fetch(as_dict=True):
            events = dict(
                zip(*(acquisition.TrialSet.EventTime & trial
                      & [{
                          'trial_event': e
                      } for e in trial_events]
                      ).fetch('trial_event', 'event_time')))
            # shift event times to be relative to session_start (currently relative to trial_start)
            events = {k: v + trial['start_time'] for k, v in events.items()}

            trial_tag_value = {**trial, **events}
            # rename 'trial_id' to 'id'
            trial_tag_value['id'] = trial_tag_value['trial_id']
            [
                trial_tag_value.pop(k)
                for k in acquisition.TrialSet.Trial.primary_key
            ]

            # Final tweaks: i) add '_time' suffix and ii) remove 'trial_' prefix
            events = {k + '_time': trial_tag_value.pop(k) for k in events}
            trial_attrs = {
                k.replace('trial_', ''): trial_tag_value.pop(k)
                for k in
                [n for n in trial_tag_value if n.startswith('trial_')]
            }

            nwbfile.add_trial(**trial_tag_value, **events, **trial_attrs)

        # =============== Write NWB 2.0 file ===============
        if save:
            save_file_name = ''.join([nwbfile.identifier, '.nwb'])
            if not os.path.exists(nwb_output_dir):
                os.makedirs(nwb_output_dir)
            if not overwrite and os.path.exists(
                    os.path.join(nwb_output_dir, save_file_name)):
                return nwbfile
            with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name),
                           mode='w') as io:
                io.write(nwbfile)
                print(f'Write NWB 2.0 file: {save_file_name}')

        return nwbfile
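
A hedged usage sketch for export_to_nwb(); the session_key fields below are hypothetical placeholders that must match an existing acquisition.Session entry in the DataJoint pipeline.

# session_key values are placeholders, not real pipeline entries
session_key = {'subject_id': 'subj01',
               'session_time': datetime(2018, 1, 1, 12, 0, 0),
               'session_id': 'sess01'}
nwbfile = export_to_nwb(session_key, save=True, overwrite=False)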
Example #27
class NWBFileTest(unittest.TestCase):
    def setUp(self):
        self.start = datetime(2017, 5, 1, 12, 0, 0)
        self.path = 'nwbfile_test.h5'
        self.nwbfile = NWBFile(
            'a fake source',
            'a test session description for a test NWBFile',
            'FILE123',
            self.start,
            experimenter='A test experimenter',
            lab='a test lab',
            institution='a test institution',
            experiment_description='a test experiment description',
            session_id='test1')

    def test_constructor(self):
        self.assertEqual(self.nwbfile.session_description,
                         'a test session description for a test NWBFile')
        self.assertEqual(self.nwbfile.identifier, 'FILE123')
        self.assertEqual(self.nwbfile.session_start_time, self.start)
        self.assertEqual(self.nwbfile.lab, 'a test lab')
        self.assertEqual(self.nwbfile.experimenter, 'A test experimenter')
        self.assertEqual(self.nwbfile.institution, 'a test institution')
        self.assertEqual(self.nwbfile.experiment_description,
                         'a test experiment description')
        self.assertEqual(self.nwbfile.session_id, 'test1')

    def test_create_electrode_group(self):
        name = 'example_electrode_group'
        desc = 'An example electrode'
        loc = 'an example location'
        d = Device('a fake device', 'a fake source')
        elecgrp = self.nwbfile.create_electrode_group(name, 'a fake source',
                                                      desc, loc, d)
        self.assertEqual(elecgrp.description, desc)
        self.assertEqual(elecgrp.location, loc)
        self.assertIs(elecgrp.device, d)

    def test_epoch_tags(self):
        tags1 = ['t1', 't2']
        tags2 = ['t3', 't4']
        tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)  # np.float was removed from NumPy; the builtin float is equivalent
        ts = TimeSeries("test_ts",
                        "a hypothetical source",
                        list(range(len(tstamps))),
                        'unit',
                        timestamps=tstamps)
        expected_tags = tags1 + tags2
        self.nwbfile.create_epoch('a fake epoch', 0.0, 1.0, tags1, ts)
        self.nwbfile.create_epoch('a second fake epoch', 0.0, 1.0, tags2, ts)
        tags = self.nwbfile.epoch_tags
        six.assertCountEqual(self, expected_tags, tags)

    def test_add_acquisition(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.acquisition), 1)

    def test_add_stimulus(self):
        self.nwbfile.add_stimulus(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus), 1)

    def test_add_stimulus_template(self):
        self.nwbfile.add_stimulus_template(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        self.assertEqual(len(self.nwbfile.stimulus_template), 1)

    def test_add_acquisition_check_dups(self):
        self.nwbfile.add_acquisition(
            TimeSeries('test_ts',
                       'unit test test_add_acquisition', [0, 1, 2, 3, 4, 5],
                       'grams',
                       timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
        with self.assertRaises(ValueError):
            self.nwbfile.add_acquisition(
                TimeSeries('test_ts',
                           'unit test test_add_acquisition',
                           [0, 1, 2, 3, 4, 5],
                           'grams',
                           timestamps=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5]))
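
Note: the test above targets a very old pynwb API (positional 'source' arguments, create_epoch). A rough sketch of the equivalent calls in current pynwb, written as an assumption about how the API evolved rather than a drop-in replacement:

from datetime import datetime

from dateutil.tz import tzlocal
from pynwb import NWBFile, TimeSeries

nwbfile = NWBFile(session_description='a test session description',
                  identifier='FILE123',
                  session_start_time=datetime(2017, 5, 1, 12, 0, 0,
                                              tzinfo=tzlocal()))
ts = TimeSeries(name='test_ts', data=[0, 1, 2], unit='grams',
                timestamps=[0.0, 0.1, 0.2])
nwbfile.add_acquisition(ts)
# create_epoch() was replaced by add_epoch() on the epochs TimeIntervals table
nwbfile.add_epoch(start_time=0.0, stop_time=1.0, tags=['t1', 't2'],
                  timeseries=[ts])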
Example #28
class ABF1Converter:

    """
    Converts Neuron2BrainLab's ABF1 files from a single cell (collected without amplifier settings from the
    multi-clamp commander) to a collective NeurodataWithoutBorders v2 file.

    Modeled after ABFConverter created by the Allen Institute.

    Parameters
    ----------
    inputPath: path to ABF file or a folder of ABF files to be converted
    outputFilePath: path to the output NWB file
    acquisitionChannelName: allows outputting only a specific acquisition channel; defaults to all
    stimulusChannelName: allows outputting only a specific stimulus channel;
                         defaults to all. The name can also be an AD channel name for cases where
                         the stimulus is recorded as well.
    responseGain: user-input float indicating the scalar gain for the response channel
    stimulusGain: user-input float indicating the scalar gain for the stimulus channel
    clampMode: 0 or 1 integer indicating the clamp mode (0 is VC, 1 is CC). If not None, overrides the clamp mode stored in the ABF file
    """

    def __init__(self, inputPath, outputFilePath, acquisitionChannelName=None,
                 stimulusChannelName=None, responseGain=1, stimulusGain=1,
                 responseOffset=0, clampMode=None):

        self.inputPath = inputPath
        self.debug = False

        if os.path.isfile(self.inputPath):
            print(inputPath)

            abf = pyabf.ABF(self.inputPath)
            if abf.abfVersion["major"] != 1:
                raise ValueError(f"The ABF version for the file {abf} is not supported.")

            self.fileNames = [os.path.basename(self.inputPath)]
            self.abfFiles = [abf]

        elif os.path.isdir(self.inputPath):
            abfFiles = []
            for dirpath, dirnames, filenames in os.walk(self.inputPath):

                # Find all .abf files in the directory
                if len(dirnames) == 0 and len(glob.glob(dirpath + "/*.abf")) != 0:
                    abfFiles += glob.glob(dirpath + "/*.abf")

            if len(abfFiles) == 0:
                raise ValueError(f"{inputPath} contains no ABF Files.")

            # Arrange the ABF files in ascending order
            abfFiles.sort(key=lambda x: os.path.basename(x))

            # Collect file names for description
            self.fileNames = []
            for file in abfFiles:
                self.fileNames += [os.path.basename(file)]

            self.abfFiles = []
            for abfFile in abfFiles:
                # Load each ABF file using pyabf
                abf = pyabf.ABF(abfFile)

                # Check for ABF version
                if abf.abfVersion["major"] != 1:
                    raise ValueError(f"The ABF version for the file {abf} is not supported.")

                self.abfFiles += [abf]

        if clampMode is not None:  # 'if clampMode:' would wrongly ignore a valid VC mode of 0
            self.clampMode = clampMode  # the ABF-stored clamp mode is sometimes wrong
        else:
            self.clampMode = self.abfFiles[0]._headerV1.nExperimentType

        self.outputPath = outputFilePath

        # Take metadata input; fall back to hard-coded defaults for None

        self.responseGain = responseGain
        self.stimulusGain = stimulusGain
        self.responseOffset = responseOffset

        self.acquisitionChannelName = acquisitionChannelName
        self.stimulusChannelName = stimulusChannelName

    def _outputMetadata(self):
        """
        Create metadata files in HTML format next to the existing ABF files.
        """

        for abfFile in self.abfFiles:
            root, ext = os.path.splitext(abfFile.abfFilePath)
            pyabf.abfHeaderDisplay.abfInfoPage(abfFile).generateHTML(saveAs=root + ".html")

    def _getComments(self, abf):

        """
        Accesses the tag comments created in Clampfit
        """

        return abf.tagComments

    def _createNWBFile(self):

        """
        Creates the NWB file for the cell, as defined by PyNWB
        """

        self.start_time =  self.abfFiles[0].abfDateTime
        self.inputCellName = os.path.basename(self.inputPath)

        creatorInfo = self.abfFiles[0]._headerV1.sCreatorInfo
        creatorVersion = self.abfFiles[0]._headerV1.creatorVersionString
        experiment_description = (f"{creatorInfo} v{creatorVersion}")

        self.NWBFile = NWBFile(
            session_description="",
            session_start_time=self.start_time,
            experiment_description = experiment_description,
            identifier=self.inputCellName,
            file_create_date= datetime.now(tzlocal()),
            experimenter=None,
            notes=""
        )
        return self.NWBFile

    def _createDevice(self):

        creatorInfo    = self.abfFiles[0]._headerV1.sCreatorInfo
        creatorVersion = self.abfFiles[0]._headerV1.creatorVersionString

        self.device = self.NWBFile.create_device(name=f"{creatorInfo} {creatorVersion}")

    def _createElectrode(self):

        self.electrode = self.NWBFile.create_ic_electrode(name='elec0', device=self.device, description='PLACEHOLDER')

    def _unitConversion(self, unit):

        # Returns a (conversion factor, base unit) tuple

        if unit == 'V':
            return 1.0, 'V'
        elif unit == 'mV':
            return 1e-3, 'V'
        elif unit == 'A':
            return 1.0, 'A'
        elif unit == 'pA':
            return 1e-12, 'A'
        elif unit == 'nA':
            return 1e-9, 'A'
        else:
            # raise ValueError(f"{unit} is not a valid unit.")
            return 1.0, 'V'  # hard coded for units stored as '?'
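
        # e.g. _unitConversion('pA') -> (1e-12, 'A'): NWB stores the data in
        # the recorded unit together with a conversion factor to the SI base
        # unit, so readers multiply data by 'conversion' to obtain amperes.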

    def _getClampMode(self):

        """
        Returns the clamp mode of the experiment.

        Voltage Clamp Mode = 0
        Current Clamp Mode = 1
        """

        return self.clampMode

    def _addStimulus(self):

        """
        Adds a stimulus class as defined by PyNWB to the NWB File.

        Written for experiments conducted from a single channel.
        For multiple channels, refer to https://github.com/AllenInstitute/ipfx/blob/master/ipfx/x_to_nwb/ABFConverter.py
        """

        for idx, abfFile in enumerate(self.abfFiles):

            isStimulus = True

            if self.stimulusChannelName is None:
                channelList = abfFile.adcNames
                channelIndices = range(len(channelList))
            else:
                if self.stimulusChannelName in abfFile.dacNames:
                    channelList = abfFile.dacNames
                    channelIndices = [channelList.index(self.stimulusChannelName)]
                elif self.stimulusChannelName in abfFile.adcNames:
                    isStimulus = False
                    channelList = abfFile.adcNames
                    channelIndices = [channelList.index(self.stimulusChannelName)]
                else:
                    raise ValueError(f"Channel {self.stimulusChannelName} could not be found.")

            for i in range(abfFile.sweepCount):
                for channelIndex in channelIndices:

                    if self.debug:
                        print(f"stimulus: abfFile={abfFile.abfFilePath}, sweep={i}, channelIndex={channelIndex}, channelName={channelList[channelIndex]}")

                    # Collect data from pyABF
                    abfFile.setSweep(i, channel=channelIndex)
                    seriesName = f"Index_{idx}_{i}_{channelIndex}"

                    if isStimulus:
                        data = abfFile.sweepC
                        scaledUnit = abfFile.sweepUnitsC
                    else:
                        data = abfFile.sweepY
                        scaledUnit = abfFile.sweepUnitsY

                    stimulusGain = self.stimulusGain
                    data = data * stimulusGain

                    conversion, unit = self._unitConversion(scaledUnit)
                    electrode = self.electrode
                    resolution = np.nan
                    starting_time = 0.0
                    rate = float(abfFile.dataRate)

                    # Create a JSON file for the description field
                    description = json.dumps({"file_name": os.path.basename(self.fileNames[idx]),
                                              "file_version": abfFile.abfVersionString,
                                              "sweep_number": i,
                                              "protocol": abfFile.protocol,
                                              "protocol_path": abfFile.protocolPath,
                                              "comments": self._getComments(abfFile)},
                                             sort_keys=True, indent=4)

                    # Determine the clamp mode
                    if self.clampMode == 0:
                        stimulusClass = VoltageClampStimulusSeries
                    elif self.clampMode == 1:
                        stimulusClass = CurrentClampStimulusSeries
                    else:
                        raise ValueError(f"Unsupported clamp mode {self.clampMode}")

                    data = createCompressedDataset(data)

                    # Create a stimulus class
                    stimulus = stimulusClass(name=seriesName,
                                             data=data,
                                             sweep_number=i,
                                             electrode=electrode,
                                             gain=stimulusGain,
                                             resolution=resolution,
                                             conversion=conversion,
                                             starting_time=starting_time,
                                             rate=rate,
                                             unit=unit,
                                             description=description
                                             )

                    self.NWBFile.add_stimulus(stimulus)

    def _addAcquisition(self):

        """
        Adds an acquisition class as defined by PyNWB to the NWB File.

        Written for experiments conducted from a single channel.
        For multiple channels, refer to https://github.com/AllenInstitute/ipfx/blob/master/ipfx/x_to_nwb/ABFConverter.py
        """

        for idx, abfFile in enumerate(self.abfFiles):

            if self.acquisitionChannelName is None:
                channelList = abfFile.adcNames
                channelIndices = range(len(channelList))
            else:
                if self.acquisitionChannelName in abfFile.adcNames:
                    channelList = abfFile.adcNames
                    channelIndices = [channelList.index(self.acquisitionChannelName)]
                else:
                    raise ValueError(f"Channel {self.acquisitionChannelName} could not be found.")

            for i in range(abfFile.sweepCount):
                for channelIndex in channelIndices:

                    if self.debug:
                        print(f"acquisition: abfFile={abfFile.abfFilePath}, sweep={i}, channelIndex={channelIndex}, channelName={channelList[channelIndex]}")

                    # Collect data from pyABF
                    abfFile.setSweep(i, channel=channelIndex)
                    seriesName = f"Index_{idx}_{i}_{channelIndex}"
                    responseGain = self.responseGain
                    responseOffset = self.responseOffset

                    data = abfFile.sweepY * responseGain + responseOffset
                    conversion, unit = self._unitConversion(abfFile.sweepUnitsY)
                    electrode = self.electrode
                    resolution = np.nan
                    starting_time = 0.0
                    rate = float(abfFile.dataRate)

                    # Create a JSON file for the description field
                    description = json.dumps({"file_name": os.path.basename(self.fileNames[idx]),
                                              "file_version": abfFile.abfVersionString,
                                              "sweep_number": i,
                                              "protocol": abfFile.protocol,
                                              "protocol_path": abfFile.protocolPath,
                                              "comments": self._getComments(abfFile)},
                                             sort_keys=True, indent=4)

                    # Create an acquisition class
                    # Note: voltage input produces current output; current input produces voltage output

                    data = createCompressedDataset(data)

                    if self.clampMode == 1:
                        acquisition = CurrentClampSeries(name=seriesName,
                                                         data=data,
                                                         sweep_number=i,
                                                         electrode=electrode,
                                                         gain=responseGain,
                                                         resolution=resolution,
                                                         conversion=conversion,
                                                         starting_time=starting_time,
                                                         rate=rate,
                                                         unit=unit,
                                                         description=description,
                                                         bias_current=np.nan,
                                                         bridge_balance=np.nan,
                                                         capacitance_compensation=np.nan,
                                                         )

                    elif self.clampMode == 0:
                        acquisition = VoltageClampSeries(name=seriesName,
                                                         data=data,
                                                         sweep_number=i,
                                                         electrode=electrode,
                                                         gain=responseGain,
                                                         resolution=resolution,
                                                         conversion=conversion,
                                                         starting_time=starting_time,
                                                         rate=rate,
                                                         unit=unit,
                                                         description=description,
                                                         capacitance_fast=np.nan,
                                                         capacitance_slow=np.nan,
                                                         resistance_comp_bandwidth=np.nan,
                                                         resistance_comp_correction=np.nan,
                                                         resistance_comp_prediction=np.nan,
                                                         whole_cell_capacitance_comp=np.nan,
                                                         whole_cell_series_resistance_comp=np.nan
                                                         )
                    else:
                        raise ValueError(f"Unsupported clamp mode {self.clampMode}")

                    self.NWBFile.add_acquisition(acquisition)

    def convert(self):

        """
        Iterates through the functions in the specified order.
        :return: True (for success)
        """

        self._createNWBFile()
        self._createDevice()
        self._createElectrode()
        self._getClampMode()
        self._addStimulus()
        self._addAcquisition()

        with NWBHDF5IO(self.outputPath, "w") as io:
            io.write(self.NWBFile, cache_spec=True)

        print(f"Successfully converted to {self.outputPath}.")
Example #29
def convert(
        input_file,
        session_start_time,
        subject_date_of_birth,
        subject_id='I5',
        subject_description='naive',
        subject_genotype='wild-type',
        subject_sex='M',
        subject_weight='11.6g',
        subject_species='Mus musculus',
        subject_brain_region='Medial Entorhinal Cortex',
        surgery='Probe: +/-3.3mm ML, 0.2mm A of sinus, then as deep as possible',
        session_id='npI5_0417_baseline_1',
        experimenter='Kei Masuda',
        experiment_description='Virtual Hallway Task',
        institution='Stanford University School of Medicine',
        lab_name='Giocomo Lab'):
    """
    Read in the .mat file specified by input_file and convert to .nwb format.

    Parameters
    ----------
    input_file : str
        path to the .mat file to be converted
    subject_id : string
        the unique subject ID number for the subject of the experiment
    subject_date_of_birth : datetime ISO 8601
        the date and time the subject was born
    subject_description : string
        important information specific to this subject that differentiates it from other members of its species
    subject_genotype : string
        the genetic strain of this species.
    subject_sex : string
        Male or Female
    subject_weight : string
        the weight of the subject around the time of the experiment
    subject_species : string
        the name of the species of the subject
    subject_brain_region : string
        the name of the brain region where the electrode probe is recording from
    surgery : str
        information about the subject's surgery to implant electrodes
    session_id: string
        human-readable ID# for the experiment session that has a one-to-one relationship with a recording session
    session_start_time : datetime
        date and time that the experiment started
    experimenter : string
        who ran the experiment, first and last name
    experiment_description : string
        what task was being run during the session
    institution : string
        what institution was the experiment performed in
    lab_name : string
        the lab where the experiment was performed

    Returns
    -------
    nwbfile : NWBFile
        The contents of the .mat file converted into the NWB format. The nwbfile is saved to disk using NWBHDF5IO
    """

    # input matlab data
    matfile = hdf5storage.loadmat(input_file)

    # output path for nwb data
    def replace_last(source_string, replace_what, replace_with):
        head, _sep, tail = source_string.rpartition(replace_what)
        return head + replace_with + tail

    outpath = replace_last(input_file, '.mat', '.nwb')

    create_date = datetime.today()
    timezone_cali = pytz.timezone('US/Pacific')
    create_date_tz = timezone_cali.localize(create_date)

    # if loading data from config.yaml, convert string dates into datetime
    if isinstance(session_start_time, str):
        session_start_time = datetime.strptime(session_start_time,
                                               '%B %d, %Y %I:%M%p')
        session_start_time = timezone_cali.localize(session_start_time)

    if isinstance(subject_date_of_birth, str):
        subject_date_of_birth = datetime.strptime(subject_date_of_birth,
                                                  '%B %d, %Y %I:%M%p')
        subject_date_of_birth = timezone_cali.localize(subject_date_of_birth)

    # create unique identifier for this experimental session
    uuid_identifier = uuid.uuid1()

    # Create NWB file
    nwbfile = NWBFile(
        session_description=experiment_description,  # required
        identifier=uuid_identifier.hex,  # required
        session_id=session_id,
        experiment_description=experiment_description,
        experimenter=experimenter,
        surgery=surgery,
        institution=institution,
        lab=lab_name,
        session_start_time=session_start_time,  # required
        file_create_date=create_date_tz)  # optional

    # add information about the subject of the experiment
    experiment_subject = Subject(subject_id=subject_id,
                                 species=subject_species,
                                 description=subject_description,
                                 genotype=subject_genotype,
                                 date_of_birth=subject_date_of_birth,
                                 weight=subject_weight,
                                 sex=subject_sex)
    nwbfile.subject = experiment_subject

    # adding constants via LabMetaData container
    # constants
    sample_rate = float(matfile['sp'][0]['sample_rate'][0][0][0])
    n_channels_dat = int(matfile['sp'][0]['n_channels_dat'][0][0][0])
    dat_path = matfile['sp'][0]['dat_path'][0][0][0]
    offset = int(matfile['sp'][0]['offset'][0][0][0])
    data_dtype = matfile['sp'][0]['dtype'][0][0][0]
    hp_filtered = bool(matfile['sp'][0]['hp_filtered'][0][0][0])
    vr_session_offset = matfile['sp'][0]['vr_session_offset'][0][0][0]
    # container
    lab_metadata = LabMetaData_ext(name='LabMetaData',
                                   acquisition_sampling_rate=sample_rate,
                                   number_of_electrodes=n_channels_dat,
                                   file_path=dat_path,
                                   bytes_to_skip=offset,
                                   raw_data_dtype=data_dtype,
                                   high_pass_filtered=hp_filtered,
                                   movie_start_time=vr_session_offset)
    nwbfile.add_lab_meta_data(lab_metadata)

    # Adding trial information
    nwbfile.add_trial_column(
        'trial_contrast',
        'visual contrast of the maze through which the mouse is running')
    trial = np.ravel(matfile['trial'])
    trial_nums = np.unique(trial)
    position_time = np.ravel(matfile['post'])
    # matlab trial numbers start at 1. To correctly index the trial_contrast vector,
    # subtract 1 from 'num' so the index starts at 0
    for num in trial_nums:
        trial_times = position_time[trial == num]
        nwbfile.add_trial(start_time=trial_times[0],
                          stop_time=trial_times[-1],
                          trial_contrast=matfile['trial_contrast'][num - 1][0])

    # Add mouse position inside:
    position = Position()
    position_virtual = np.ravel(matfile['posx'])
    # position inside the virtual environment
    sampling_rate = 1 / (position_time[1] - position_time[0])
    position.create_spatial_series(
        name='Position',
        data=position_virtual,
        starting_time=position_time[0],
        rate=sampling_rate,
        reference_frame='The start of the trial, which begins at the start '
        'of the virtual hallway.',
        conversion=0.01,
        description='Subject position in the virtual hallway.',
        comments='The values should be >0 and <400cm. Values greater than '
        '400cm mean that the mouse briefly exited the maze.',
    )

    # physical position on the mouse wheel
    physical_posx = position_virtual
    trial_gain = np.ravel(matfile['trial_gain'])
    for num in trial_nums:
        physical_posx[trial ==
                      num] = physical_posx[trial == num] / trial_gain[num - 1]

    position.create_spatial_series(
        name='PhysicalPosition',
        data=physical_posx,
        starting_time=position_time[0],
        rate=sampling_rate,
        reference_frame='Location on wheel re-referenced to zero '
        'at the start of each trial.',
        conversion=0.01,
        description='Physical location on the wheel measured '
        'since the beginning of the trial.',
        comments='Physical location found by dividing the '
        'virtual position by the "trial_gain"')
    nwbfile.add_acquisition(position)

    # Add timing of lick events, as well as mouse's virtual position during lick event
    lick_events = BehavioralEvents()
    lick_events.create_timeseries(
        'LickEvents',
        data=np.ravel(matfile['lickx']),
        timestamps=np.ravel(matfile['lickt']),
        unit='centimeter',
        description='Subject position in virtual hallway during the lick.')
    nwbfile.add_acquisition(lick_events)

    # Add information on the visual stimulus that was shown to the subject
    # Assumed rate=60 [Hz]. Update if necessary
    # Update external_file to link to Unity environment file
    visualization = ImageSeries(
        name='ImageSeries',
        unit='seconds',
        format='external',
        external_file=['https://unity.com/VR-and-AR-corner'],
        starting_time=vr_session_offset,
        starting_frame=[0],  # one starting frame per entry in external_file
        rate=60.0,
        description='virtual Unity environment that the mouse navigates through'
    )
    nwbfile.add_stimulus(visualization)

    # Add the recording device, a neuropixel probe
    recording_device = nwbfile.create_device(name='neuropixel_probes')
    electrode_group_description = 'single neuropixels probe http://www.open-ephys.org/neuropixels'
    electrode_group_name = 'probe1'

    electrode_group = nwbfile.create_electrode_group(
        electrode_group_name,
        description=electrode_group_description,
        location=subject_brain_region,
        device=recording_device)

    # Add information about each electrode
    xcoords = np.ravel(matfile['sp'][0]['xcoords'][0])
    ycoords = np.ravel(matfile['sp'][0]['ycoords'][0])
    data_filtered_flag = matfile['sp'][0]['hp_filtered'][0][0]
    if data_filtered_flag:
        filter_desc = 'The raw voltage signals from the electrodes were high-pass filtered'
    else:
        filter_desc = 'The raw voltage signals from the electrodes were not high-pass filtered'

    num_recording_electrodes = xcoords.shape[0]
    recording_electrodes = range(0, num_recording_electrodes)

    # create electrode columns for the x,y location on the neuropixels probe;
    # the standard x,y,z locations are reserved for Allen Brain Atlas coordinates
    nwbfile.add_electrode_column('rel_x', 'electrode x-location on the probe')
    nwbfile.add_electrode_column('rel_y', 'electrode y-location on the probe')
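    # Allen Atlas x/y/z coordinates and impedances are not available for this
    # dataset, so those fields are filled with NaN placeholders below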

    for idx in recording_electrodes:
        nwbfile.add_electrode(id=idx,
                              x=np.nan,
                              y=np.nan,
                              z=np.nan,
                              rel_x=float(xcoords[idx]),
                              rel_y=float(ycoords[idx]),
                              imp=np.nan,
                              location='medial entorhinal cortex',
                              filtering=filter_desc,
                              group=electrode_group)

    # Add information about each unit, termed 'cluster' in giocomo data
    # create new columns in unit table
    nwbfile.add_unit_column(
        'quality',
        'labels given to clusters during manual sorting in phy (1=MUA, '
        '2=Good, 3=Unsorted)')

    # cluster information
    cluster_ids = matfile['sp'][0]['cids'][0][0]
    cluster_quality = matfile['sp'][0]['cgs'][0][0]
    # spikes in time
    spike_times = np.ravel(matfile['sp'][0]['st'][0])  # the time of each spike
    spike_cluster = np.ravel(
        matfile['sp'][0]['clu'][0])  # the cluster_id that spiked at that time

    for i, cluster_id in enumerate(cluster_ids):
        unit_spike_times = spike_times[spike_cluster == cluster_id]
        waveforms = matfile['sp'][0]['temps'][0][cluster_id]
        nwbfile.add_unit(id=int(cluster_id),
                         spike_times=unit_spike_times,
                         quality=cluster_quality[i],
                         waveform_mean=waveforms,
                         electrode_group=electrode_group)

    # Add a second Units table to hold the results of automatic spike sorting
    # create the TemplateUnits units table
    template_units = Units(
        name='TemplateUnits',
        description='units assigned during automatic spike sorting')
    template_units.add_column(
        'tempScalingAmps',
        'scaling amplitude applied to the template when extracting spikes',
        index=True)
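    # index=True makes this a ragged (VectorIndex-backed) column: each
    # template fires a different number of spikes, so each row holds a
    # variable-length list of scaling amplitudes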

    # information on extracted spike templates
    spike_templates = np.ravel(matfile['sp'][0]['spikeTemplates'][0])
    spike_template_ids = np.unique(spike_templates)
    # template scaling amplitudes
    temp_scaling_amps = np.ravel(matfile['sp'][0]['tempScalingAmps'][0])

    for i, spike_template_id in enumerate(spike_template_ids):
        template_spike_times = spike_times[spike_templates ==
                                           spike_template_id]
        temp_scaling_amps_per_template = temp_scaling_amps[spike_templates ==
                                                           spike_template_id]
        template_units.add_unit(id=int(spike_template_id),
                                spike_times=template_spike_times,
                                electrode_group=electrode_group,
                                tempScalingAmps=temp_scaling_amps_per_template)

    # create ecephys processing module
    spike_template_module = nwbfile.create_processing_module(
        name='ecephys',
        description='units assigned during automatic spike sorting')

    # add template_units table to processing module
    spike_template_module.add(template_units)
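    # the table is now reachable as nwbfile.processing['ecephys']['TemplateUnits']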

    print(nwbfile)
    print('converted to NWB:N')
    print('saving ...')

    with NWBHDF5IO(outpath, 'w') as io:
        io.write(nwbfile)
        print('saved', outpath)
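
# A minimal read-back check (a sketch, not part of the original example):
# NWBHDF5IO in read mode and DynamicTable.to_dataframe() are standard pynwb
# API; 'outpath' is assumed to be the path written above.
from pynwb import NWBHDF5IO

with NWBHDF5IO(outpath, 'r') as io:
    nwb_in = io.read()
    # trials, electrodes and units round-trip as DynamicTables;
    # to_dataframe() gives a quick spot check of the converted content
    print(nwb_in.trials.to_dataframe().head())
    print(nwb_in.units.to_dataframe().head())
    print(nwb_in.processing['ecephys']['TemplateUnits'].to_dataframe().head())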
Beispiel #30
0
        # (opening of this example truncated in the source; the block below is
        # reconstructed from the parallel current-injection and spike-removal
        # blocks that follow, so field and class names are inferred)
        # acquisition - membrane potential
        mp, mp_start_time, mp_fs = (
            acquisition.IntracellularAcquisition.MembranePotential
            & cell).fetch1('membrane_potential', 'membrane_potential_start_time',
                           'membrane_potential_sampling_rate')
        nwbfile.add_acquisition(
            pynwb.icephys.PatchClampSeries(
                name='membrane_potential',
                electrode=ic_electrode,
                unit='mV',  # TODO: not in pipeline
                conversion=1e-3,
                gain=1.0,  # TODO: not in pipeline
                data=mp,
                starting_time=mp_start_time,
                rate=mp_fs))
        # acquisition - current injection
        current_injection, ci_start_time, ci_fs = (
            acquisition.IntracellularAcquisition.CurrentInjection
            & cell).fetch1('current_injection', 'current_injection_start_time',
                           'current_injection_sampling_rate')
        nwbfile.add_stimulus(
            pynwb.icephys.CurrentClampStimulusSeries(
                name='current_injection',
                electrode=ic_electrode,
                unit='nA',
                conversion=1e-6,
                gain=1.0,
                data=current_injection,
                starting_time=ci_start_time,
                rate=ci_fs))
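        # NB: in NWB, stored samples are scaled into the declared unit via
        # 'conversion' (value_in_unit = data * conversion)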

        # analysis - membrane potential with spikes removed
        mp_wo_spike, mp_start_time, mp_fs = (
            acquisition.IntracellularAcquisition.MembranePotential
            & cell).fetch1('membrane_potential_wo_spike',
                           'membrane_potential_start_time',
                           'membrane_potential_sampling_rate')
        mp_rmv_spike = nwbfile.create_processing_module(
            name='membrane_potential_spike_removal',
            description='Spike removal')
        mp_rmv_spike.add_data_interface(