Example #1
    def save(self, session_object):

        session_type = str(session_object.metadata['session_type'])

        nwbfile = NWBFile(
            session_description=session_type,
            identifier=str(session_object.ophys_experiment_id),
            session_start_time=session_object.metadata['experiment_datetime'],
            file_create_date=pytz.utc.localize(datetime.datetime.now()),
            institution="Allen Institute for Brain Science",
            keywords=[
                "2-photon", "calcium imaging", "visual cortex", "behavior",
                "task"
            ],
            experiment_description=get_expt_description(session_type))

        # Add stimulus_timestamps to NWB in-memory object:
        nwb.add_stimulus_timestamps(nwbfile,
                                    session_object.stimulus_timestamps)

        # Add running data to NWB in-memory object:
        unit_dict = {
            'v_sig': 'V',
            'v_in': 'V',
            'speed': 'cm/s',
            'timestamps': 's',
            'dx': 'cm'
        }
        nwb.add_running_data_df_to_nwbfile(nwbfile,
                                           session_object.running_data_df,
                                           unit_dict)

        # Add stimulus template data to NWB in-memory object:
        for name, image_data in session_object.stimulus_templates.items():
            nwb.add_stimulus_template(nwbfile, image_data, name)

            # Add index for this template to NWB in-memory object:
            nwb_template = nwbfile.stimulus_template[name]
            stimulus_index = session_object.stimulus_presentations[
                session_object.stimulus_presentations['image_set'] ==
                nwb_template.name]
            nwb.add_stimulus_index(nwbfile, stimulus_index, nwb_template)

        # search for omitted rows and add stop_time before writing to NWB file
        set_omitted_stop_time(
            stimulus_table=session_object.stimulus_presentations)

        # Add stimulus presentations data to NWB in-memory object:
        nwb.add_stimulus_presentations(nwbfile,
                                       session_object.stimulus_presentations)

        # Add trials data to NWB in-memory object:
        nwb.add_trials(nwbfile, session_object.trials,
                       TRIAL_COLUMN_DESCRIPTION_DICT)

        # Add licks data to NWB in-memory object:
        if len(session_object.licks) > 0:
            nwb.add_licks(nwbfile, session_object.licks)

        # Add rewards data to NWB in-memory object:
        if len(session_object.rewards) > 0:
            nwb.add_rewards(nwbfile, session_object.rewards)

        # Add max_projection image data to NWB in-memory object:
        nwb.add_max_projection(nwbfile, session_object.max_projection)

        # Add average_image image data to NWB in-memory object:
        nwb.add_average_image(nwbfile, session_object.average_projection)

        # Add segmentation_mask_image image data to NWB in-memory object:
        nwb.add_segmentation_mask_image(nwbfile,
                                        session_object.segmentation_mask_image)

        # Add metadata to NWB in-memory object:
        nwb.add_metadata(nwbfile, session_object.metadata)

        # Add task parameters to NWB in-memory object:
        nwb.add_task_parameters(nwbfile, session_object.task_parameters)

        # Add roi metrics to NWB in-memory object:
        nwb.add_cell_specimen_table(nwbfile,
                                    session_object.cell_specimen_table,
                                    session_object.metadata)

        # Add dff to NWB in-memory object:
        nwb.add_dff_traces(nwbfile, session_object.dff_traces,
                           session_object.ophys_timestamps)

        # Add corrected_fluorescence to NWB in-memory object:
        nwb.add_corrected_fluorescence_traces(
            nwbfile, session_object.corrected_fluorescence_traces)

        # Add motion correction to NWB in-memory object:
        nwb.add_motion_correction(nwbfile, session_object.motion_correction)

        # Write the file:
        with NWBHDF5IO(self.path, 'w') as nwb_file_writer:
            nwb_file_writer.write(nwbfile)

        return nwbfile
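
A minimal read-back sketch for the file written above (the path is hypothetical; NWBHDF5IO is the same pynwb class used in save()):

from pynwb import NWBHDF5IO

# open the file written by save() and load the NWBFile into memory
with NWBHDF5IO('/path/to/session.nwb', 'r') as io:
    nwbfile = io.read()
    print(nwbfile.identifier, nwbfile.session_start_time)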
Example #2
    def build_nwbfile(self):
        description = 'test nwbfile publications'
        identifier = 'TEST_publications'
        self.nwbfile = NWBFile(description, identifier, self.start_time)
        self.nwbfile.related_publications = ('pub1', 'pub2')
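
As a sketch (not part of the test fixture above), related_publications can also be passed directly to the NWBFile constructor:

from datetime import datetime
from dateutil.tz import tzlocal
from pynwb import NWBFile

nwbfile = NWBFile('test nwbfile publications', 'TEST_publications',
                  datetime.now(tzlocal()),
                  related_publications=('pub1', 'pub2'))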
Example #3
    def save(self,
             file_name,
             to32=True,
             order='F',
             imagej=False,
             bigtiff=True,
             excitation_lambda=488.0,
             compress=0,
             var_name_hdf5='mov',
             sess_desc='some_description',
             identifier='some identifier',
             exp_desc='experiment description',
             imaging_plane_description='some imaging plane description',
             emission_lambda=520.0,
             indicator='OGB-1',
             location='brain',
             starting_time=0.,
             experimenter='Dr Who',
             lab_name='',
             institution='',
             experiment_description='Experiment Description',
             session_id='Session ID'):
        """
        Save the timeseries in single precision. Supported formats include
        TIFF, NPZ, AVI, MAT, HDF5/H5, MMAP, and NWB

        Args:
            file_name: str
                name of file. Possible formats are tif, npz, avi, mat,
                hdf5/h5, mmap, and nwb

            to32: Bool
                whether to transform to 32 bits

            order: 'F' or 'C'
                C or Fortran order

            var_name_hdf5: str
                Name of hdf5 file subdirectory

        Raises:
            Exception 'Extension Unknown'

        """
        name, extension = os.path.splitext(file_name)
        extension = extension.lower()
        logging.debug("Parsing extension " + str(extension))

        if extension == '.tif':
            with tifffile.TiffWriter(file_name, bigtiff=bigtiff,
                                     imagej=imagej) as tif:
                for i in range(self.shape[0]):
                    if i % 200 == 0:
                        logging.debug(str(i) + ' frames saved')

                    curfr = self[i].copy()
                    if to32 and not ('float32' in str(self.dtype)):
                        curfr = curfr.astype(np.float32)
                    tif.save(curfr, compress=compress)
        elif extension == '.npz':
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            np.savez(file_name,
                     input_arr=input_arr,
                     start_time=self.start_time,
                     fr=self.fr,
                     meta_data=self.meta_data,
                     file_name=self.file_name)
        elif extension == '.avi':
            codec = None
            try:
                codec = cv2.FOURCC('I', 'Y', 'U', 'V')
            except AttributeError:
                codec = cv2.VideoWriter_fourcc(*'IYUV')
            np.clip(self, np.percentile(self, 1), np.percentile(self, 99),
                    self)
            minn, maxx = np.min(self), np.max(self)
            data = 255 * (self - minn) / (maxx - minn)
            data = data.astype(np.uint8)
            y, x = data[0].shape
            vw = cv2.VideoWriter(file_name,
                                 codec,
                                 self.fr, (x, y),
                                 isColor=True)
            for d in data:
                vw.write(cv2.cvtColor(d, cv2.COLOR_GRAY2BGR))
            vw.release()

        elif extension == '.mat':
            if self.file_name[0] is not None:
                f_name = self.file_name
            else:
                f_name = ''

            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            if self.meta_data[0] is None:
                savemat(
                    file_name, {
                        'input_arr': np.rollaxis(input_arr, axis=0, start=3),
                        'start_time': self.start_time,
                        'fr': self.fr,
                        'meta_data': [],
                        'file_name': f_name
                    })
            else:
                savemat(
                    file_name, {
                        'input_arr': np.rollaxis(input_arr, axis=0, start=3),
                        'start_time': self.start_time,
                        'fr': self.fr,
                        'meta_data': self.meta_data,
                        'file_name': f_name
                    })

        elif extension in ('.hdf5', '.h5'):
            with h5py.File(file_name, "w") as f:
                if to32 and not ('float32' in str(self.dtype)):
                    input_arr = self.astype(np.float32)
                else:
                    input_arr = np.array(self)

                dset = f.create_dataset(var_name_hdf5, data=input_arr)
                dset.attrs["fr"] = self.fr
                dset.attrs["start_time"] = self.start_time
                try:
                    dset.attrs["file_name"] = [
                        a.encode('utf8') for a in self.file_name
                    ]
                except Exception:
                    logging.warning('No file saved')
                if self.meta_data[0] is not None:
                    logging.debug("Metadata for saved file: " +
                                  str(self.meta_data))
                    dset.attrs["meta_data"] = cpk.dumps(self.meta_data)
        elif extension == '.mmap':
            base_name = name

            T = self.shape[0]
            dims = self.shape[1:]
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)

            input_arr = np.transpose(input_arr,
                                     list(range(1,
                                                len(dims) + 1)) + [0])
            input_arr = np.reshape(input_arr, (np.prod(dims), T), order='F')
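            # after the transpose and Fortran-order reshape, frames become
            # columns: a (T, d1, d2) movie is stored as a (d1*d2, T) array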

            fname_tot = memmap_frames_filename(base_name, dims, T, order)
            fname_tot = os.path.join(os.path.split(file_name)[0], fname_tot)
            big_mov = np.memmap(fname_tot,
                                mode='w+',
                                dtype=np.float32,
                                shape=(np.uint64(np.prod(dims)), np.uint64(T)),
                                order=order)

            big_mov[:] = np.asarray(input_arr, dtype=np.float32)
            big_mov.flush()
            del big_mov, input_arr
            return fname_tot
        elif extension == '.nwb':
            if to32 and not ('float32' in str(self.dtype)):
                input_arr = self.astype(np.float32)
            else:
                input_arr = np.array(self)
            # Create NWB file
            nwbfile = NWBFile(sess_desc,
                              identifier,
                              datetime.now(tzlocal()),
                              experimenter=experimenter,
                              lab=lab_name,
                              institution=institution,
                              experiment_description=experiment_description,
                              session_id=session_id)
            # Get the device
            device = Device('imaging_device')
            nwbfile.add_device(device)
            # OpticalChannel
            optical_channel = OpticalChannel('OpticalChannel',
                                             'main optical channel',
                                             emission_lambda=emission_lambda)
            imaging_plane = nwbfile.create_imaging_plane(
                name='ImagingPlane',
                optical_channel=optical_channel,
                description=imaging_plane_description,
                device=device,
                excitation_lambda=excitation_lambda,
                imaging_rate=self.fr,
                indicator=indicator,
                location=location)
            # Images
            image_series = TwoPhotonSeries(name=var_name_hdf5,
                                           dimension=self.shape[1:],
                                           data=input_arr,
                                           imaging_plane=imaging_plane,
                                           starting_frame=[0],
                                           starting_time=starting_time,
                                           rate=self.fr)

            nwbfile.add_acquisition(image_series)

            with NWBHDF5IO(file_name, 'w') as io:
                io.write(nwbfile)

            return file_name

        else:
            logging.error("Extension " + str(extension) + " unknown")
            raise Exception('Extension Unknown')
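
A read-back sketch for the '.hdf5' branch above ('demo.hdf5' is a hypothetical file name; 'mov' is the var_name_hdf5 default):

import h5py

with h5py.File('demo.hdf5', 'r') as f:
    dset = f['mov']
    movie = dset[...]           # the float32 array written by save()
    fr = dset.attrs['fr']       # frame rate stored as an HDF5 attribute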
Example #4
    def build_nwbfile(self):
        description = 'test nwbfile experimenter'
        identifier = 'TEST_experimenter'
        self.nwbfile = NWBFile(description, identifier, self.start_time)
        self.nwbfile.experimenter = ('experimenter1', 'experimenter2')
Example #5
class TestNWBFileHDF5IO(TestCase):
    """ Test reading/writing an NWBFile using HDF5IO """
    def setUp(self):
        """ Set up an NWBFile object with an acquisition TimeSeries, analysis TimeSeries, and a processing module """
        self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
        self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
        self.create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
        self.manager = get_manager()
        self.filename = 'test_nwbfileio.h5'
        self.nwbfile = NWBFile(
            'a test NWB File',
            'TEST123',
            self.start_time,
            timestamps_reference_time=self.ref_time,
            file_create_date=self.create_date,
            experimenter='test experimenter',
            stimulus_notes='test stimulus notes',
            data_collection='test data collection notes',
            experiment_description='test experiment description',
            institution='nomad',
            lab='nolab',
            notes='nonotes',
            pharmacology='nopharmacology',
            protocol='noprotocol',
            related_publications='nopubs',
            session_id='007',
            slices='noslices',
            source_script='nosources',
            surgery='nosurgery',
            virus='novirus',
            source_script_file_name='nofilename')
        self.ts = TimeSeries('test_timeseries',
                             list(range(100, 200, 10)),
                             'SIunit',
                             timestamps=list(range(10)),
                             resolution=0.1)
        self.nwbfile.add_acquisition(self.ts)
        self.ts2 = TimeSeries('test_timeseries2',
                              list(range(200, 300, 10)),
                              'SIunit',
                              timestamps=list(range(10)),
                              resolution=0.1)
        self.nwbfile.add_analysis(self.ts2)
        self.mod = self.nwbfile.create_processing_module(
            'test_module', 'a test module')
        self.ts3 = TimeSeries('test_timeseries2',
                              list(range(100, 200, 10)),
                              'SIunit',
                              timestamps=list(range(10)),
                              resolution=0.1)
        self.mod.add(self.ts3)

    def tearDown(self):
        """ Delete the created test file """
        remove_test_file(self.filename)

    def test_children(self):
        """ Test that the TimeSeries and processing module are children of their respective parents """
        self.assertIn(self.ts, self.nwbfile.children)
        self.assertIn(self.ts2, self.nwbfile.children)
        self.assertIn(self.mod, self.nwbfile.children)
        self.assertIn(self.ts3, self.mod.children)

    def test_write(self):
        """ Test writing the NWBFile using HDF5IO """
        hdf5io = HDF5IO(self.filename, manager=self.manager, mode='a')
        hdf5io.write(self.nwbfile)
        hdf5io.close()
        # TODO add some asserts

    def test_read(self):
        """ Test reading the NWBFile using HDF5IO """
        hdf5io = HDF5IO(self.filename, manager=self.manager, mode='w')
        hdf5io.write(self.nwbfile)
        hdf5io.close()

        hdf5io = HDF5IO(self.filename, manager=self.manager, mode='r')
        container = hdf5io.read()
        self.assertIsInstance(container, NWBFile)
        self.assertEqual(len(container.acquisition), 1)
        self.assertEqual(len(container.analysis), 1)
        for v in container.acquisition.values():
            self.assertIsInstance(v, TimeSeries)
        self.assertContainerEqual(container, self.nwbfile)
        hdf5io.close()
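
The same round trip can be sketched with pynwb's NWBHDF5IO wrapper, which supplies the type manager used above by default (nwbfile here stands for the fixture built in setUp):

from pynwb import NWBHDF5IO

with NWBHDF5IO('test_nwbfileio.h5', 'w') as io:
    io.write(nwbfile)
with NWBHDF5IO('test_nwbfileio.h5', 'r') as io:
    container = io.read()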
Example #6
    def setUpContainer(self):
        """ Return a placeholder NWBFile """
        return NWBFile('placeholder', 'placeholder',
                       datetime(1970, 1, 1, 12, tzinfo=tzutc()))
Example #7
from datetime import datetime

import numpy as np
import pandas as pd
from nwbext_ecog.ecog_manual import CorticalSurfaces, ECoGSubject
from pynwb import NWBFile, TimeSeries, NWBHDF5IO, get_manager
from pynwb.ecephys import ElectricalSeries
from pytz import timezone
from scipy.io.wavfile import read as wavread

# get_manager must come after dynamic imports
manager = get_manager()

external_subject = True

nwbfile = NWBFile('session description',
                  'session identifier',
                  datetime.now().astimezone(),
                  institution='UCSF',
                  lab='Chang Lab')

# electrodes
devices = ['a', 'a', 'a', 'b', 'b', 'b']
locations = ['a location', 'b location']
udevices, inds = np.unique(devices, return_inverse=True)
groups = []
for device_name, location in zip(udevices, locations):
    # Create devices
    device = nwbfile.create_device(device_name)

    # Create electrode groups
    electrode_group = nwbfile.create_electrode_group(
        name=device_name + '_electrodes',
        # the original snippet is truncated here; the description and location
        # below are assumed values completing the required arguments
        description=device_name + ' electrodes',
        location=location,
        device=device)
    groups.append(electrode_group)
Example #8
import sys
import warnings
from pathlib import Path

import pytz
from pynwb import NWBFile, NWBHDF5IO
from pynwb.ecephys import LFP, FilteredEphys

# The PL2 reader classes (PyPL2FileReader, PL2FileInfo, PL2AnalogChannelInfo)
# and the helpers used below (tm_to_datetime, print_error, get_channel_id,
# get_channel_ids_by_metadata_list, add_electrodes,
# pl2_create_electrode_timeseries, pl2_create_timeseries, pl2_add_units)
# are project-local Plexon utilities and are assumed to be importable.


def main():
    # Get absolute path to file
    filename = Path(sys.argv[1]).resolve().as_posix()

    # TODO use with / as context manager - for handling open/closing files
    # TODO update the plexon API to use exceptions instead of this clunky
    # if result == 0 code
    file_reader = PyPL2FileReader()
    file_handle = file_reader.pl2_open_file(filename)

    if (file_handle == 0):
        print_error(file_reader)

    # create the PL2FileInfo instance containing basic header information
    file_info = PL2FileInfo()
    res = file_reader.pl2_get_file_info(file_handle, file_info)

    if (res == 0):
        print_error(file_reader)

    # USER NEEDS TO INPUT:

    # create the NWBFile instance
    session_description = 'Pulvinar recording from McCartney'
    session_identifier = 'M20170127'
    session_start_time = tm_to_datetime(file_info.m_CreatorDateTime)
    timezone = pytz.timezone("America/New_York")
    experimenter = 'Ryan Ly'
    lab = 'Kastner Lab'
    institution = 'Princeton University'
    experiment_description = 'Neural correlates of visual attention across the pulvinar'
    session_id = session_identifier
    data_collection = file_info.m_CreatorComment.decode('ascii')

    session_start_time = timezone.localize(session_start_time)
    nwbfile = NWBFile(session_description=session_description,
                      identifier=session_identifier,
                      session_start_time=session_start_time,
                      experimenter=experimenter,
                      lab=lab,
                      institution=institution,
                      experiment_description=experiment_description,
                      session_id=session_id,
                      data_collection=data_collection)

    # TODO add in the reprocessor metadata from file_info

    # create a recording device instance
    plexon_device_name = file_info.m_CreatorSoftwareName.decode('ascii') + '_v' + \
                         file_info.m_CreatorSoftwareVersion.decode('ascii')
    plexon_device = nwbfile.create_device(name=plexon_device_name)

    eye_trac_device_name = 'ASL_Eye-trac_6_via_' + plexon_device_name
    eye_trac_device = nwbfile.create_device(name=eye_trac_device_name)

    lever_device_name = 'Manual_lever_via_' + plexon_device_name
    lever_device = nwbfile.create_device(name=lever_device_name)
    # TODO update Device to take in metadata information about the Device

    # create electrode groups representing single shanks or other kinds of data
    # create a list of metadata dicts for adding into electrode groups
    electrode_group_metadata = []
    electrode_group_metadata.append({'name': '32ch-array',
                                    'description': '32-channel_array',
                                    'location': 'Pulvinar',
                                    'device': plexon_device,
                                    'channel_ids': range(1, 33)})
    electrode_group_metadata.append({'name': 'test_electrode_1',
                                    'description': 'test_electrode_1',
                                    'location': 'unknown',
                                    'device': plexon_device,
                                    'channel_ids': [97]})
    electrode_group_metadata.append({'name': 'test_electrode_2',
                                    'description': 'test_electrode_2',
                                    'location': 'unknown',
                                    'device': plexon_device,
                                    'channel_ids': [98]})
    electrode_group_metadata.append({'name': 'test_electrode_3',
                                    'description': 'test_electrode_3',
                                    'location': 'unknown',
                                    'device': plexon_device,
                                    'channel_ids': [99]})
    non_electrode_ts_metadata = []
    non_electrode_ts_metadata.append({'name': 'eyetracker_x_voltage',
                                     'description': 'eyetracker_x_voltage',
                                     'location': 'n/a',
                                     'device': eye_trac_device,
                                     'channel_ids': [126]})
    non_electrode_ts_metadata.append({'name': 'eyetracker_y_voltage',
                                     'description': 'eyetracker_y_voltage',
                                     'location': 'n/a',
                                     'device': eye_trac_device,
                                     'channel_ids': [127]})
    non_electrode_ts_metadata.append({'name': 'lever_voltage',
                                     'description': 'lever_voltage',
                                     'location': 'n/a',
                                     'device': lever_device,
                                     'channel_ids': [128]})

    # make an electrode group for every group of channel IDs specified
    electrode_groups = []
    map_electrode_group_to_channel_ids = []
    for egm in electrode_group_metadata:
        print(f'Creating electrode group named "{egm["name"]}"')
        eg = nwbfile.create_electrode_group(name=egm['name'],
                                            description=egm['description'],
                                            location=egm['location'],
                                            device=egm['device'])
        electrode_groups.append(eg)
        map_electrode_group_to_channel_ids.append(egm['channel_ids'])

    # group indices of analog channels in the Plexon file by type, then source
    wb_src_chans = {}
    fp_src_chans = {}
    spkc_src_chans = {}
    ai_src_chans = {}
    aif_src_chans = {}

    for pl2_ind in range(file_info.m_TotalNumberOfAnalogChannels):
        achannel_info = PL2AnalogChannelInfo()
        res = file_reader.pl2_get_analog_channel_info(file_handle, pl2_ind, achannel_info)
        if (res == 0):
            print_error(file_reader)
            break

        if (achannel_info.m_ChannelEnabled and
                achannel_info.m_ChannelRecordingEnabled and
                achannel_info.m_NumberOfValues > 0 and
                achannel_info.m_MaximumNumberOfFragments > 0):
            # store zero-based channel index and electrode channel id
            achan_name = achannel_info.m_Name.decode('ascii')
            if achan_name.startswith('WB'):
                src_chans = wb_src_chans
            elif achan_name.startswith('FP') and get_channel_id(achan_name) <= 125:
                src_chans = fp_src_chans
            elif achan_name.startswith('FP') and get_channel_id(achan_name) > 125:
                src_chans = ai_src_chans
            elif achan_name.startswith('SPKC'):
                src_chans = spkc_src_chans
            elif achan_name.startswith('AIF'):  # test 'AIF' before 'AI'; 'AI' matches both
                src_chans = aif_src_chans
            elif achan_name.startswith('AI'):
                src_chans = ai_src_chans
            else:
                warnings.warn('Unrecognized analog channel: ' + achan_name)
                continue  # skip unrecognized channels rather than stop the scan

            channel_id = get_channel_id(achan_name)

            # src_chans is a dict {Plexon source ID : list of dict ...
            # {'pl2_ind': analog channel ind, 'channel_id': electrode channel ID}}
            chans = {'pl2_ind': pl2_ind, 'channel_id': channel_id}
            if achannel_info.m_Source not in src_chans:
                src_chans[achannel_info.m_Source] = [chans]
            else:
                src_chans[achannel_info.m_Source].append(chans)

    # create electrodes and create a region of indices in the electrode table
    # corresponding to the electrodes used for this type of analog data (WB, FP,
    # SPKC, AI, AIF)
    if wb_src_chans:
        for src, chans in wb_src_chans.items():
            channel_ids_all = [ch['channel_id'] for ch in chans]
            channel_ids_by_group = get_channel_ids_by_metadata_list(
                    channel_ids_all, electrode_group_metadata)
            for channel_ids, eg in zip(channel_ids_by_group, electrode_groups):
                if channel_ids:
                    # find the mapped pl2 index again
                    group_chans = [c for c in chans if c['channel_id'] in channel_ids]
                    add_electrodes(nwbfile, channel_ids, eg)
                    wb_es = pl2_create_electrode_timeseries(nwbfile,
                                                            file_reader,
                                                            file_handle,
                                                            file_info.m_TimestampFrequency,
                                                            group_chans,
                                                            'Wideband_voltages_' + eg.name,
                                                            'Wideband electrodes, group ' + eg.name)
                    nwbfile.add_acquisition(wb_es)

    if fp_src_chans or spkc_src_chans:
        ecephys_module = nwbfile.create_processing_module(name='ecephys',
                                                          description='Processed extracellular electrophysiology data')

    if fp_src_chans:
        # LFP signal can come from multiple Plexon "sources"
        # for src, chans in fp_src_chans.items():
        #     channel_ids_all = [ch['channel_id'] for ch in chans]
        #     channel_ids_by_group = get_channel_ids_by_metadata_list(
        #             channel_ids_all, electrode_group_metadata)
        #
        #     # channel_ids_by_group is a list that parallels
        #     # electrode_groups. it contains a list of lists of channel_ids that
        #     # are used for this type and source of analog data which are part of
        #     # the corresponding electrode group
        #     # for example, if the ElectrodeGroup at index 2 contains electrodes
        #     # with the channel IDs 1-32, and there is analog channel data for
        #     # channel ID 4-6, then channel_ids_by_group[2] would have
        #     # [4, 5, 6].
        #     d = []
        #     for c in channel_ids_all:
        #         for i in range(len(map_channel_ids_to_electrode_groups)):
        #             if c in get_channel_ids_by_metadata_list[i]:
        #                 d[i].append(c)
        #
        #     for channel_ids, eg in zip(channel_ids_by_group, electrode_groups):
        for src, chans in fp_src_chans.items():
            channel_ids = [ch['channel_id'] for ch in chans]
            electrode_group_to_channel_ids = get_channel_ids_by_metadata_list(
                    channel_ids, electrode_group_metadata)
            for channel_ids, eg, egm in zip(electrode_group_to_channel_ids,
                                            electrode_groups,
                                            electrode_group_metadata):
                if channel_ids:
                    group_chans = [c for c in chans if c['channel_id'] in channel_ids]
                    add_electrodes(nwbfile, channel_ids, eg)
                    lfp_es = pl2_create_electrode_timeseries(nwbfile,
                                                             file_reader,
                                                             file_handle,
                                                             file_info.m_TimestampFrequency,
                                                             group_chans,
                                                             'LFP_voltages_' + eg.name,
                                                             ('LFP electrodes, group ' + eg.name +
                                                              '. Low-pass filtering at 200 Hz done online by Plexon data acquisition system.'))

                    print('Adding LFP processing module with electrical series for channel ids [' +
                          ', '.join(str(x) for x in channel_ids) + '] for electrode group ' +
                          eg.name)
                    # TODO add LFP filter properties, though these are not stored in the PL2
                    # file
                    lfp = LFP(lfp_es, name='LFP_' + egm['name'])
                    ecephys_module.add(lfp)

    spkc_es = None
    if spkc_src_chans:
        for src, chans in spkc_src_chans.items():
            channel_ids = [ch['channel_id'] for ch in chans]
            electrode_group_to_channel_ids = get_channel_ids_by_metadata_list(
                    channel_ids, electrode_group_metadata)
            for channel_ids, eg, egm in zip(electrode_group_to_channel_ids,
                                            electrode_groups,
                                            electrode_group_metadata):
                if channel_ids:
                    group_chans = [c for c in chans if c['channel_id'] in channel_ids]
                    add_electrodes(nwbfile, channel_ids, eg)
                    spkc_es = pl2_create_electrode_timeseries(nwbfile,
                                                              file_reader,
                                                              file_handle,
                                                              file_info.m_TimestampFrequency,
                                                              group_chans,
                                                              'High-pass_filtered_voltages_' + egm['name'],
                                                              ('High-pass filtered ("SPKC") electrodes, group ' + egm['name'] +
                                                               '. High-pass filtering at 300 Hz done online by Plexon data acquisition system.'))

                    print('Adding SPKC processing module with electrical series for channel ids [' +
                          ', '.join(str(x) for x in channel_ids) + '] for electrode group ' +
                          egm['name'])
                    # TODO add SPKC filter properties, though these are not stored in the PL2
                    # file
                    filt_ephys = FilteredEphys(spkc_es, name='SPKC_' + egm['name'])
                    ecephys_module.add(filt_ephys)

    if ai_src_chans:
        for src, chans in ai_src_chans.items():
            channel_ids_all = [ch['channel_id'] for ch in chans]
            channel_ids_by_group = get_channel_ids_by_metadata_list(
                    channel_ids_all, non_electrode_ts_metadata)
            for channel_ids, gm in zip(channel_ids_by_group, non_electrode_ts_metadata):
                if channel_ids:
                    # find the mapped pl2 index again
                    pl2_inds = [c['pl2_ind'] for c in chans if c['channel_id'] in channel_ids]
                    ai_es = pl2_create_timeseries(nwbfile,
                                                  file_reader,
                                                  file_handle,
                                                  file_info.m_TimestampFrequency,
                                                  pl2_inds,
                                                  ('Auxiliary_input_' + str(src) +
                                                   '_' + gm['name']),
                                                  ('Auxiliary input, source ' + str(src) +
                                                   ', ' + gm['name']))
                    nwbfile.add_acquisition(ai_es)

    if aif_src_chans:
        for src, chans in aif_src_chans.items():
            channel_ids_all = [ch['channel_id'] for ch in chans]
            channel_ids_by_group = get_channel_ids_by_metadata_list(
                    channel_ids_all, non_electrode_ts_metadata)
            for channel_ids, gm in zip(channel_ids_by_group, non_electrode_ts_metadata):
                if channel_ids:
                    # find the mapped pl2 index again
                    pl2_inds = [c['pl2_ind'] for c in chans if c['channel_id'] in channel_ids]
                    aif_es = pl2_create_timeseries(nwbfile,
                                                  file_reader,
                                                  file_handle,
                                                  file_info.m_TimestampFrequency,
                                                  pl2_inds,
                                                  ('Filtered_auxiliary_input_' + str(src) +
                                                   '_' + gm['name']),
                                                  ('Filtered auxiliary input, source ' + str(src) +
                                                   ', ' + gm['name']))
                    nwbfile.add_acquisition(aif_es)

    #### Spikes ####

    # add these columns to unit table
    nwbfile.add_unit_column('pre_threshold_samples', 'number of samples before threshold')
    nwbfile.add_unit_column('num_samples', 'number of samples for each spike waveform')
    nwbfile.add_unit_column('num_spikes', 'number of spikes')
    nwbfile.add_unit_column('Fs', 'sampling frequency')
    nwbfile.add_unit_column('plx_sort_method', 'sorting method encoded by Plexon')
    nwbfile.add_unit_column('plx_sort_range', 'range of sample indices used in Plexon sorting')
    nwbfile.add_unit_column('plx_sort_threshold', 'voltage threshold used by Plexon sorting')
    nwbfile.add_unit_column('is_unsorted', 'whether this unit is the set of unsorted waveforms')
    nwbfile.add_unit_column('channel_id', 'original recording channel ID')

    # since waveforms are not a 1:1 mapping per unit, use table indexing

    nwbfile.add_unit_column('waveforms', 'waveforms for each spike', index=True)
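    # an indexed column holds a ragged array: each row (unit) may store a
    # different number of waveforms, e.g. (hypothetical call)
    #   nwbfile.add_unit(spike_times=[...], waveforms=[wf0, wf1], ...)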

    # add a unit for each spike channel
    for i in range(file_info.m_TotalNumberOfSpikeChannels):
        pl2_add_units(nwbfile, file_reader, file_handle, i)

    # if spkc_series is not None:
    #     # Plexon does not save the indices of the spike times in the
    #     # high-pass filtered data. So work backwards from the spike times
    #     # first convert spike times to sample indices, accounting for imprecision
    #     spike_inds = spike_ts * schannel_info.m_SamplesPerSecond
    #
    #     # TODO this can be SUPER INEFFICIENT
    #     if not all([math.isclose(x, np.round(x)) for x in spike_inds]):
    #         raise InconsistentInputException()
    #
    #     spike_inds = np.round(spike_inds) # need to account for fragments TODO
    #
    #     ed_module = nwbfile.create_processing_module(name='Plexon online sorted units - all',
    #                                                  description='All units detected online')
    #     print('Adding Event Detection processing module for Electrical Series ' +
    #           f'named {spkc_series.name}')
    #     ed = EventDetection(detection_method="xx", # TODO allow user input
    #                         source_electricalseries=spkc_series,
    #                         source_idx=spike_inds,
    #                         times=spike_ts)
    #     ed_module.add(ed)

    # write NWB file to disk
    out_file = './output/nwb_test.nwb'
    with NWBHDF5IO(out_file, 'w') as io:
        print('Writing to file: ' + out_file)
        io.write(nwbfile)

    # Close the PL2 file
    file_reader.pl2_close_file(file_handle)
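
A read-back sketch for the converted file (same output path as above; Units.to_dataframe() is standard pynwb):

from pynwb import NWBHDF5IO

with NWBHDF5IO('./output/nwb_test.nwb', 'r') as io:
    nwbfile = io.read()
    # one row per unit, including the custom columns added in main()
    print(nwbfile.units.to_dataframe().head())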