Example #1
def add_running_speed_to_nwbfile(nwbfile, running_speed, name='speed', unit='cm/s'):
    ''' Adds running speed data to an NWBFile as a TimeSeries in a processing module

    Parameters
    ----------
    nwbfile : pynwb.NWBFile
        File to which running speeds will be written
    running_speed : RunningSpeed
        Contains attributes 'values' and 'timestamps'
    name : str, optional
        used as name of timeseries object
    unit : str, optional
        SI units of running speed values

    Returns
    -------
    nwbfile : pynwb.NWBFile

    '''

    running_speed_series = pynwb.base.TimeSeries(
        name=name,
        data=running_speed.values,
        timestamps=running_speed.timestamps,
        unit=unit
    )

    running_mod = ProcessingModule('running', 'Running speed processing module')
    nwbfile.add_processing_module(running_mod)

    running_mod.add_data_interface(running_speed_series)

    return nwbfile
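A minimal usage sketch for the function above; the RunningSpeed argument is stood in by a plain namespace carrying the two attributes the function reads, and the file metadata is made up:

from datetime import datetime, timezone
from types import SimpleNamespace

import numpy as np
import pynwb

nwbfile = pynwb.NWBFile(session_description='demo session',
                        identifier='demo_001',
                        session_start_time=datetime.now(timezone.utc))

# Stand-in for a RunningSpeed object (hypothetical test data)
running_speed = SimpleNamespace(values=np.random.rand(100),
                                timestamps=np.linspace(0.0, 10.0, 100))

nwbfile = add_running_speed_to_nwbfile(nwbfile, running_speed)
print(nwbfile.processing['running']['speed'])  # the TimeSeries written above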
Example #2
def add_image(nwbfile, image_data, image_name, module_name, module_description, image_api=None):

    description = '{} image at pixels/cm resolution'.format(image_name)

    if image_api is None:
        image_api = ImageApi

    if isinstance(image_data, sitk.Image):
        data, spacing, unit = image_api.deserialize(image_data)
    elif isinstance(image_data, Image):
        data = image_data.data
        spacing = image_data.spacing
        unit = image_data.unit
    else:
        raise ValueError("Not a supported image_data type: {}".format(type(image_data)))

    assert spacing[0] == spacing[1] and len(spacing) == 2 and unit == 'mm'

    if module_name not in nwbfile.modules:
        ophys_mod = ProcessingModule(module_name, module_description)
        nwbfile.add_processing_module(ophys_mod)
    else:
        ophys_mod = nwbfile.modules[module_name]

    image = GrayscaleImage(image_name, data, resolution=spacing[0] / 10, description=description)

    if 'images' not in ophys_mod.containers:
        images = Images(name='images')
        ophys_mod.add_data_interface(images)
    else:
        images = ophys_mod['images']
    images.add_image(image)

    return nwbfile
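After a successful call the image sits in an Images container inside the requested processing module; a read-back sketch, assuming the names 'ophys' and 'average_image' were passed at write time (both hypothetical):

ophys_mod = nwbfile.processing['ophys']
stored = ophys_mod['images'].images['average_image']  # the GrayscaleImage
print(stored.resolution, stored.description)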
Example #3
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:

        # If there is no rewards data, do not
        # write anything to the NWB file (this
        # is expected for passive sessions)
        if len(self.value['timestamps']) == 0:
            return nwbfile

        reward_volume_ts = TimeSeries(
            name='volume',
            data=self.value['volume'].values,
            timestamps=self.value['timestamps'].values,
            unit='mL'
        )

        autorewarded_ts = TimeSeries(
            name='autorewarded',
            data=self.value['autorewarded'].values,
            timestamps=reward_volume_ts.timestamps,
            unit='mL'
        )

        rewards_mod = ProcessingModule('rewards',
                                       'Rewards processing module')
        rewards_mod.add_data_interface(reward_volume_ts)
        rewards_mod.add_data_interface(autorewarded_ts)
        nwbfile.add_processing_module(rewards_mod)

        return nwbfile
Example #4
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
        running_speed: pd.DataFrame = self.value
        data = running_speed['speed'].values
        timestamps = running_speed['timestamps'].values

        if self._filtered:
            data_interface_name = "speed"
        else:
            data_interface_name = "speed_unfiltered"

        running_speed_series = TimeSeries(name=data_interface_name,
                                          data=data,
                                          timestamps=timestamps,
                                          unit='cm/s')

        if 'running' in nwbfile.processing:
            running_mod = nwbfile.processing['running']
        else:
            running_mod = ProcessingModule('running',
                                           'Running speed processing module')
            nwbfile.add_processing_module(running_mod)

        running_mod.add_data_interface(running_speed_series)

        return nwbfile
Example #5
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
        stimulus_ts = TimeSeries(data=self._value,
                                 name="timestamps",
                                 timestamps=self._value,
                                 unit="s")

        stim_mod = ProcessingModule("stimulus", "Stimulus Times processing")
        stim_mod.add_data_interface(stimulus_ts)
        nwbfile.add_processing_module(stim_mod)

        return nwbfile
Example #6
def append_spike_times(input_nwb_path: PathLike,
                       sweep_spike_times: Dict[int, List[float]],
                       output_nwb_path: Optional[PathLike] = None):
    """
        Appends spike times to an NWB2 file

        Parameters
        ----------

        input_nwb_path: location of the input NWB file without spike times

        sweep_spike_times: dict mapping sweep_num to a list of spike times

        output_nwb_path: optional location to write a new NWB file with
                         spike times; otherwise spike times are appended to
                         the input file

    """

    # Copy to new location
    if output_nwb_path and output_nwb_path != input_nwb_path:
        shutil.copy(input_nwb_path, output_nwb_path)
        nwb_path = output_nwb_path
    else:
        nwb_path = input_nwb_path

    nwb_io = pynwb.NWBHDF5IO(nwb_path, mode='a', load_namespaces=True)
    nwbfile = nwb_io.read()

    spikes_module = "spikes"
    # Add spikes only if not previously added
    if spikes_module not in nwbfile.processing.keys():
        spike_module = ProcessingModule(name=spikes_module,
                                        description='detected spikes')
        for sweep_num, spike_times in sweep_spike_times.items():
            wrapped_spike_times = H5DataIO(data=np.asarray(spike_times),
                                           compression=True)
            ts = TimeSeries(timestamps=wrapped_spike_times,
                            unit='seconds',
                            data=wrapped_spike_times,
                            name=f"Sweep_{sweep_num}")
            spike_module.add_data_interface(ts)

        nwbfile.add_processing_module(spike_module)

        nwb_io.write(nwbfile)
    else:
        raise ValueError("Cannot add spikes times to the nwb file: "
                         "spikes times already exist!")

    nwb_io.close()
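A usage sketch for append_spike_times with hypothetical paths and sweep numbers:

sweep_spike_times = {
    12: [0.153, 0.228, 0.311],  # sweep number -> spike times in seconds
    13: [0.102, 0.415],
}
append_spike_times('/data/session.nwb',
                   sweep_spike_times,
                   output_nwb_path='/data/session_with_spikes.nwb')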
Example #7
    def add_spike_times(self, sweep_spike_times):

        spike_module = ProcessingModule(name='spikes',
                                        description='detected spikes')

        for sweep_num, spike_times in sweep_spike_times.items():
            # store spike times as both data and timestamps, as in the previous example
            ts = TimeSeries(name=f"Sweep_{sweep_num}",
                            data=spike_times,
                            timestamps=spike_times,
                            unit='seconds')
            spike_module.add_data_interface(ts)

        self.nwbfile.add_processing_module(spike_module)

        io = pynwb.NWBHDF5IO(self.nwb_file_name, 'w')
        io.write(self.nwbfile)
        io.close()
Example #8
def add_stimulus_timestamps(nwbfile,
                            stimulus_timestamps,
                            module_name='stimulus'):
    stimulus_ts = TimeSeries(data=stimulus_timestamps,
                             name='timestamps',
                             timestamps=stimulus_timestamps,
                             unit='s')

    stim_mod = ProcessingModule(module_name, 'Stimulus Times processing')

    nwbfile.add_processing_module(stim_mod)
    stim_mod.add_data_interface(stimulus_ts)

    return nwbfile
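A usage sketch, assuming an in-memory nwbfile and a made-up 60 Hz presentation clock:

import numpy as np

stimulus_timestamps = np.arange(0.0, 60.0, 1.0 / 60.0)  # hypothetical 60 Hz clock
nwbfile = add_stimulus_timestamps(nwbfile, stimulus_timestamps)
print(nwbfile.processing['stimulus']['timestamps'].data[:5])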
Example #9
def add_running_speed_to_nwbfile(nwbfile,
                                 running_speed,
                                 name='speed',
                                 unit='cm/s',
                                 from_dataframe=False):
    ''' Adds running speed data to an NWBFile as a TimeSeries in a processing module

    Parameters
    ----------
    nwbfile : pynwb.NWBFile
        File to which running speeds will be written
    running_speed : Union[RunningSpeed, pd.DataFrame]
        Either a RunningSpeed object or pandas DataFrame.
        Contains attributes 'values' and 'timestamps'
    name : str, optional
        Used as name of timeseries object
    unit : str, optional
        SI units of running speed values
    from_dataframe : bool, optional
        Whether `running_speed` is a dataframe or not. Default is False.

    Returns
    -------
    nwbfile : pynwb.NWBFile

    '''

    if from_dataframe:
        data = running_speed['speed'].values
        timestamps = running_speed['timestamps'].values
    else:
        data = running_speed.values
        timestamps = running_speed.timestamps

    running_speed_series = pynwb.base.TimeSeries(name=name,
                                                 data=data,
                                                 timestamps=timestamps,
                                                 unit=unit)

    if 'running' in nwbfile.processing:
        running_mod = nwbfile.processing['running']
    else:
        running_mod = ProcessingModule('running',
                                       'Running speed processing module')
        nwbfile.add_processing_module(running_mod)

    running_mod.add_data_interface(running_speed_series)

    return nwbfile
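Since the function branches on from_dataframe, both call styles work; a sketch of the DataFrame path with made-up values:

import pandas as pd

running_df = pd.DataFrame({
    'timestamps': [0.000, 0.033, 0.066],
    'speed': [0.0, 2.5, 4.1],  # cm/s
})
nwbfile = add_running_speed_to_nwbfile(nwbfile, running_df, from_dataframe=True)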
Example #10
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
        lick_timeseries = TimeSeries(
            name='licks',
            data=self.value['frame'].values,
            timestamps=self.value['timestamps'].values,
            description=('Timestamps and stimulus presentation '
                         'frame indices for lick events'),
            unit='N/A')

        # Add lick interface to nwb file, by way of a processing module:
        licks_mod = ProcessingModule('licking',
                                     'Licking behavior processing module')
        licks_mod.add_data_interface(lick_timeseries)
        nwbfile.add_processing_module(licks_mod)

        return nwbfile
Example #11
def add_licks(nwbfile, licks):

    licks_event_series = TimeSeries(data=licks.time.values,
                                    name='timestamps',
                                    timestamps=licks.time.values,
                                    unit='s')

    # Add lick event timeseries to lick interface:
    licks_interface = BehavioralEvents([licks_event_series], 'licks')

    # Add lick interface to nwb file, by way of a processing module:
    licks_mod = ProcessingModule('licking',
                                 'Licking behavior processing module')
    licks_mod.add_data_interface(licks_interface)
    nwbfile.add_processing_module(licks_mod)

    return nwbfile
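Unlike the plain-TimeSeries examples, this variant nests the series inside a BehavioralEvents interface, which changes the read-back path; a sketch assuming the names used above:

licking_mod = nwbfile.processing['licking']
lick_events = licking_mod['licks']                # the BehavioralEvents interface
lick_ts = lick_events.time_series['timestamps']   # the TimeSeries inside it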
Example #12
def add_licks(nwbfile, licks):

    lick_timeseries = TimeSeries(
        name='licks',
        data=licks.frame.values,
        timestamps=licks.timestamps.values,
        description=('Timestamps and stimulus presentation '
                     'frame indices for lick events'),
        unit='N/A')

    # Add lick interface to nwb file, by way of a processing module:
    licks_mod = ProcessingModule('licking',
                                 'Licking behavior processing module')
    licks_mod.add_data_interface(lick_timeseries)
    nwbfile.add_processing_module(licks_mod)

    return nwbfile
Example #13
def add_rewards(nwbfile, rewards_df):
    reward_volume_ts = TimeSeries(name='volume',
                                  data=rewards_df.volume.values,
                                  timestamps=rewards_df['timestamps'].values,
                                  unit='mL')

    autorewarded_ts = TimeSeries(name='autorewarded',
                                 data=rewards_df.autorewarded.values,
                                 timestamps=reward_volume_ts.timestamps,
                                 unit='mL')

    rewards_mod = ProcessingModule('rewards',
                                   'Rewards processing module')
    rewards_mod.add_data_interface(reward_volume_ts)
    rewards_mod.add_data_interface(autorewarded_ts)
    nwbfile.add_processing_module(rewards_mod)

    return nwbfile
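A sketch of the DataFrame the function expects, with made-up reward events; 'timestamps', 'volume', and 'autorewarded' are the columns read above:

import pandas as pd

rewards_df = pd.DataFrame({
    'timestamps': [12.3, 45.6],
    'volume': [0.007, 0.007],       # mL per reward
    'autorewarded': [False, True],
})
nwbfile = add_rewards(nwbfile, rewards_df)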
Example #14
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:
        reward_volume_ts = TimeSeries(
            name='volume',
            data=self.value['volume'].values,
            timestamps=self.value['timestamps'].values,
            unit='mL')

        autorewarded_ts = TimeSeries(name='autorewarded',
                                     data=self.value['autorewarded'].values,
                                     timestamps=reward_volume_ts.timestamps,
                                     unit='mL')

        rewards_mod = ProcessingModule('rewards',
                                       'Rewards processing module')
        rewards_mod.add_data_interface(reward_volume_ts)
        rewards_mod.add_data_interface(autorewarded_ts)
        nwbfile.add_processing_module(rewards_mod)

        return nwbfile
Example #15
def add_running_acquisition_to_nwbfile(nwbfile,
                                       running_acquisition_df: pd.DataFrame):

    running_dx_series = TimeSeries(
        name='dx',
        data=running_acquisition_df['dx'].values,
        timestamps=running_acquisition_df.index.values,
        unit='cm',
        description=(
            'Running wheel angular change, computed during data collection'))

    v_sig = TimeSeries(
        name='v_sig',
        data=running_acquisition_df['v_sig'].values,
        timestamps=running_acquisition_df.index.values,
        unit='V',
        description='Voltage signal from the running wheel encoder')

    v_in = TimeSeries(
        name='v_in',
        data=running_acquisition_df['v_in'].values,
        timestamps=running_acquisition_df.index.values,
        unit='V',
        description=(
            'The theoretical maximum voltage that the running wheel encoder '
            'will reach prior to "wrapping". This should '
            'theoretically be 5V (after crossing 5V goes to 0V, or '
            'vice versa). In practice the encoder does not always '
            'reach this value before wrapping, which can cause '
            'transient spikes in speed at the voltage "wraps".'))

    if 'running' in nwbfile.processing:
        running_mod = nwbfile.processing['running']
    else:
        running_mod = ProcessingModule('running',
                                       'Running speed processing module')
        nwbfile.add_processing_module(running_mod)

    running_mod.add_data_interface(running_dx_series)
    nwbfile.add_acquisition(v_sig)
    nwbfile.add_acquisition(v_in)

    return nwbfile
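Note the split above: the derived dx trace lands in the 'running' processing module while the raw encoder voltages go to acquisition; a read-back sketch assuming that layout:

dx_series = nwbfile.processing['running']['dx']
v_sig_series = nwbfile.acquisition['v_sig']
v_in_series = nwbfile.acquisition['v_in']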
Example #16
def add_image_to_nwb(nwbfile: NWBFile, image_data: Image, image_name: str):
    """
    Adds image given by image_data with name image_name to nwbfile

    Parameters
    ----------
    nwbfile
        nwbfile to add image to
    image_data
        The image data
    image_name
        Image name

    Returns
    -------
    None
    """
    module_name = 'ophys'
    description = '{} image at pixels/cm resolution'.format(image_name)

    data, spacing, unit = image_data

    assert spacing[0] == spacing[1] and len(spacing) == 2 and unit == 'mm'

    if module_name not in nwbfile.processing:
        ophys_mod = ProcessingModule(module_name, 'Ophys processing module')
        nwbfile.add_processing_module(ophys_mod)
    else:
        ophys_mod = nwbfile.processing[module_name]

    image = GrayscaleImage(image_name,
                           data,
                           resolution=spacing[0] / 10,
                           description=description)

    if 'images' not in ophys_mod.containers:
        images = Images(name='images')
        ophys_mod.add_data_interface(images)
    else:
        images = ophys_mod['images']
    images.add_image(image)
Example #17
def add_motion_correction(nwbfile, motion_correction):

    twop_module = nwbfile.modules['two_photon_imaging']
    ophys_timestamps = twop_module.get_data_interface(
        'dff').roi_response_series['traces'].timestamps

    t1 = TimeSeries(name='x',
                    data=motion_correction['x'].values,
                    timestamps=ophys_timestamps,
                    unit='pixels')

    t2 = TimeSeries(name='y',
                    data=motion_correction['y'].values,
                    timestamps=ophys_timestamps,
                    unit='pixels')

    motion_module = ProcessingModule('motion_correction',
                                     'Motion Correction processing module')
    motion_module.add_data_interface(t1)
    motion_module.add_data_interface(t2)
    nwbfile.add_processing_module(motion_module)
Example #18
    def to_nwb(self, nwbfile: NWBFile) -> NWBFile:

        # If there is no lick data, do not write
        # anything to the NWB file (this is
        # expected for passive sessions)
        if len(self.value['frame']) == 0:
            return nwbfile

        lick_timeseries = TimeSeries(
            name='licks',
            data=self.value['frame'].values,
            timestamps=self.value['timestamps'].values,
            description=('Timestamps and stimulus presentation '
                         'frame indices for lick events'),
            unit='N/A')

        # Add lick interface to nwb file, by way of a processing module:
        licks_mod = ProcessingModule('licking',
                                     'Licking behavior processing module')
        licks_mod.add_data_interface(lick_timeseries)
        nwbfile.add_processing_module(licks_mod)

        return nwbfile
Example #19
    def test_read_nwb_nwb_image_series_successfully(self):
        device_1 = Device('device1')
        device_2 = Device('device2')
        mock_timestamps = [1, 2, 3]
        mock_external_file = ['some file']

        nwb_image_series = NwbImageSeries(name='NwbImageSeries1',
                                          timestamps=mock_timestamps,
                                          external_file=mock_external_file,
                                          devices=[device_1, device_2])

        behavioral_time_series = BehavioralEvents(name="BehavioralTimeSeries")
        behavioral_time_series.add_timeseries(nwb_image_series)
        processing_module = ProcessingModule(name='ProcessingModule',
                                             description='')
        processing_module.add_data_interface(behavioral_time_series)
        self.nwb_file_content.add_processing_module(processing_module)

        self.nwb_file_content.add_stimulus_template(nwb_image_series)

        nwb_file_handler = NWBHDF5IO('nwb_image_series.nwb', mode='w')
        nwb_file_handler.write(self.nwb_file_content)
        nwb_file_handler.close()

        self.assertTrue(os.path.exists('nwb_image_series.nwb'))
        with pynwb.NWBHDF5IO('nwb_image_series.nwb', 'r',
                             load_namespaces=True) as nwb_file_handler:
            nwb_file = nwb_file_handler.read()
            self.assertContainerEqual(
                nwb_file.stimulus_template['NwbImageSeries1'],
                nwb_image_series)
            self.assertContainerEqual(
                nwb_file.processing['ProcessingModule'].data_interfaces[
                    'BehavioralTimeSeries'].time_series['NwbImageSeries1'],
                nwb_image_series)

        self.delete_nwb('nwb_image_series')
Example #20
nwbfile.add_processing_module(added_mod)

####################
# You can add data to your processing module using the method
# :py:func:`~pynwb.base.ProcessingModule.add_data_interface`.
# Let's make another :py:class:`~pynwb.base.TimeSeries` and then add it to the
# :py:class:`~pynwb.base.ProcessingModule` we just added.

data = list(range(0, 100, 10))
timestamps = list(range(10))
mod_ts = TimeSeries('ts_for_mod',
                    'PyNWB tutorial',
                    data,
                    'SIunit',
                    timestamps=timestamps)
added_mod.add_data_interface(mod_ts)

####################
# .. _basic_epochs:
#
# Epochs
# ------
#
# Epochs can be added to an NWB file using the method :py:func:`~pynwb.file.NWBFile.create_epoch`.
# The first argument is a description of the epoch, the second and third argument are the start time
# and stop time, respectively. The fourth argument is one or more tags for labelling the epoch,
# and the fifth argument is a list of all the :py:class:`~pynwb.base.TimeSeries` that the epoch applies
# to.

nwbfile.create_epoch('the first epoch', 2.0, 4.0, ['first', 'example'],
                     [test_ts, mod_ts])
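####################
# .. note::
#     The snippet above targets an older PyNWB release. In current PyNWB the
#     epoch API is :py:func:`~pynwb.file.NWBFile.add_epoch`, which takes
#     keyword arguments; a rough sketch of the equivalent call (untested
#     against the rest of this tutorial):
#
#     .. code-block:: python
#
#         nwbfile.add_epoch(start_time=2.0, stop_time=4.0,
#                           tags=['first', 'example'],
#                           timeseries=[test_ts, mod_ts])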
Example #21
def no2nwb(NOData, session_use, subjects):

    # Prepare the NO data that will be converted to the NWB format

    session = NOData.sessions[session_use]
    events = NOData._get_event_data(session_use, experiment_type='All')
    cell_ids = NOData.ls_cells(session_use)
    experiment_id_learn = session['experiment_id_learn']
    experiment_id_recog = session['experiment_id_recog']
    task_descr = session['task_descr']

    # Get the metadata for the subject
    df_session = subjects[subjects['session_id'] == session_use]

    print('session_use')
    print(session_use)
    print('age')
    print(str(df_session['age'].values[0]))
    print('epilepsy_diagnosis')
    print(str(df_session['epilepsy_diagnosis'].values[0]))

    nwb_subject = Subject(
        age=str(df_session['age'].values[0]),
        description=df_session['epilepsy_diagnosis'].values[0],
        sex=df_session['sex'].values[0],
        subject_id=df_session['subject_id'].values[0])

    # Create the NWB file
    nwbfile = NWBFile(
        #source='https://datadryad.org/bitstream/handle/10255/dryad.163179/RecogMemory_MTL_release_v2.zip',
        session_description='RecogMemory dataset session use 5' + session['session'],
        identifier=session['session_id'],
        session_start_time=datetime.datetime.now(),# TODO: need to check out the time for session start
        file_create_date=datetime.datetime.now(),
        experiment_description="learning: " + str(experiment_id_learn) + ", " + \
                               "recognition: " + \
                               str(experiment_id_recog),
        subject=nwb_subject
    )

    # Add event and experiment_id acquisition
    # event_ts = TimeSeries(name='events', source='NA', unit='NA', data=np.asarray(events[1].values),
    #                       timestamps=np.asarray(events[0].values))

    event_ts = TimeSeries(name='events',
                          unit='NA',
                          data=np.asarray(events[1].values),
                          timestamps=np.asarray(events[0].values))
    # experiment_ids = TimeSeries(name='experiment_ids', source='NA', unit='NA', data=np.asarray(events[2]),
    #                             timestamps=np.asarray(events[0].values))
    experiment_ids = TimeSeries(name='experiment_ids',
                                unit='NA',
                                data=np.asarray(events[2]),
                                timestamps=np.asarray(events[0].values))
    nwbfile.add_acquisition(event_ts)
    nwbfile.add_acquisition(experiment_ids)

    # Add stimuli to the NWB file
    # Get the first cell from the cell list
    cell = NOData.pop_cell(session_use, NOData.ls_cells(session_use)[0])
    trials = cell.trials
    stimuli_recog_path = [trial.file_path_recog for trial in trials]
    stimuli_learn_path = [trial.file_path_learn for trial in trials]

    # Add stimuli recog
    counter = 1
    for path in stimuli_recog_path:
        folders = path.split('\\')
        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)
        name = 'stimuli_recog_' + str(counter)
        stimulus_recog = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_recog)
        counter += 1

    # Add stimuli learn
    counter = 1
    for path in stimuli_learn_path:
        if path == 'NA':
            continue
        folders = path.split('\\')

        path = os.path.join('./RecogMemory_MTL_release_v2', 'Stimuli',
                            folders[0], folders[1], folders[2])
        img = cv2.imread(path)

        name = 'stimuli_learn_' + str(counter)

        stimulus_learn = ImageSeries(name=name,
                                     data=img,
                                     unit='NA',
                                     format='',
                                     timestamps=[0.0])

        nwbfile.add_stimulus(stimulus_learn)

        counter += 1

    # Add epochs and trials: storing start and end times for a stimulus

    # First extract the category ids and names that we need
    # The metadata for each trial will be stored in a trial table

    cat_id_recog = [trial.category_recog for trial in trials]
    cat_name_recog = [trial.category_name_recog for trial in trials]
    cat_id_learn = [trial.category_learn for trial in trials]
    cat_name_learn = [trial.category_name_learn for trial in trials]

    # Extract the event timestamps
    events_learn_stim_on = events[(events[2] == experiment_id_learn) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_learn_stim_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_learn_delay1_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay1_off'])]
    events_learn_delay2_off = events[(events[2] == experiment_id_learn) & (
        events[1] == NOData.markers['delay2_off'])]

    events_recog_stim_on = events[(events[2] == experiment_id_recog) &
                                  (events[1] == NOData.markers['stimulus_on'])]
    events_recog_stim_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['stimulus_off'])]
    events_recog_delay1_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay1_off'])]
    events_recog_delay2_off = events[(events[2] == experiment_id_recog) & (
        events[1] == NOData.markers['delay2_off'])]

    # Extract new_old label
    new_old_recog = [trial.new_old_recog for trial in trials]

    # Create the trial tables
    nwbfile.add_trial_column('stim_on', 'the time when the stimulus is shown')
    nwbfile.add_trial_column('stim_off', 'the time when the stimulus is off')
    nwbfile.add_trial_column('delay1_off', 'the time when delay1 is off')
    nwbfile.add_trial_column('delay2_off', 'the time when delay2 is off')
    nwbfile.add_trial_column('stim_phase',
                             'learning/recognition phase during the trial')
    nwbfile.add_trial_column('category_id', 'the category id of the stimulus')
    nwbfile.add_trial_column('category_name',
                             'the category name of the stimulus')
    nwbfile.add_trial_column('external_image_file',
                             'the file path to the stimulus')
    nwbfile.add_trial_column('new_old_labels_recog',
                             'labels for new or old stimulus')

    range_recog = np.amin([
        len(events_recog_stim_on),
        len(events_recog_stim_off),
        len(events_recog_delay1_off),
        len(events_recog_delay2_off)
    ])
    range_learn = np.amin([
        len(events_learn_stim_on),
        len(events_learn_stim_off),
        len(events_learn_delay1_off),
        len(events_learn_delay2_off)
    ])

    # Iterate the event list and add information into each epoch and trial table
    for i in range(range_learn):
        # nwbfile.create_epoch(start_time=events_learn_stim_on.iloc[i][0],
        #                      stop_time=events_learn_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_learn',
        #                      description='learning phase stimulus')

        # nwbfile.add_trial({'start': events_learn_stim_on.iloc[i][0],
        #                    'end': events_learn_delay2_off.iloc[i][0],
        #                    'stim_on': events_learn_stim_on.iloc[i][0],
        #                    'stim_off': events_learn_stim_off.iloc[i][0],
        #                    'delay1_off': events_learn_delay1_off.iloc[i][0],
        #                    'delay2_off': events_learn_delay2_off.iloc[i][0],
        #                    'stim_phase': 'learn',
        #                    'category_id': cat_id_learn[i],
        #                    'category_name': cat_name_learn[i],
        #                    'external_image_file': stimuli_learn_path[i],
        #                    'new_old_labels_recog': -1})

        nwbfile.add_trial(start_time=events_learn_stim_on.iloc[i][0],
                          stop_time=events_learn_delay2_off.iloc[i][0],
                          stim_on=events_learn_stim_on.iloc[i][0],
                          stim_off=events_learn_stim_off.iloc[i][0],
                          delay1_off=events_learn_delay1_off.iloc[i][0],
                          delay2_off=events_learn_delay2_off.iloc[i][0],
                          stim_phase='learn',
                          category_id=cat_id_learn[i],
                          category_name=cat_name_learn[i],
                          external_image_file=stimuli_learn_path[i],
                          new_old_labels_recog='NA')

    for i in range(range_recog):
        # nwbfile.create_epoch(start_time=events_recog_stim_on.iloc[i][0],
        #                      stop_time=events_recog_stim_off.iloc[i][0],
        #                      timeseries=[event_ts, experiment_ids],
        #                      tags='stimulus_recog',
        #                      description='recognition phase stimulus')

        nwbfile.add_trial(start_time=events_recog_stim_on.iloc[i][0],
                          stop_time=events_recog_delay2_off.iloc[i][0],
                          stim_on=events_recog_stim_on.iloc[i][0],
                          stim_off=events_recog_stim_off.iloc[i][0],
                          delay1_off=events_recog_delay1_off.iloc[i][0],
                          delay2_off=events_recog_delay2_off.iloc[i][0],
                          stim_phase='recog',
                          category_id=cat_id_recog[i],
                          category_name=cat_name_recog[i],
                          external_image_file=stimuli_recog_path[i],
                          new_old_labels_recog=new_old_recog[i])

    # Add the waveform clustering and the spike data.
    # Create necessary processing modules for different kinds of waveform data
    clustering_processing_module = ProcessingModule(
        'Spikes', 'The spike data contained')
    clusterWaveform_learn_processing_module = ProcessingModule(
        'MeanWaveforms_learn',
        'The mean waveforms for the clustered raw signal for learning phase')
    clusterWaveform_recog_processing_module = ProcessingModule(
        'MeanWaveforms_recog',
        'The mean waveforms for the clustered raw signal for recognition phase'
    )
    IsolDist_processing_module = ProcessingModule('IsoDist', 'The IsolDist')
    SNR_processing_module = ProcessingModule('SNR', 'SNR (signal-to-noise)')
    # Get the unique channel ids that we will iterate over
    channel_ids = np.unique([cell_id[0] for cell_id in cell_ids])

    # Iterate over the channel list
    for channel_id in channel_ids:
        cell_name = 'A' + str(channel_id) + '_cells.mat'
        file_path = os.path.join('RecogMemory_MTL_release_v2', 'Data',
                                 'sorted', session['session'], task_descr,
                                 cell_name)
        try:
            cell_mat = loadmat(file_path)
        except FileNotFoundError:
            print("File not found")
            continue
        spikes = cell_mat['spikes']
        meanWaveform_recog = cell_mat['meanWaveform_recog']
        meanWaveform_learn = cell_mat['meanWaveform_learn']
        IsolDist_SNR = cell_mat['IsolDist_SNR']

        spike_id = np.asarray([spike[0] for spike in spikes])
        spike_cluster_id = np.asarray([spike[1] for spike in spikes])
        spike_timestamps = np.asarray([spike[2] / 1000000 for spike in spikes])
        clustering = Clustering(description='Spikes of the channel detected',
                                num=spike_id,
                                peak_over_rms=np.asarray([0]),
                                times=spike_timestamps,
                                name='channel' + str(channel_id))
        clustering_processing_module.add_data_interface(clustering)

        for i in range(len(meanWaveform_learn[0][0][0][0])):
            waveform_mean_learn = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_learn[0][0][1][i]]),
                name='waveform_learn_cluster_id_' +
                str(meanWaveform_learn[0][0][0][0][i]))
            try:
                clusterWaveform_learn_processing_module.add_data_interface(
                    waveform_mean_learn)
            except ValueError as e:
                print(
                    'Caught an error while adding waveform interface to the learn processing module: '
                    + str(e))
                continue

        # Adding mean waveform recognition into the processing module
        for i in range(len(meanWaveform_recog[0][0][0][0])):
            waveform_mean_recog = ClusterWaveforms(
                clustering_interface=clustering,
                waveform_filtering='NA',
                waveform_sd=np.asarray([[0]]),
                waveform_mean=np.asarray([meanWaveform_recog[0][0][1][i]]),
                name='waveform_recog_cluster_id_' +
                str(meanWaveform_recog[0][0][0][0][i]))
            try:
                clusterWaveform_recog_processing_module.add_data_interface(
                    waveform_mean_recog)
            except ValueError as e:
                print(
                    'Caught an error while adding waveform interface to the recog processing module: '
                    + str(e))
                continue

        # Adding IsolDist_SNR data into the processing module
        # Here I use feature extraction to store the IsolDist_SNR data because
        # they are extracted from the original signals.
        # print(IsolDist_SNR[0][0][0])
        for i in range(len(IsolDist_SNR[0][0][1][0])):
            isoldist_data_interface = TimeSeries(
                data=[IsolDist_SNR[0][0][1][0][i]],
                unit='NA',
                timestamps=[0],
                name='IsolDist_' + str(IsolDist_SNR[0][0][0][0][i]))
            try:
                IsolDist_processing_module.add_data_interface(
                    isoldist_data_interface)
            except ValueError as e:
                print(
                    'Caught an error while adding IsolDist to the processing module: '
                    + str(e))
                continue

            SNR_data_interface = TimeSeries(unit='NA',
                                            description='The SNR data',
                                            data=[IsolDist_SNR[0][0][2][0][i]],
                                            timestamps=[0],
                                            name='SNR_' +
                                            str(IsolDist_SNR[0][0][0][0][i]))

            try:
                SNR_processing_module.add_data_interface(SNR_data_interface)
            except ValueError as e:
                print(
                    'Caught an error while adding SNR to the processing module: '
                    + str(e))
                continue

    nwbfile.add_processing_module(clustering_processing_module)
    nwbfile.add_processing_module(clusterWaveform_learn_processing_module)
    nwbfile.add_processing_module(clusterWaveform_recog_processing_module)
    nwbfile.add_processing_module(IsolDist_processing_module)
    nwbfile.add_processing_module(SNR_processing_module)

    return nwbfile
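After the call, everything is retrievable through the five modules created above; a short read-back sketch (channel id 1 is hypothetical):

clustering = nwbfile.processing['Spikes']['channel1']
print(clustering.times[:10])  # spike timestamps for that channel
print(list(nwbfile.processing['MeanWaveforms_learn'].data_interfaces.keys()))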
Example #22
def high_gamma_estimation(block_path, bands_vals, new_file=''):
    """
    Takes preprocessed LFP data and calculates High-Gamma power from the
    averaged power of standard Hilbert transform on 70~150 Hz bands.

    Parameters
    ----------
    block_path : str
        subject file path
    bands_vals : [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]
    new_file : str
        if this argument is of form 'path/to/new_file.nwb', High Gamma power
        will be saved in a new file. If it is an empty string, '', High Gamma
        power will be saved in the current NWB file.

    Returns
    -------
    Saves High Gamma power (TimeSeries) in the current or new NWB file.
    Only if a container for these data does not already exist in the file.
    """

    # Get filter parameters
    band_param_0 = bands_vals[0, :]
    band_param_1 = bands_vals[1, :]

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()
        lfp = nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        rate = lfp.rate

        nBands = len(band_param_0)
        nSamples = lfp.data.shape[0]
        nChannels = lfp.data.shape[1]
        Xp = np.zeros(
            (nBands, nChannels, nSamples))  #power (nBands,nChannels,nSamples)

        # Apply Hilbert transform ----------------------------------------------
        print('Running High Gamma estimation...')
        start = time.time()
        for ch in np.arange(nChannels):
            Xch = lfp.data[:,
                           ch] * 1e6  # 1e6 scaling helps with numerical accuracy
            Xch = Xch.reshape(1, -1)
            Xch = Xch.astype('float32')  # signal (nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                kernel = gaussian(Xch, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(Xch,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii, ch, :] = abs(X_analytic).astype('float32')
        print(
            'High Gamma estimation finished in {} seconds'.format(time.time() -
                                                                  start))

        # data: (ndarray) dims: num_times * num_channels * num_bands
        Xp = np.swapaxes(Xp, 0, 2)
        HG = np.mean(Xp, 2)  #average of high gamma bands

        # Storage of High Gamma on NWB file -----------------------------
        if new_file == '':  #on current file
            #make electrodes table
            nElecs = HG.shape[1]
            elecs_region = nwb.electrodes.create_region(
                name='electrodes',
                region=np.arange(nElecs).tolist(),
                description='all electrodes')
            hg = ElectricalSeries(name='high_gamma',
                                  data=HG,
                                  electrodes=elecs_region,
                                  rate=rate,
                                  description='')

            ecephys_module = nwb.processing['ecephys']
            ecephys_module.add_data_interface(hg)
            io.write(nwb)
            print('High Gamma power saved in ' + block_path)
        else:  #on new file
            with NWBHDF5IO(new_file, 'r+', load_namespaces=True) as io_new:
                nwb_new = io_new.read()
                #make electrodes table
                nElecs = HG.shape[1]
                elecs_region = nwb_new.electrodes.create_region(
                    name='electrodes',
                    region=np.arange(nElecs).tolist(),
                    description='all electrodes')
                hg = ElectricalSeries(name='high_gamma',
                                      data=HG,
                                      electrodes=elecs_region,
                                      rate=rate,
                                      description='')

                try:  # if ecephys module already exists
                    ecephys_module = nwb_new.processing['ecephys']
                except KeyError:  # creates ecephys ProcessingModule
                    ecephys_module = ProcessingModule(
                        name='ecephys',
                        description='Extracellular electrophysiology data.')
                    nwb_new.add_processing_module(ecephys_module)

                ecephys_module.add_data_interface(hg)
                io_new.write(nwb_new)
                print('High Gamma power saved in ' + new_file)
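bands_vals packs the Gaussian filter bank as a [2, nBands] array (row 0: centers, row 1: sigmas); a sketch covering the 70-150 Hz range with illustrative sigmas and a hypothetical path:

import numpy as np

centers = np.arange(70.0, 150.0, 10.0)      # filter centers (Hz)
sigmas = np.full_like(centers, 5.0)         # filter sigmas (Hz), illustrative
bands_vals = np.vstack([centers, sigmas])   # shape (2, nBands)

high_gamma_estimation('/data/subject_block.nwb', bands_vals)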
Example #23
def preprocess_raw_data(block_path, config):
    """
    Takes raw data and runs:
    1) CAR
    2) notch filters
    3) Downsampling

    Parameters
    ----------
    block_path : str
        subject file path
    config : dictionary
        'CAR' - Number of channels to use in CAR (default=16)
        'Notch' - Main frequency (Hz) for notch filters (default=60)
        'Downsample' - Downsampling frequency (Hz, default= 400)

    Returns
    -------
    Saves preprocessed signals (LFP) in the current NWB file.
    Only if containers for these data do not exist in the current file.
    """
    subj_path, block_name = os.path.split(block_path)
    block_name = os.path.splitext(block_path)[0]
    start = time.time()

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()

        # Storage of processed signals on NWB file -----------------------------
        try:  # if ecephys module already exists
            ecephys_module = nwb.processing['ecephys']
        except KeyError:  # creates ecephys ProcessingModule
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            # Add module to NWB file
            nwb.add_processing_module(ecephys_module)
            print('Created ecephys')

        # LFP: Downsampled and power line signal removed -----------------------
        if 'LFP' in nwb.processing[
                'ecephys'].data_interfaces:  # if LFP already exists
            lfp = nwb.processing['ecephys'].data_interfaces['LFP']
            lfp_ts = nwb.processing['ecephys'].data_interfaces[
                'LFP'].electrical_series['preprocessed']
        else:  # creates LFP data interface container
            lfp = LFP()

            # Data source
            lis = list(nwb.acquisition.keys())
            for i in lis:  # Check if there is ElectricalSeries in acquisition group
                if type(nwb.acquisition[i]).__name__ == 'ElectricalSeries':
                    source = nwb.acquisition[i]
            nChannels = source.data.shape[1]

            # Downsampling
            extraBins0 = 0
            fs = source.rate
            if config['Downsample'] is not None:
                print("Downsampling signals to " + str(config['Downsample']) +
                      " Hz.")
                print("Please wait, this might take around 30 minutes.")
                start = time.time()
                # zeros to pad to make signal length a power of 2
                nBins = source.data.shape[0]
                extraBins0 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins0)
                rate = config['Downsample']
                #One channel at a time, to improve memory usage for long signals
                for ch in np.arange(nChannels):
                    #1e6 scaling helps with numerical accuracy
                    Xch = source.data[:, ch] * 1e6
                    # Make length a power of 2; improves performance
                    Xch = np.append(Xch, extraZeros)
                    Xch = resample(Xch, rate, fs)
                    if ch == 0:
                        X = Xch.reshape(1, -1)
                    else:
                        X = np.append(X, Xch.reshape(1, -1), axis=0)
                print(
                    'Downsampling finished in {} seconds'.format(time.time() -
                                                                 start))
            else:  # No downsample
                rate = fs
                X = source.data[:, :].T * 1e6

            # Subtract CAR
            if config['CAR'] is not None:
                print(
                    "Computing and subtracting Common Average Reference in " +
                    str(config['CAR']) + " channel blocks.")
                start = time.time()
                X = subtract_CAR(X, b_size=config['CAR'])
                print('CAR subtract time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

            # Apply Notch filters
            extraBins1 = 0  # ensure defined even when notch filtering is skipped
            if config['Notch'] is not None:
                print("Applying Notch filtering of " + str(config['Notch']) +
                      " Hz")
                # zeros to pad to make signal length a power of 2
                nBins = X.shape[1]
                extraBins1 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins1)
                start = time.time()
                for ch in np.arange(nChannels):
                    Xch = np.append(X[ch, :], extraZeros).reshape(1, -1)
                    Xch = linenoise_notch(Xch,
                                          rate,
                                          notch_freq=config['Notch'])
                    if ch == 0:
                        X2 = Xch.reshape(1, -1)
                    else:
                        X2 = np.append(X2, Xch.reshape(1, -1), axis=0)
                print('Notch filter time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

                X = np.copy(X2)
                del X2

            # Remove excess bins (because of zero padding on previous steps)
            excessBins = int(np.ceil(extraBins0 * rate / fs) + extraBins1)
            if excessBins > 0:
                X = X[:, :-excessBins]
            X = X.astype('float32')  # signal (nChannels,nSamples)
            X /= 1e6  # Scales signals back to Volt

            # Add preprocessed downsampled signals as an electrical_series
            if config['CAR'] is None:
                car = 'None'
            else:
                car = str(config['CAR'])
            if config['Notch'] is None:
                notch = 'None'
            else:
                notch = str(config['Notch'])
            if config['Downsample'] is None:
                downs = 'No'
            else:
                downs = 'Yes'
            config_comment = 'CAR:' + car + ', Notch:' + notch + ', Downsampled:' + downs
            lfp_ts = lfp.create_electrical_series(name='preprocessed',
                                                  data=X.T,
                                                  electrodes=source.electrodes,
                                                  rate=rate,
                                                  description='',
                                                  comments=config_comment)
            ecephys_module.add_data_interface(lfp)

            # Write LFP to NWB file
            io.write(nwb)
            print('LFP saved in ' + block_path)
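A usage sketch with the defaults named in the docstring and a hypothetical file path:

config = {
    'CAR': 16,          # channels per common-average-reference block
    'Notch': 60,        # mains frequency (Hz)
    'Downsample': 400,  # target sampling rate (Hz)
}
preprocess_raw_data('/data/subject_block.nwb', config)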
Example #24
def add_cell_specimen_table(nwbfile: NWBFile,
                            cell_specimen_table: pd.DataFrame):
    """
    This function takes the cell specimen table and writes the ROIs
    contained within. It writes these to a new NWB imaging plane
    based off the previously supplied metadata
    Parameters
    ----------
    nwbfile: NWBFile
        this is the in memory NWBFile currently being written to which ROI data
        is added
    cell_specimen_table: pd.DataFrame
        this is the DataFrame containing the cells segmented from a ophys
        experiment, stored in json file and loaded.
        example: /home/nicholasc/projects/allensdk/allensdk/test/
                 brain_observatory/behavior/cell_specimen_table_789359614.json

    Returns
    -------
    nwbfile: NWBFile
        The altered in memory NWBFile object that now has a specimen table
    """
    cell_roi_table = cell_specimen_table.reset_index().set_index('cell_roi_id')

    # Device:
    device_name = nwbfile.lab_meta_data['metadata'].rig_name
    nwbfile.create_device(device_name,
                          "Allen Brain Observatory")
    device = nwbfile.get_device(device_name)

    # Location:
    location_description = "Area: {}, Depth: {} um".format(
        nwbfile.lab_meta_data['metadata'].targeted_structure,
        nwbfile.lab_meta_data['metadata'].imaging_depth)

    # FOV:
    fov_width = nwbfile.lab_meta_data['metadata'].field_of_view_width
    fov_height = nwbfile.lab_meta_data['metadata'].field_of_view_height
    imaging_plane_description = "{} field of view in {} at depth {} um".format(
        (fov_width, fov_height),
        nwbfile.lab_meta_data['metadata'].targeted_structure,
        nwbfile.lab_meta_data['metadata'].imaging_depth)

    # Optical Channel:
    optical_channel = OpticalChannel(
        name='channel_1',
        description='2P Optical Channel',
        emission_lambda=nwbfile.lab_meta_data['metadata'].emission_lambda)

    # Imaging Plane:
    imaging_plane = nwbfile.create_imaging_plane(
        name='imaging_plane_1',
        optical_channel=optical_channel,
        description=imaging_plane_description,
        device=device,
        excitation_lambda=nwbfile.lab_meta_data['metadata'].excitation_lambda,
        imaging_rate=nwbfile.lab_meta_data['metadata'].ophys_frame_rate,
        indicator=nwbfile.lab_meta_data['metadata'].indicator,
        location=location_description,
        manifold=[],  # Should this be passed in for future support?
        conversion=1.0,
        unit='unknown',  # Should this be passed in for future support?
        reference_frame='unknown')  # Should this be passed in for future support?

    # Image Segmentation:
    image_segmentation = ImageSegmentation(name="image_segmentation")

    if 'two_photon_imaging' not in nwbfile.modules:
        two_photon_imaging_module = ProcessingModule('two_photon_imaging', '2P processing module')
        nwbfile.add_processing_module(two_photon_imaging_module)
    else:
        two_photon_imaging_module = nwbfile.modules['two_photon_imaging']

    two_photon_imaging_module.add_data_interface(image_segmentation)

    # Plane Segmentation:
    plane_segmentation = image_segmentation.create_plane_segmentation(
        name='cell_specimen_table',
        description="Segmented rois",
        imaging_plane=imaging_plane)

    for col_name in cell_roi_table.columns:
        # the columns 'image_mask', 'pixel_mask', and 'voxel_mask' are already defined
        # in the nwb.ophys::PlaneSegmentation Object
        if col_name not in ['id', 'mask_matrix', 'image_mask', 'pixel_mask', 'voxel_mask']:
            # This builds the columns with name of column and description of column
            # both equal to the column name in the cell_roi_table
            plane_segmentation.add_column(col_name,
                                          CELL_SPECIMEN_COL_DESCRIPTIONS.get(col_name,
                                                                             "No Description Available"))

    # go through each roi and add it to the plan segmentation object
    for cell_roi_id, row in cell_roi_table.iterrows():
        sub_mask = np.array(row.pop('image_mask'))
        curr_roi = roi.create_roi_mask(fov_width, fov_height, [(fov_width - 1), 0, (fov_height - 1), 0],
                                       roi_mask=sub_mask)
        mask = curr_roi.get_mask_plane()
        csid = row.pop('cell_specimen_id')
        row['cell_specimen_id'] = -1 if csid is None else csid
        row['id'] = cell_roi_id
        plane_segmentation.add_roi(image_mask=mask, **row.to_dict())

    return nwbfile
Example #25
def preprocess_raw_data(block_path, config):
    """
    Takes raw data and runs:
    1) CAR
    2) notch filters
    3) Downsampling

    Parameters
    ----------
    block_path : str
        subject file path
    config : dictionary
        'referencing' - tuple specifying electrode referencing (type, options)
            ('CAR', N_channels_per_group)
            ('CMR', N_channels_per_group)
            ('bipolar', INCLUDE_OBLIQUE_NBHD)
        'Notch' - Main frequency (Hz) for notch filters (default=60)
        'Downsample' - Downsampling frequency (Hz, default= 400)

    Returns
    -------
    Saves preprocessed signals (LFP) in the current NWB file.
    Only if containers for these data do not exist in the current file.
    """
    subj_path, block_name = os.path.split(block_path)
    block_name = os.path.splitext(block_path)[0]
    start = time.time()

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()

        # Storage of processed signals on NWB file ----------------------------
        if 'ecephys' in nwb.processing:
            ecephys_module = nwb.processing['ecephys']
        else:  # creates ecephys ProcessingModule
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            # Add module to NWB file
            nwb.add_processing_module(ecephys_module)
            print('Created ecephys')

        # LFP: Downsampled and power line signal removed ----------------------
        if 'LFP' in nwb.processing['ecephys'].data_interfaces:
            ######
            # What's the point of this?  Nothing is done with these vars...
            lfp = nwb.processing['ecephys'].data_interfaces['LFP']
            lfp_ts = nwb.processing['ecephys'].data_interfaces[
                'LFP'].electrical_series['preprocessed']
            ######
        else:  # creates LFP data interface container
            lfp = LFP()

            # Data source
            source_list = [
                acq for acq in nwb.acquisition.values()
                if type(acq) == ElectricalSeries
            ]
            assert len(source_list) == 1, (
                'Not precisely one ElectricalSeries in acquisition!')
            source = source_list[0]
            nChannels = source.data.shape[1]

            # Downsampling
            if config['Downsample'] is not None:
                print("Downsampling signals to " + str(config['Downsample']) +
                      " Hz.")
                print("Please wait, this might take around 30 minutes.")
                start = time.time()
                # zeros to pad to make signal length a power of 2
                nBins = source.data.shape[0]
                extraBins0 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins0)
                rate = config['Downsample']

                # malloc
                T = int(np.ceil((nBins + extraBins0) * rate / source.rate))
                X = np.zeros((source.data.shape[1], T))

                # One channel at a time, to improve memory usage for long signals
                for ch in np.arange(nChannels):
                    # 1e6 scaling helps with numerical accuracy
                    Xch = source.data[:, ch] * 1e6
                    # Make length a power of 2, improves performance
                    Xch = np.append(Xch, extraZeros)
                    X[ch, :] = resample(Xch, rate, source.rate)
                print(
                    'Downsampling finished in {} seconds'.format(time.time() -
                                                                 start))
            else:  # No downsample
                extraBins0 = 0
                rate = source.rate
                X = source.data[()].T * 1e6

            # re-reference the (scaled by 1e6!) data
            electrodes = source.electrodes
            if config['referencing'] is not None:
                if config['referencing'][0] == 'CAR':
                    print(
                        "Computing and subtracting Common Average Reference in "
                        + str(config['referencing'][1]) + " channel blocks.")
                    start = time.time()
                    X = subtract_CAR(X, b_size=config['referencing'][1])
                    print('CAR subtract time for {}: {} seconds'.format(
                        block_name,
                        time.time() - start))
                elif config['referencing'][0] == 'bipolar':
                    X, bipolarTable, electrodes = get_bipolar_referenced_electrodes(
                        X, electrodes, rate, grid_step=1)

                    # add data interface for the metadata for saving
                    ecephys_module.add_data_interface(bipolarTable)
                    print('bipolarElectrodes stored for saving in ' +
                          block_path)
                else:
                    print('UNRECOGNIZED REFERENCING SCHEME; ', end='')
                    print('SKIPPING REFERENCING!')

            # Apply Notch filters
            if config['Notch'] is not None:
                print("Applying notch filtering of " + str(config['Notch']) +
                      " Hz")
                # zeros to pad to make signal length a power of 2
                nBins = X.shape[1]
                extraBins1 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins1)
                start = time.time()
                for ch in np.arange(nChannels):
                    Xch = np.append(X[ch, :], extraZeros).reshape(1, -1)
                    Xch = linenoise_notch(Xch,
                                          rate,
                                          notch_freq=config['Notch'])
                    if ch == 0:
                        X2 = Xch.reshape(1, -1)
                    else:
                        X2 = np.append(X2, Xch.reshape(1, -1), axis=0)
                print('Notch filter time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

                X = np.copy(X2)
                del X2
            else:
                extraBins1 = 0

            # Remove excess bins (because of zero padding on previous steps);
            # guard against an empty slice when no padding was added
            excessBins = int(
                np.ceil(extraBins0 * rate / source.rate) + extraBins1)
            if excessBins > 0:
                X = X[:, :-excessBins]
            X = X.astype('float32')  # signal (nChannels,nSamples)
            X /= 1e6  # Scales signals back to volts

            # Add preprocessed downsampled signals as an electrical_series
            referencing = 'None' if config['referencing'] is None else config[
                'referencing'][0]
            notch = 'None' if config['Notch'] is None else str(config['Notch'])
            downs = 'No' if config['Downsample'] is None else 'Yes'
            config_comment = ('referencing:' + referencing + ', Notch:' +
                              notch + ', Downsampled:' + downs)

            # create an electrical series for the LFP and store it in lfp
            lfp.create_electrical_series(name='preprocessed',
                                         data=X.T,
                                         electrodes=electrodes,
                                         rate=rate,
                                         description='',
                                         comments=config_comment)
            ecephys_module.add_data_interface(lfp)

            # Write LFP to NWB file
            io.write(nwb)
            print('LFP saved in ' + block_path)
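
For reference, here is a minimal, self-contained sketch of the power-of-2 zero-padding arithmetic used in the downsampling step above. It substitutes scipy.signal.resample_poly for the project's own resample(x, new_rate, old_rate) helper, so treat it as an illustration of the padding/trimming logic under that assumption, not as the exact routine:

import numpy as np
from fractions import Fraction
from scipy.signal import resample_poly


def downsample_padded(x, old_rate, new_rate):
    """Pad x to a power-of-2 length, resample, then trim the padding.

    Sketch only: resample_poly stands in for the resample helper used
    in the function above.
    """
    nBins = x.shape[0]
    extraBins = 2 ** int(np.ceil(np.log2(nBins))) - nBins
    x = np.append(x, np.zeros(extraBins))
    # Rational approximation of the rate ratio for polyphase resampling
    ratio = Fraction(new_rate / old_rate).limit_denominator(1000)
    y = resample_poly(x, ratio.numerator, ratio.denominator)
    # Drop the output samples that came from the zero padding
    excess = int(np.ceil(extraBins * new_rate / old_rate))
    return y[:-excess] if excess > 0 else y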
Example #26
def nwb_copy_file(old_file, new_file, cp_objs=None):
    """
    Copy fields defined in 'cp_objs' from an existing NWB file to a new
    NWB file.

    Parameters
    ----------
    old_file : str, path
        String such as '/path/to/old_file.nwb'.
    new_file : str, path
        String such as '/path/to/new_file.nwb'.
    cp_objs : dict
        Name:Value pairs (Group:Children) listing the groups and respective
        children from the current NWB file to be copied. Children can be:
        - Boolean, indicating an attribute (e.g. for institution, lab)
        - List of strings, containing several children names
        Example:
        {'institution':True,
         'lab':True,
         'acquisition':['microphone'],
         'ecephys':['LFP','DecompositionSeries']}
    """

    if cp_objs is None:  # avoid a shared mutable default argument
        cp_objs = {}

    manager = get_manager()

    # Open original signal file
    with NWBHDF5IO(old_file, 'r', manager=manager,
                   load_namespaces=True) as io1:
        nwb_old = io1.read()

        # Creates new file
        nwb_new = NWBFile(session_description=str(nwb_old.session_description),
                          identifier='',
                          session_start_time=datetime.now(tzlocal()))
        with NWBHDF5IO(new_file, mode='w', manager=manager,
                       load_namespaces=False) as io2:
            # Institution name ------------------------------------------------
            if 'institution' in cp_objs:
                nwb_new.institution = str(nwb_old.institution)

            # Lab name --------------------------------------------------------
            if 'lab' in cp_objs:
                nwb_new.lab = str(nwb_old.lab)

            # Session id ------------------------------------------------------
            if 'session' in cp_objs:
                nwb_new.session_id = nwb_old.session_id

            # Devices ---------------------------------------------------------
            if 'devices' in cp_objs:
                for aux in list(nwb_old.devices.keys()):
                    dev = Device(nwb_old.devices[aux].name)
                    nwb_new.add_device(dev)

            # Electrode groups ------------------------------------------------
            if 'electrode_groups' in cp_objs:
                for aux in list(nwb_old.electrode_groups.keys()):
                    nwb_new.create_electrode_group(
                        name=str(nwb_old.electrode_groups[aux].name),
                        description=str(nwb_old.electrode_groups[
                            aux].description),
                        location=str(nwb_old.electrode_groups[aux].location),
                        device=nwb_new.get_device(
                            nwb_old.electrode_groups[aux].device.name)
                    )

            # Electrodes ------------------------------------------------------
            if 'electrodes' in cp_objs:
                nElec = len(nwb_old.electrodes['x'].data[:])
                for aux in np.arange(nElec):
                    nwb_new.add_electrode(
                        x=nwb_old.electrodes['x'][aux],
                        y=nwb_old.electrodes['y'][aux],
                        z=nwb_old.electrodes['z'][aux],
                        imp=nwb_old.electrodes['imp'][aux],
                        location=str(nwb_old.electrodes['location'][aux]),
                        filtering=str(nwb_old.electrodes['filtering'][aux]),
                        group=nwb_new.get_electrode_group(
                            nwb_old.electrodes['group'][aux].name),
                        group_name=str(nwb_old.electrodes['group_name'][aux])
                    )
                # if there are custom variables
                new_vars = list(nwb_old.electrodes.colnames)
                default_vars = ['x', 'y', 'z', 'imp', 'location', 'filtering',
                                'group', 'group_name']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:

                    if var == 'label':
                        var_data = [str(elem) for elem in nwb_old.electrodes[
                                                          var].data[:]]
                    else:
                        var_data = np.array(nwb_old.electrodes[var].data[:])

                    nwb_new.add_electrode_column(
                        name=str(var),
                        description=str(nwb_old.electrodes[var].description),
                        data=var_data)

            # Epochs ----------------------------------------------------------
            if 'epochs' in cp_objs:
                nEpochs = len(nwb_old.epochs['start_time'].data[:])
                for i in np.arange(nEpochs):
                    nwb_new.add_epoch(
                        start_time=nwb_old.epochs['start_time'].data[i],
                        stop_time=nwb_old.epochs['stop_time'].data[i])
                # if there are custom variables
                new_vars = list(nwb_old.epochs.colnames)
                default_vars = ['start_time', 'stop_time', 'tags',
                                'timeseries']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:
                    nwb_new.add_epoch_column(
                        name=var,
                        description=nwb_old.epochs[var].description,
                        data=nwb_old.epochs[var].data[:])

            # Invalid times ---------------------------------------------------
            if 'invalid_times' in cp_objs:
                nInvalid = len(nwb_old.invalid_times['start_time'][:])
                for aux in np.arange(nInvalid):
                    nwb_new.add_invalid_time_interval(
                        start_time=nwb_old.invalid_times['start_time'][aux],
                        stop_time=nwb_old.invalid_times['stop_time'][aux])

            # Trials ----------------------------------------------------------
            if 'trials' in cp_objs:
                nTrials = len(nwb_old.trials['start_time'])
                for aux in np.arange(nTrials):
                    nwb_new.add_trial(
                        start_time=nwb_old.trials['start_time'][aux],
                        stop_time=nwb_old.trials['stop_time'][aux])
                # if there are custom variables
                new_vars = list(nwb_old.trials.colnames)
                default_vars = ['start_time', 'stop_time']
                for var in default_vars:
                    if var in new_vars:
                        new_vars.remove(var)
                for var in new_vars:
                    nwb_new.add_trial_column(
                        name=var,
                        description=nwb_old.trials[var].description,
                        data=nwb_old.trials[var].data[:])

            # Intervals -------------------------------------------------------
            if 'intervals' in cp_objs:
                all_objs_names = list(nwb_old.intervals.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.intervals[obj_name]
                    # create and add TimeIntervals
                    obj = TimeIntervals(name=obj_old.name,
                                        description=obj_old.description)
                    nInt = len(obj_old['start_time'])
                    for ind in np.arange(nInt):
                        obj.add_interval(start_time=obj_old['start_time'][ind],
                                         stop_time=obj_old['stop_time'][ind])
                    # Add to file
                    nwb_new.add_time_intervals(obj)

            # Stimulus --------------------------------------------------------
            if 'stimulus' in cp_objs:
                all_objs_names = list(nwb_old.stimulus.keys())
                for obj_name in all_objs_names:
                    obj_old = nwb_old.stimulus[obj_name]
                    obj = TimeSeries(name=obj_old.name,
                                     description=obj_old.description,
                                     data=obj_old.data[:],
                                     rate=obj_old.rate,
                                     resolution=obj_old.resolution,
                                     conversion=obj_old.conversion,
                                     starting_time=obj_old.starting_time,
                                     unit=obj_old.unit)
                    nwb_new.add_stimulus(obj)

            # Processing modules ----------------------------------------------
            if 'ecephys' in cp_objs:
                if cp_objs['ecephys'] is True:
                    # copy_obj expects the interface objects, not their keys
                    interfaces = list(nwb_old.processing[
                        'ecephys'].data_interfaces.values())
                else:  # list of items
                    interfaces = [
                        nwb_old.processing['ecephys'].data_interfaces[key]
                        for key in cp_objs['ecephys']
                    ]
                # Add ecephys module to NWB file
                ecephys_module = ProcessingModule(
                    name='ecephys',
                    description='Extracellular electrophysiology data.'
                )
                nwb_new.add_processing_module(ecephys_module)
                for interface_old in interfaces:
                    obj = copy_obj(interface_old, nwb_old, nwb_new)
                    if obj is not None:
                        ecephys_module.add_data_interface(obj)

            # Acquisition -----------------------------------------------------
            if 'acquisition' in cp_objs:
                if cp_objs['acquisition'] is True:
                    all_acq_names = list(nwb_old.acquisition.keys())
                else:  # list of items
                    all_acq_names = cp_objs['acquisition']
                for acq_name in all_acq_names:
                    obj_old = nwb_old.acquisition[acq_name]
                    obj = copy_obj(obj_old, nwb_old, nwb_new)
                    if obj is not None:
                        nwb_new.add_acquisition(obj)

            # Subject ---------------------------------------------------------
            if 'subject' in cp_objs:
                try:
                    cortical_surfaces = CorticalSurfaces()
                    surfaces = nwb_old.subject.cortical_surfaces.surfaces
                    for sfc in list(surfaces.keys()):
                        cortical_surfaces.create_surface(
                            name=surfaces[sfc].name,
                            faces=surfaces[sfc].faces,
                            vertices=surfaces[sfc].vertices)
                    nwb_new.subject = ECoGSubject(
                        cortical_surfaces=cortical_surfaces,
                        subject_id=nwb_old.subject.subject_id,
                        age=nwb_old.subject.age,
                        description=nwb_old.subject.description,
                        genotype=nwb_old.subject.genotype,
                        sex=nwb_old.subject.sex,
                        species=nwb_old.subject.species,
                        weight=nwb_old.subject.weight,
                        date_of_birth=nwb_old.subject.date_of_birth)
                except Exception:
                    # No ECoG-specific subject fields (e.g. cortical
                    # surfaces) available; fall back to a plain Subject
                    nwb_new.subject = Subject(age=nwb_old.subject.age,
                                              description=nwb_old.subject.description,
                                              genotype=nwb_old.subject.genotype,
                                              sex=nwb_old.subject.sex,
                                              species=nwb_old.subject.species,
                                              subject_id=nwb_old.subject.subject_id,
                                              weight=nwb_old.subject.weight,
                                              date_of_birth=nwb_old.subject.date_of_birth)

            # Write new file with copied fields
            io2.write(nwb_new, link_data=False)
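
A usage sketch for nwb_copy_file, following the cp_objs layout shown in the docstring; the file paths and the particular children selected here are illustrative only:

cp_objs = {
    'institution': True,
    'lab': True,
    'devices': True,
    'electrode_groups': True,
    'electrodes': True,
    'acquisition': ['microphone'],
    'ecephys': ['LFP'],
}
# Copies only the listed groups/children into a new, trimmed-down file
nwb_copy_file('/path/to/old_file.nwb', '/path/to/new_file.nwb',
              cp_objs=cp_objs)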
Example #27
def add_cell_specimen_table(nwbfile: NWBFile,
                            cell_specimen_table: pd.DataFrame,
                            session_metadata: dict):
    """
    This function takes the cell specimen table and writes the ROIs
    contained within. It writes these to a new NWB imaging plane
    based off the previously supplied metadata

    Parameters
    ----------
    nwbfile: NWBFile
        this is the in memory NWBFile currently being written to which ROI data
        is added
    cell_specimen_table: pd.DataFrame
        this is the DataFrame containing the cells segmented from an ophys
        experiment, stored in a json file and loaded.
        example: /home/nicholasc/projects/allensdk/allensdk/test/
                 brain_observatory/behavior/cell_specimen_table_789359614.json
    session_metadata: dict
        Dictionary containing cell_specimen_table related metadata. Should
        include at minimum the following fields:
            "emission_lambda", "excitation_lambda", "indicator",
            "targeted_structure", and ophys_frame_rate"

    Returns
    -------
    nwbfile: NWBFile
        The altered in memory NWBFile object that now has a specimen table
    """
    cell_specimen_metadata = NwbOphysMetadataSchema().load(
        session_metadata, unknown=marshmallow.EXCLUDE)
    cell_roi_table = cell_specimen_table.reset_index().set_index('cell_roi_id')

    # Device:
    device_name: str = nwbfile.lab_meta_data['metadata'].equipment_name
    if device_name.startswith("MESO"):
        device_config = {
            "name": device_name,
            "description": "Allen Brain Observatory - Mesoscope 2P Rig"
        }
    else:
        device_config = {
            "name": device_name,
            "description": "Allen Brain Observatory - Scientifica 2P Rig",
            "manufacturer": "Scientifica"
        }
    nwbfile.create_device(**device_config)
    device = nwbfile.get_device(device_name)

    # FOV:
    fov_width = nwbfile.lab_meta_data['metadata'].field_of_view_width
    fov_height = nwbfile.lab_meta_data['metadata'].field_of_view_height
    imaging_plane_description = "{} field of view in {} at depth {} um".format(
        (fov_width, fov_height), cell_specimen_metadata['targeted_structure'],
        nwbfile.lab_meta_data['metadata'].imaging_depth)

    # Optical Channel:
    optical_channel = OpticalChannel(
        name='channel_1',
        description='2P Optical Channel',
        emission_lambda=cell_specimen_metadata['emission_lambda'])

    # Imaging Plane:
    imaging_plane = nwbfile.create_imaging_plane(
        name='imaging_plane_1',
        optical_channel=optical_channel,
        description=imaging_plane_description,
        device=device,
        excitation_lambda=cell_specimen_metadata['excitation_lambda'],
        imaging_rate=cell_specimen_metadata['ophys_frame_rate'],
        indicator=cell_specimen_metadata['indicator'],
        location=cell_specimen_metadata['targeted_structure'])

    # Image Segmentation:
    image_segmentation = ImageSegmentation(name="image_segmentation")

    if 'ophys' not in nwbfile.processing:
        ophys_module = ProcessingModule('ophys', 'Ophys processing module')
        nwbfile.add_processing_module(ophys_module)
    else:
        ophys_module = nwbfile.processing['ophys']

    ophys_module.add_data_interface(image_segmentation)

    # Plane Segmentation:
    plane_segmentation = image_segmentation.create_plane_segmentation(
        name='cell_specimen_table',
        description="Segmented rois",
        imaging_plane=imaging_plane)

    for col_name in cell_roi_table.columns:
        # the columns 'roi_mask', 'pixel_mask', and 'voxel_mask' are
        # already defined in the nwb.ophys::PlaneSegmentation Object
        if col_name not in [
                'id', 'mask_matrix', 'roi_mask', 'pixel_mask', 'voxel_mask'
        ]:
            # This builds the columns with name of column and description
            # of column both equal to the column name in the cell_roi_table
            plane_segmentation.add_column(
                col_name,
                CELL_SPECIMEN_COL_DESCRIPTIONS.get(col_name,
                                                   "No Description Available"))

    # go through each roi and add it to the plane segmentation object
    for cell_roi_id, table_row in cell_roi_table.iterrows():

        # NOTE: The 'roi_mask' in this cell_roi_table has already been
        # processed by the
        # allensdk.brain_observatory.behavior.session_apis.data_io.ophys_lims_api
        # get_cell_specimen_table() method. As a result, the ROI is stored in
        # an array that is the same shape as the FULL field of view of the
        # experiment (e.g. 512 x 512).
        mask = table_row.pop('roi_mask')

        csid = table_row.pop('cell_specimen_id')
        table_row['cell_specimen_id'] = -1 if csid is None else csid
        table_row['id'] = cell_roi_id
        plane_segmentation.add_roi(image_mask=mask, **table_row.to_dict())

    return nwbfile
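
A hedged usage sketch for add_cell_specimen_table. The metadata keys follow the docstring above, but every value is illustrative rather than taken from a real experiment, and it assumes nwbfile already carries the 'metadata' lab_meta_data entry the function reads:

# Illustrative metadata; real values come from the session's records
session_metadata = {
    'emission_lambda': 520.0,     # nm, assumed
    'excitation_lambda': 910.0,   # nm, assumed
    'indicator': 'GCaMP6f',       # assumed
    'targeted_structure': 'VISp', # assumed
    'ophys_frame_rate': 31.0,     # Hz, assumed
}
nwbfile = add_cell_specimen_table(nwbfile, cell_specimen_table,
                                  session_metadata)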
Example #28
def psd_estimate(src_file, type):
    """
    Estimates Power Spectral Density from signals.

    Parameters
    ----------
    src_file : str or path
        Full path to the current NWB file.
    type : str
        ElectricalSeries source. 'raw' or 'preprocessed'.
    """

    # Open file
    with NWBHDF5IO(src_file, mode='r+', load_namespaces=True) as io:
        nwb = io.read()

        # Source ElectricalSeries
        if type == 'raw':
            # Take the ElectricalSeries found in the acquisition group
            for i in list(nwb.acquisition.keys()):
                if isinstance(nwb.acquisition[i], ElectricalSeries):
                    data_obj = nwb.acquisition[i]
        elif type == 'preprocessed':
            data_obj = nwb.processing['ecephys'].data_interfaces[
                'LFP'].electrical_series['preprocessed']
        else:
            raise ValueError("type must be 'raw' or 'preprocessed'")

        nChannels = data_obj.data.shape[1]
        nSamples = data_obj.data.shape[0]
        fs = data_obj.rate
        # Welch - window length as power of 2 and keeps dF~0.05 Hz
        dF = .05            # Frequency bin size
        win_len_welch = 2**(np.ceil(np.log2(fs / dF)).astype('int'))   # dF = fs/nfft
        # FFT - using a power of 2 number of samples improves performance
        nfft = int(2**(np.floor(np.log2(nSamples)).astype('int')))
        fx_lim = 200.
        for ch in np.arange(nChannels):  # Iterate over channels
            trace = data_obj.data[:, ch]
            fx_w, py_w = sgn.welch(trace, fs=fs, nperseg=win_len_welch)
            fx_f, py_f = sgn.periodogram(trace, fs=fs, nfft=nfft)
            # saves PSD up to 200 Hz
            py_w = py_w[fx_w < fx_lim]
            fx_w = fx_w[fx_w < fx_lim]
            py_f = py_f[fx_f < fx_lim]
            fx_f = fx_f[fx_f < fx_lim]
            if ch == 0:
                PY_welch = py_w.reshape(-1, 1)
                PY_fft = py_f.reshape(-1, 1)
            else:
                PY_welch = np.append(PY_welch, py_w.reshape(-1, 1), axis=1)
                PY_fft = np.append(PY_fft, py_f.reshape(-1, 1), axis=1)

        # vElectrodes
        elecs_region = nwb.electrodes.create_region(name='electrodes',
                                                    region=np.arange(nChannels).tolist(),
                                                    description='all electrodes')

        # vPSD shape: ('frequency', 'channel')
        spectrum_module_welch = Spectrum(name='Spectrum_welch_' + type,
                                         frequencies=fx_w,
                                         power=PY_welch,
                                         source_timeseries=data_obj,
                                         electrodes=elecs_region)

        spectrum_module_fft = Spectrum(name='Spectrum_fft_' + type,
                                       frequencies=fx_f,
                                       power=PY_fft,
                                       source_timeseries=data_obj,
                                       electrodes=elecs_region)

        # Processing module
        if 'ecephys' in nwb.processing:  # ecephys module already exists
            ecephys_module = nwb.processing['ecephys']
        else:  # creates ecephys ProcessingModule
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            # Add module to NWB file
            nwb.add_processing_module(ecephys_module)
            print('Created ecephys')
        ecephys_module.add_data_interface(spectrum_module_welch)
        ecephys_module.add_data_interface(spectrum_module_fft)

        io.write(nwb)
        print('Spectrum_welch_' + type + ' added to file.')
        print('Spectrum_fft_' + type + ' added to file.')
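
As a worked example of the Welch window arithmetic above: with fs = 400 Hz and dF = 0.05 Hz, fs / dF = 8000, so the window length is 2**ceil(log2(8000)) = 2**13 = 8192 samples. A usage sketch (the file path is hypothetical):

# 'preprocessed' reads the LFP series written by the preprocessing step
psd_estimate('/path/to/block.nwb', type='preprocessed')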
Example #29
def add_cell_specimen_table(nwbfile, cell_specimen_table):
    cell_roi_table = cell_specimen_table.reset_index().set_index('cell_roi_id')

    # Device:
    device_name = nwbfile.lab_meta_data['metadata'].rig_name
    nwbfile.create_device(device_name, "Allen Brain Observatory")
    device = nwbfile.get_device(device_name)

    # Location:
    location_description = "Area: {}, Depth: {} um".format(
        nwbfile.lab_meta_data['metadata'].targeted_structure,
        nwbfile.lab_meta_data['metadata'].imaging_depth)

    # FOV:
    fov_width = nwbfile.lab_meta_data['metadata'].field_of_view_width
    fov_height = nwbfile.lab_meta_data['metadata'].field_of_view_height
    imaging_plane_description = "{} field of view in {} at depth {} um".format(
        (fov_width, fov_height),
        nwbfile.lab_meta_data['metadata'].targeted_structure,
        nwbfile.lab_meta_data['metadata'].imaging_depth)

    # Optical Channel:
    optical_channel = OpticalChannel(
        name='channel_1',
        description='2P Optical Channel',
        emission_lambda=nwbfile.lab_meta_data['metadata'].emission_lambda)

    # Imaging Plane:
    imaging_plane = nwbfile.create_imaging_plane(
        name='imaging_plane_1',
        optical_channel=optical_channel,
        description=imaging_plane_description,
        device=device,
        excitation_lambda=nwbfile.lab_meta_data['metadata'].excitation_lambda,
        imaging_rate=nwbfile.lab_meta_data['metadata'].ophys_frame_rate,
        indicator=nwbfile.lab_meta_data['metadata'].indicator,
        location=location_description,
        manifold=[],  # Should this be passed in for future support?
        conversion=1.0,
        unit='unknown',  # Should this be passed in for future support?
        reference_frame='unknown'
    )  # Should this be passed in for future support?

    # Image Segmentation:
    image_segmentation = ImageSegmentation(name="image_segmentation")

    if 'two_photon_imaging' not in nwbfile.modules:
        two_photon_imaging_module = ProcessingModule('two_photon_imaging',
                                                     '2P processing module')
        nwbfile.add_processing_module(two_photon_imaging_module)
    else:
        two_photon_imaging_module = nwbfile.modules['two_photon_imaging']

    two_photon_imaging_module.add_data_interface(image_segmentation)

    # Plane Segmentation:
    plane_segmentation = image_segmentation.create_plane_segmentation(
        name='cell_specimen_table',
        description="Segmented rois",
        imaging_plane=imaging_plane)

    for c in [
            c for c in cell_roi_table.columns
            if c not in ['id', 'mask_matrix']
    ]:
        plane_segmentation.add_column(c, c)

    for cell_roi_id, row in cell_roi_table.iterrows():
        sub_mask = np.array(row.pop('image_mask'))
        curr_roi = roi.create_roi_mask(
            fov_width, fov_height,
            [(fov_width - 1), 0, (fov_height - 1), 0],
            roi_mask=sub_mask)
        mask = curr_roi.get_mask_plane()
        csid = row.pop('cell_specimen_id')
        row['cell_specimen_id'] = -1 if csid is None else csid
        row['id'] = cell_roi_id
        plane_segmentation.add_roi(image_mask=mask, **row.to_dict())

    return nwbfile
Example #30
def preprocess_raw_data(block_path, config):
    """
    Takes raw data and runs, in order:
    1) Downsampling
    2) Re-referencing (e.g. CAR)
    3) Notch filtering

    Parameters
    ----------
    block_path : str
        subject file path
    config : dictionary
        'referencing' - tuple specifying electrode referencing (type, options)
            ('CAR', N_channels_per_group)
            ('CMR', N_channels_per_group)
            ('bipolar', INCLUDE_OBLIQUE_NBHD)
        'Notch' - Main frequency (Hz) for notch filters (default=60)
        'Downsample' - Downsampling frequency (Hz, default= 400)

    Returns
    -------
    Saves the preprocessed signals (LFP) in the current NWB file, but only
    if containers for these data do not already exist in the file.
    """
    subj_path, block_name = os.path.split(block_path)
    block_name = os.path.splitext(block_name)[0]
    start = time.time()

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()

        # Storage of processed signals in the NWB file ------------------------
        if 'ecephys' in nwb.processing:
            ecephys_module = nwb.processing['ecephys']
        else:  # creates ecephys ProcessingModule
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            # Add module to NWB file
            nwb.add_processing_module(ecephys_module)
            print('Created ecephys')

        # LFP: Downsampled and power line signal removed ----------------------
        if 'LFP' in nwb.processing['ecephys'].data_interfaces:
            warnings.warn(
                'LFP data already exists in the nwb file. Skipping preprocessing.'
            )
        else:  # creates LFP data interface container
            lfp = LFP()

            # Data source
            source_list = [
                acq for acq in nwb.acquisition.values()
                if type(acq) == ElectricalSeries
            ]
            assert len(source_list) == 1, (
                'Not precisely one ElectricalSeries in acquisition!')
            source = source_list[0]
            nChannels = source.data.shape[1]

            # Downsampling
            if config['Downsample'] is not None:
                print("Downsampling signals to " + str(config['Downsample']) +
                      " Hz.")
                print("Please wait...")
                start = time.time()
                # Note: zero padding the signal to make the length
                # a power of 2 won't help, since resample will further pad it
                # (breaking the power of 2)
                nBins = source.data.shape[0]
                rate = config['Downsample']

                # malloc
                T = int(np.ceil(nBins * rate / source.rate))
                X = np.zeros((source.data.shape[1], T))

                # One channel at a time, to improve memory usage for long signals
                for ch in np.arange(nChannels):
                    # 1e6 scaling helps with numerical accuracy
                    Xch = source.data[:, ch] * 1e6
                    X[ch, :] = resample(Xch, rate, source.rate)
                print(
                    'Downsampling finished in {} seconds'.format(time.time() -
                                                                 start))
            else:  # No downsample
                rate = source.rate
                X = source.data[()].T * 1e6

            # re-reference the (scaled by 1e6!) data
            electrodes = source.electrodes
            if config['referencing'] is not None:
                if config['referencing'][0] == 'CAR':
                    print(
                        "Computing and subtracting Common Average Reference in "
                        + str(config['referencing'][1]) + " channel blocks.")
                    start = time.time()
                    X = subtract_CAR(X, b_size=config['referencing'][1])
                    print('CAR subtract time for {}: {} seconds'.format(
                        block_name,
                        time.time() - start))
                elif config['referencing'][0] == 'bipolar':
                    X, bipolarTable, electrodes = get_bipolar_referenced_electrodes(
                        X, electrodes, rate, grid_step=1)

                    # add data interface for the metadata for saving
                    ecephys_module.add_data_interface(bipolarTable)
                    print('bipolarElectrodes stored for saving in ' +
                          block_path)
                else:
                    print('UNRECOGNIZED REFERENCING SCHEME; ', end='')
                    print('SKIPPING REFERENCING!')

            # Apply Notch filters
            if config['Notch'] is not None:
                print("Applying notch filtering of " + str(config['Notch']) +
                      " Hz")
                # Note: zero padding the signal to make the length a power
                # of 2 won't help, since notch filtering will further pad it
                start = time.time()
                for ch in np.arange(nChannels):
                    # NOTE: apply_linenoise_notch takes a signal that is
                    # (n_timePoints, n_channels). The documentation may be wrong
                    Xch = X[ch, :].reshape(-1, 1)
                    Xch = apply_linenoise_notch(Xch, rate)
                    X[ch, :] = Xch[:, 0]
                print('Notch filter time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

            X = X.astype('float32')  # signal (nChannels,nSamples)
            X /= 1e6  # Scales signals back to volts

            # Add preprocessed downsampled signals as an electrical_series
            referencing = 'None' if config['referencing'] is None else config[
                'referencing'][0]
            notch = 'None' if config['Notch'] is None else str(config['Notch'])
            downs = 'No' if config['Downsample'] is None else 'Yes'
            config_comment = ('referencing:' + referencing + ', Notch:' +
                              notch + ', Downsampled:' + downs)

            # create an electrical series for the LFP and store it in lfp
            lfp.create_electrical_series(name='preprocessed',
                                         data=X.T,
                                         electrodes=electrodes,
                                         rate=rate,
                                         description='',
                                         comments=config_comment)
            ecephys_module.add_data_interface(lfp)

            # Write LFP to NWB file
            io.write(nwb)
            print('LFP saved in ' + block_path)
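
A usage sketch for preprocess_raw_data, using the config keys documented in the docstring; the block path and the particular parameter values are illustrative:

config = {
    'referencing': ('CAR', 16),  # CAR in 16-channel blocks
    'Notch': 60,                 # remove 60 Hz line noise
    'Downsample': 400,           # resample to 400 Hz
}
# Writes the preprocessed LFP back into the same NWB file
preprocess_raw_data('/path/to/block.nwb', config)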