Example #1
    def setUpContainer(self):
        self.timeseries = TimeSeries(name='dummy timeseries',
                                     description='desc',
                                     data=np.ones((3, 3)),
                                     unit='flibs',
                                     timestamps=np.ones((3, )))
        bands = DynamicTable(name='bands',
                             description='band info for LFPSpectralAnalysis',
                             columns=[
                                 VectorData(name='band_name',
                                            description='name of bands',
                                            data=['alpha', 'beta', 'gamma']),
                                 VectorData(
                                     name='band_limits',
                                     description='low and high cutoffs in Hz',
                                     data=np.ones((3, 2)))
                             ])
        spec_anal = DecompositionSeries(name='LFPSpectralAnalysis',
                                        description='my description',
                                        data=np.ones((3, 3, 3)),
                                        timestamps=np.ones((3, )),
                                        source_timeseries=self.timeseries,
                                        metric='amplitude',
                                        bands=bands)

        return spec_anal
Example #2
    def test_init(self):
        timeseries = TimeSeries(name='dummy timeseries',
                                description='desc',
                                data=np.ones((3, 3)),
                                unit='Volts',
                                timestamps=np.ones((3, )))
        bands = DynamicTable(name='bands',
                             description='band info for LFPSpectralAnalysis',
                             columns=[
                                 VectorData(name='band_name',
                                            description='name of bands',
                                            data=['alpha', 'beta', 'gamma']),
                                 VectorData(
                                     name='band_limits',
                                     description='low and high cutoffs in Hz',
                                     data=np.ones((3, 2)))
                             ])
        spec_anal = DecompositionSeries(name='LFPSpectralAnalysis',
                                        description='my description',
                                        data=np.ones((3, 3, 3)),
                                        timestamps=np.ones((3, )),
                                        source_timeseries=timeseries,
                                        metric='amplitude',
                                        bands=bands)

        self.assertEqual(spec_anal.name, 'LFPSpectralAnalysis')
        self.assertEqual(spec_anal.description, 'my description')
        np.testing.assert_equal(spec_anal.data, np.ones((3, 3, 3)))
        np.testing.assert_equal(spec_anal.timestamps, np.ones((3, )))
        self.assertEqual(spec_anal.bands['band_name'].data,
                         ['alpha', 'beta', 'gamma'])
        np.testing.assert_equal(spec_anal.bands['band_limits'].data,
                                np.ones((3, 2)))
        self.assertEqual(spec_anal.source_timeseries, timeseries)
        self.assertEqual(spec_anal.metric, 'amplitude')
Example #3
    def test_constructor_ids_default(self):
        columns = [
            VectorData(name=s['name'], description=s['description'], data=d)
            for s, d in zip(self.spec, self.data)
        ]
        table = DynamicTable("with_spec", 'a test table', columns=columns)
        self.check_table(table)
Example #4
    def test_nd_array_to_df(self):
        data = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
        col = VectorData(name='name', description='desc', data=data)
        df = DynamicTable('test', 'desc', np.arange(3, dtype='int'), (col, )).to_dataframe()
        df2 = pd.DataFrame({'name': [x for x in data]},
                           index=pd.Index(name='id', data=[0, 1, 2]))
        assert_frame_equal(df, df2)
Example #5
    def with_columns_and_data(self):
        columns = [
            VectorData(name=s['name'], description=s['description'], data=d)
            for s, d in zip(self.spec, self.data)
        ]
        return DynamicTable("with_columns_and_data",
                            'a test table',
                            columns=columns)
Example #6
    def test_constructor_ElementIdentifier_ids(self):
        columns = [
            VectorData(name=s['name'], description=s['description'], data=d)
            for s, d in zip(self.spec, self.data)
        ]
        ids = ElementIdentifiers('ids', [0, 1, 2, 3, 4])
        table = DynamicTable("with_columns",
                             'a test table',
                             id=ids,
                             columns=columns)
        self.check_table(table)
Example #7
    def test_constructor_ids_bad_ids(self):
        columns = [
            VectorData(name=s['name'], description=s['description'], data=d)
            for s, d in zip(self.spec, self.data)
        ]
        msg = "must provide same number of ids as length of columns"
        with self.assertRaisesRegex(ValueError, msg):
            DynamicTable("with_columns",
                         'a test table',
                         id=[0, 1],
                         columns=columns)
Example #8
def create_ragged_array(name, values):
    """
    Build a ragged (variable-length) column from a list of lists.

    :param name: name of the resulting VectorData column
    :param values: list of lists of values
    :return: tuple of (VectorData, VectorIndex) describing the ragged array
    """
    vector_data = VectorData(
        name, 'indicates which compartments the data refers to',
        [item for sublist in values for item in sublist])
    vector_index = VectorIndex(
        name + '_index', np.cumsum([len(x) for x in values]), target=vector_data)
    return vector_data, vector_index
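
A minimal usage sketch of this helper (the column name and values below are hypothetical): the list of lists is flattened into a single VectorData, and the VectorIndex stores the cumulative row lengths that delimit each row.

# Hypothetical example: three rows covering 2, 1, and 3 compartments
vector_data, vector_index = create_ragged_array(
    'compartments', [[0, 1], [2], [3, 4, 5]])
print(vector_data.data)   # [0, 1, 2, 3, 4, 5]
print(vector_index.data)  # cumulative row lengths: [2 3 6]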
Example #9
def spectral_decomposition(block_path, bands_vals):
    """
    Takes preprocessed LFP data and does the standard Hilbert transform on
    different bands. Takes about 20 minutes to run on one 10-minute block.

    Parameters
    ----------
    block_path : str
        subject file path
    bands_vals : [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]

    Returns
    -------
    Saves spectral power (DecompositionSeries) in the current NWB file,
    but only if a container for these data does not already exist in the file.
    """

    # Get filter parameters
    band_param_0 = bands_vals[0, :]
    band_param_1 = bands_vals[1, :]

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()
        lfp = nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        rate = lfp.rate

        nBands = len(band_param_0)
        nSamples = lfp.data.shape[0]
        nChannels = lfp.data.shape[1]
        Xp = np.zeros(
            (nBands, nChannels, nSamples))  # power (nBands, nChannels, nSamples)

        # Apply Hilbert transform ----------------------------------------------
        print('Running Spectral Decomposition...')
        start = time.time()
        for ch in np.arange(nChannels):
            # 1e6 scaling helps with numerical accuracy
            Xch = lfp.data[:, ch] * 1e6
            Xch = Xch.reshape(1, -1)
            Xch = Xch.astype('float32')  # signal (nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                kernel = gaussian(Xch, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(Xch,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii, ch, :] = abs(X_analytic).astype('float32')
        print('Spectral Decomposition finished in {} seconds'.format(
            time.time() - start))

        # data: (ndarray) dims: num_times * num_channels * num_bands
        Xp = np.swapaxes(Xp, 0, 2)

        # Spectral band power
        # bands: (DynamicTable) frequency bands that signal was decomposed into
        band_param_0V = VectorData(
            name='filter_param_0',
            description='frequencies for bandpass filters',
            data=band_param_0)
        band_param_1V = VectorData(
            name='filter_param_1',
            description='frequencies for bandpass filters',
            data=band_param_1)
        bandsTable = DynamicTable(
            name='bands',
            description='Series of filters used for Hilbert transform.',
            columns=[band_param_0V, band_param_1V],
            colnames=['filter_param_0', 'filter_param_1'])
        decs = DecompositionSeries(
            name='DecompositionSeries',
            data=Xp,
            description='Analytic amplitude estimated with Hilbert transform.',
            metric='amplitude',
            unit='V',
            bands=bandsTable,
            rate=rate,
            source_timeseries=lfp)

        # Storage of spectral decomposition on NWB file ------------------------
        ecephys_module = nwb.processing['ecephys']
        ecephys_module.add_data_interface(decs)
        io.write(nwb)
        print('Spectral decomposition saved in ' + block_path)
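
A hedged usage sketch (the file path and band parameters below are hypothetical): bands_vals carries the Gaussian filter centers in its first row and the filter sigmas in its second row.

import numpy as np

# Hypothetical call: decompose into three bands centered at 8, 16 and 32 Hz
bands_vals = np.array([[8., 16., 32.],   # filter centers [Hz]
                       [2., 4., 8.]])    # filter sigmas [Hz]
spectral_decomposition('path/to/subject_block.nwb', bands_vals)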
Example #10
    def with_table_columns(self):
        cols = [VectorData(**d) for d in self.spec]
        table = DynamicTable("with_table_columns",
                             'a test table',
                             columns=cols)
        return table
Example #11
    data=['A1', 'A2', 'A3'],
    description='String with a recording tag')

#####################################################################
# The :py:class:`~pynwb.icephys.IntracellularRecordingsTable` table is not just a ``DynamicTable``
# but an ``AlignedDynamicTable``. The ``AlignedDynamicTable`` type is itself a ``DynamicTable``
# that may contain an arbitrary number of additional ``DynamicTable`` objects, each of which
# defines a "category". This is similar to a table with "sub-headings". In the case of the
# :py:class:`~pynwb.icephys.IntracellularRecordingsTable`, there are three predefined categories:
# electrodes, stimuli, and responses. We can also dynamically add new categories to the table.
# Because each category corresponds to a ``DynamicTable``, adding a new category means creating
# a new ``DynamicTable`` and adding it to the ``IntracellularRecordingsTable``.

# Create a new DynamicTable for our category that contains a location column of type VectorData
location_column = VectorData(name='location',
                             data=['Mordor', 'Gondor', 'Rohan'],
                             description='Recording location in Middle Earth')

lab_category = DynamicTable(
    name='recording_lab_data',
    description='category table for lab-specific recording metadata',
    colnames=[
        'location',
    ],
    columns=[
        location_column,
    ])
# Add the table as a new category to our intracellular_recordings
nwbfile.intracellular_recordings.add_category(category=lab_category)
# Note: the name of the category is the name of the table, i.e., 'recording_lab_data'
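
#####################################################################
# A brief sketch of how the result might be inspected (assuming ``nwbfile`` is
# the file object used above): ``to_dataframe`` renders the aligned table,
# including the new 'recording_lab_data' category, as a pandas DataFrame.

print(nwbfile.intracellular_recordings.to_dataframe())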
Example #12
def get_bipolar_referenced_electrodes(X,
                                      electrodes,
                                      rate,
                                      grid_size=None,
                                      grid_step=1):
    '''
    Bipolar referencing of electrodes according to the scheme of Dr. John Burke

    Each electrode (with obvious exceptions at the edges) yields two bipolar-
    referenced channels, one for the "right" and one for the "below" neighbor.
    The results can then be packaged into an ElectricalSeries.

    Input arguments:
    --------
    X:
        numpy array containing (raw) electrode traces (Nelectrodes x T)
    electrodes:
        DynamicTableRegion containing the metadata for the electrodes whose
        traces are in X
    rate:
        sampling rate of X; for storage in an ElectricalSeries
    grid_size:
        numpy array with the two dimensions of the grid (2, ); defaults to
        a 16x16 grid
    grid_step:
        step used when subsampling the electrode grid; defaults to 1 (use
        every electrode)

    Returns:
    --------
    XX:
        numpy array of bipolar-referenced (pseudo) electrode traces
        (Nchannels x T)
    bipolarTable:
        DynamicTable with the metadata for the pseudo-electrodes.  (NB that,
        e.g., a 16x16 grid yields 480 of these pseudo-electrodes.)
    bipolarTableRegion:
        DynamicTableRegion spanning the entire bipolarTable
    '''

    # set mutable default argument(s)
    if grid_size is None:
        grid_size = np.array([16, 16])

    # malloc
    elec_layout = np.arange(np.prod(grid_size) - 1, -1,
                            -1).reshape(grid_size).T
    elec_layout = elec_layout[::grid_step, ::grid_step]
    grid_size = elec_layout.T.shape  # in case grid_step > 1
    Nchannels = 2 * np.prod(grid_size) - np.sum(grid_size)
    XX = np.zeros((Nchannels, X.shape[1]))

    # create a new dynamic table to hold the metadata
    column_names = ['x', 'y', 'z', 'imp', 'location', 'label', 'bad']
    columns = [
        VectorData(name=name, description=electrodes.table[name].description)
        for name in column_names
    ]
    bipolarTable = DynamicTable(
        name='bipolar-referenced metadata',
        description=('pseudo-channels derived via John Burke style'
                     ' bipolar referencing'),
        colnames=column_names,
        columns=columns,
    )

    # compute bipolar-ref'd channel and add a new row of metadata to table
    def add_new_channel(iChannel, iElectrode, jElectrode):
        jElectrode = int(jElectrode)

        # "bipolar referencing": the difference of neighboring electrodes
        XX[iChannel, :] = X[iElectrode, :] - X[jElectrode, :]

        # add a row to the table for this new pseudo-electrode
        bipolarTable.add_row({
            'location':
            '_'.join({
                electrodes.table['location'][iElectrode],
                electrodes.table['location'][jElectrode]
            }),
            'label':
            '-'.join([
                electrodes.table['label'][iElectrode],
                electrodes.table['label'][jElectrode]
            ]),
            'bad': (electrodes.table['bad'][iElectrode]
                    or electrodes.table['bad'][jElectrode]),
            **{
                name: electrodes.table[name][iElectrode]
                for name in ['x', 'y', 'z', 'imp']
            },
        })
        return iChannel + 1

    iChannel = 0

    # loop across columns and rows (remembering that grid is transposed)
    for i in range(grid_size[1]):
        for j in range(grid_size[0]):
            if j < grid_size[0] - 1:
                iChannel = add_new_channel(iChannel, elec_layout[i, j],
                                           elec_layout[i, j + 1])
            if i < grid_size[1] - 1:
                iChannel = add_new_channel(iChannel, elec_layout[i, j],
                                           elec_layout[i + 1, j])

    # create one big region for the entire table
    bipolarTableRegion = bipolarTable.create_region(
        'electrodes', [i for i in range(Nchannels)], 'all bipolar electrodes')

    return XX, bipolarTable, bipolarTableRegion
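
A hedged usage sketch, where raw (an Nelectrodes x T numpy array) and elec_region (a DynamicTableRegion over the session's electrode table) are placeholder names assumed to be defined elsewhere:

XX, bipolar_table, bipolar_region = get_bipolar_referenced_electrodes(
    raw, elec_region, rate=400.)
print(XX.shape)            # (480, T) for the default 16x16 grid
print(len(bipolar_table))  # one metadata row per pseudo-electrode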
Example #13
def transform(block_path, filter='default', bands_vals=None):
    """
    Takes raw LFP data and does the standard Hilbert algorithm:
    1) CAR
    2) notch filters
    3) Hilbert transform on different bands

    Takes about 20 minutes to run on one 10-minute block.

    Parameters
    ----------
    block_path : str
        subject file path
    filter: str, optional
        Frequency bands to filter the signal.
        'default' for Chang lab default values (Gaussian filters)
        'high_gamma' for only the default Gaussian filters centered in the
        high gamma range (70-150 Hz)
        'custom' for user defined (Gaussian filters)
    bands_vals: 2D array, necessary only if filter='custom'
        [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]

    Returns
    -------
    Saves preprocessed signals (LFP) and spectral power (DecompositionSeries) in
    the current NWB file, but only if containers for these data do not already
    exist in the file.
    """
    write_file = 1
    rate = 400.

    # Define filter parameters
    if filter == 'default':
        band_param_0 = bands.chang_lab['cfs']
        band_param_1 = bands.chang_lab['sds']
    elif filter == 'high_gamma':
        band_param_0 = bands.chang_lab['cfs'][(bands.chang_lab['cfs'] > 70)
                                              & (bands.chang_lab['cfs'] < 150)]
        band_param_1 = bands.chang_lab['sds'][(bands.chang_lab['cfs'] > 70)
                                              & (bands.chang_lab['cfs'] < 150)]
        #band_param_0 = [ bands.neuro['min_freqs'][-1] ]  #for hamming window filter
        #band_param_1 = [ bands.neuro['max_freqs'][-1] ]
        #band_param_0 = bands.chang_lab['cfs'][29:37]      #for average of gaussian filters
        #band_param_1 = bands.chang_lab['sds'][29:37]
    elif filter == 'custom':
        band_param_0 = bands_vals[0, :]
        band_param_1 = bands_vals[1, :]

    block_name = os.path.splitext(block_path)[0]

    start = time.time()

    with NWBHDF5IO(block_path, 'a') as io:
        nwb = io.read()

        # Storage of processed signals on NWB file -----------------------------
        if 'ecephys' not in nwb.modules:
            # Add module to NWB file
            nwb.create_processing_module(
                name='ecephys',
                description='Extracellular electrophysiology data.')
        ecephys_module = nwb.modules['ecephys']

        # LFP: Downsampled and power line signal removed
        if 'LFP' in nwb.modules['ecephys'].data_interfaces:
            lfp_ts = nwb.modules['ecephys'].data_interfaces[
                'LFP'].electrical_series['preprocessed']
            X = lfp_ts.data[:].T
            rate = lfp_ts.rate
        else:

            # 1e6 scaling helps with numerical accuracy
            X = nwb.acquisition['ECoG'].data[:].T * 1e6
            fs = nwb.acquisition['ECoG'].rate
            bad_elects = load_bad_electrodes(nwb)
            print('Load time for h5 {}: {} seconds'.format(
                block_name,
                time.time() - start))
            print('rates {}: {} {}'.format(block_name, rate, fs))
            if not np.allclose(rate, fs):
                assert rate < fs
                start = time.time()
                X = resample(X, rate, fs)
                print('resample time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

            if bad_elects.sum() > 0:
                X[bad_elects] = np.nan

            # Subtract CAR
            start = time.time()
            X = subtract_CAR(X)
            print('CAR subtract time for {}: {} seconds'.format(
                block_name,
                time.time() - start))

            # Apply Notch filters
            start = time.time()
            X = linenoise_notch(X, rate)
            print('Notch filter time for {}: {} seconds'.format(
                block_name,
                time.time() - start))

            lfp = LFP()
            # Add preprocessed downsampled signals as an electrical_series
            lfp_ts = lfp.create_electrical_series(
                name='preprocessed',
                data=X.T,
                electrodes=nwb.acquisition['ECoG'].electrodes,
                rate=rate,
                description='')
            ecephys_module.add_data_interface(lfp)

        # Spectral band power
        if 'Bandpower_' + filter not in nwb.modules['ecephys'].data_interfaces:

            # Apply Hilbert transform
            X = X.astype('float32')  # signal (nChannels,nSamples)
            nChannels = X.shape[0]
            nSamples = X.shape[1]
            nBands = len(band_param_0)
            Xp = np.zeros((nBands, nChannels,
                           nSamples))  # power (nBands,nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                # if filter=='high_gamma':
                #    kernel = hamming(X, rate, bp0, bp1)
                # else:
                kernel = gaussian(X, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(X,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii] = abs(X_analytic).astype('float32')

            # Scales signals back to Volt
            X /= 1e6

            band_param_0V = VectorData(
                name='filter_param_0',
                description='frequencies for bandpass filters',
                data=band_param_0)
            band_param_1V = VectorData(
                name='filter_param_1',
                description='frequencies for bandpass filters',
                data=band_param_1)
            bandsTable = DynamicTable(
                name='bands',
                description='Series of filters used for Hilbert transform.',
                columns=[band_param_0V, band_param_1V],
                colnames=['filter_param_0', 'filter_param_1'])

            # data: (ndarray) dims: num_times * num_channels * num_bands
            Xp = np.swapaxes(Xp, 0, 2)
            decs = DecompositionSeries(
                name='Bandpower_' + filter,
                data=Xp,
                description='Band power estimated with Hilbert transform.',
                metric='power',
                unit='V**2/Hz',
                bands=bandsTable,
                rate=rate,
                source_timeseries=lfp_ts)
            ecephys_module.add_data_interface(decs)
        io.write(nwb)

        print('done', flush=True)
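
A hedged usage sketch (the file path is a placeholder); filter='custom' requires bands_vals with filter centers in the first row and sigmas in the second:

import numpy as np

# Preprocess and decompose with the Chang lab default Gaussian filters
transform('path/to/subject_block.nwb', filter='default')

# Or with user-defined Gaussian filters (centers / sigmas in Hz)
transform('path/to/subject_block.nwb', filter='custom',
          bands_vals=np.array([[8., 16., 32.], [2., 4., 8.]]))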
Example #14
def copy_obj(obj_old, nwb_old, nwb_new):
    """ Creates a copy of obj_old. """

    obj = None
    obj_type = type(obj_old).__name__

    #ElectricalSeries ----------------------------------------------------------
    if obj_type == 'ElectricalSeries':
        nChannels = obj_old.electrodes.table['x'].data.shape[0]
        elecs_region = nwb_new.electrodes.create_region(
            name='electrodes',
            region=np.arange(nChannels).tolist(),
            description='')
        obj = ElectricalSeries(name=obj_old.name,
                               data=obj_old.data[:],
                               electrodes=elecs_region,
                               rate=obj_old.rate,
                               description=obj_old.description)

    #LFP -----------------------------------------------------------------------
    elif obj_type == 'LFP':
        obj = LFP(name=obj_old.name)
        els_name = list(obj_old.electrical_series.keys())[0]
        els = obj_old.electrical_series[els_name]
        nChannels = els.data.shape[1]
        elecs_region = nwb_new.electrodes.create_region(
            name='electrodes',
            region=np.arange(nChannels).tolist(),
            description='')
        obj_ts = obj.create_electrical_series(name=els.name,
                                              comments=els.comments,
                                              conversion=els.conversion,
                                              data=els.data[:],
                                              description=els.description,
                                              electrodes=elecs_region,
                                              rate=els.rate,
                                              resolution=els.resolution,
                                              starting_time=els.starting_time)

    #TimeSeries ----------------------------------------------------------------
    elif obj_type == 'TimeSeries':
        obj = TimeSeries(name=obj_old.name,
                         description=obj_old.description,
                         data=obj_old.data[:],
                         rate=obj_old.rate,
                         resolution=obj_old.resolution,
                         conversion=obj_old.conversion,
                         starting_time=obj_old.starting_time,
                         unit=obj_old.unit)

    #DecompositionSeries -------------------------------------------------------
    elif obj_type == 'DecompositionSeries':
        list_columns = []
        for item in obj_old.bands.columns:
            bp = VectorData(name=item.name,
                            description=item.description,
                            data=item.data[:])
            list_columns.append(bp)
        bandsTable = DynamicTable(name=obj_old.bands.name,
                                  description=obj_old.bands.description,
                                  columns=list_columns,
                                  colnames=obj_old.bands.colnames)
        obj = DecompositionSeries(
            name=obj_old.name,
            data=obj_old.data[:],
            description=obj_old.description,
            metric=obj_old.metric,
            unit=obj_old.unit,
            rate=obj_old.rate,
            #source_timeseries=lfp,
            bands=bandsTable,
        )

    #Spectrum ------------------------------------------------------------------
    elif obj_type == 'Spectrum':
        file_elecs = nwb_new.electrodes
        nChannels = len(file_elecs['x'].data[:])
        elecs_region = file_elecs.create_region(
            name='electrodes',
            region=np.arange(nChannels).tolist(),
            description='')
        obj = Spectrum(name=obj_old.name,
                       frequencies=obj_old.frequencies[:],
                       power=obj_old.power,
                       electrodes=elecs_region)

    return obj
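
A hedged usage sketch, assuming nwb_old and nwb_new are NWBFile objects opened elsewhere and that nwb_new already contains an 'ecephys' processing module and an electrodes table:

old_lfp = nwb_old.processing['ecephys'].data_interfaces['LFP']
new_lfp = copy_obj(old_lfp, nwb_old, nwb_new)
if new_lfp is not None:
    nwb_new.processing['ecephys'].add(new_lfp)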