Example #1
# Create the first file
nwbfile1 = NWBFile(source='PyNWB tutorial',
                   session_description='demonstrate external files',
                   identifier='NWBE1',
                   session_start_time=start_time,
                   file_create_date=create_date)
# Create a TimeSeries and add it to the first file
test_ts1 = TimeSeries(name='test_timeseries1',
                      source='PyNWB tutorial',
                      data=data,
                      unit='SIunit',
                      timestamps=timestamps)
nwbfile1.add_acquisition(test_ts1)
# Write the first file
io = NWBHDF5IO(filename1, 'w')
io.write(nwbfile1)
io.close()

# Create the second file
nwbfile2 = NWBFile(source='PyNWB tutorial',
                   session_description='demonstrate external files',
                   identifier='NWBE2',
                   session_start_time=start_time,
                   file_create_date=create_date)
# Create a TimeSeries for the second file
test_ts2 = TimeSeries(name='test_timeseries2',
                      source='PyNWB tutorial',
                      data=data,
                      unit='SIunit',
                      timestamps=timestamps)
nwbfile2.add_acquisition(test_ts2)
# Write the second file (filename2 is defined analogously to filename1)
io = NWBHDF5IO(filename2, 'w')
io.write(nwbfile2)
io.close()
Example #2
####################
# .. tip::
#
#       Again, if we want explicit control over how our data is chunked (and
#       compressed, etc.) in the HDF5 file, then we need to wrap our
#       :py:class:`~hdmf.data_utils.DataChunkIterator`
#       using :py:class:`~hdmf.backends.hdf5.h5_utils.H5DataIO`, as sketched below.

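####################
# For example, a minimal sketch of that wrapping (the ``compression`` and
# ``chunks`` values here are illustrative choices, not requirements):

from hdmf.backends.hdf5.h5_utils import H5DataIO
from hdmf.data_utils import DataChunkIterator
import numpy as np

# A stand-in iterator over a small array, in place of the large-array
# iterator used earlier in this tutorial
example_iter = DataChunkIterator(data=np.arange(10000.), buffer_size=1000)
wrapped_data = H5DataIO(data=example_iter,
                        compression='gzip',  # request GZIP compression
                        chunks=(1000,))      # explicit HDF5 chunk shape
# ``wrapped_data`` can then be passed as the ``data`` argument of a TimeSeries.
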
####################
# Discussion
# ^^^^^^^^^^
# Let's verify that our data was written correctly

# Read the NWB file
from pynwb import NWBHDF5IO  # noqa: F811

io = NWBHDF5IO('basic_sparse_iterwrite_largearray.nwb', 'r')
nwbfile = io.read()
data = nwbfile.get_acquisition('synthetic_timeseries').data
# Compare all the data values of our two arrays
data_match = np.all(arrdata == data[:])  # Don't do this for very large arrays!
# Print result message
if data_match:
    print("Success: All data values match")
else:
    print("ERROR: Mismatch between data")

####################
# ``[Out]:``
#
#  .. code-block:: python
#
Example #3
def spectral_decomposition(block_path, bands_vals):
    """
    Takes preprocessed LFP data and does the standard Hilbert transform on
    different bands. Takes about 20 minutes to run on 1 10-min block.

    Parameters
    ----------
    block_path : str
        subject file path
    bands_vals : [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]

    Returns
    -------
    Saves spectral power (DecompositionSeries) in the current NWB file.
    Only if container for this data do not exist in the file.
    """

    # Get filter parameters
    band_param_0 = bands_vals[0, :]
    band_param_1 = bands_vals[1, :]

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()
        lfp = nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        rate = lfp.rate

        nBands = len(band_param_0)
        nSamples = lfp.data.shape[0]
        nChannels = lfp.data.shape[1]
        Xp = np.zeros(
            (nBands, nChannels, nSamples))  #power (nBands,nChannels,nSamples)

        # Apply Hilbert transform ---------------------------------------------
        print('Running Spectral Decomposition...')
        start = time.time()
        for ch in np.arange(nChannels):
            Xch = lfp.data[:,
                           ch] * 1e6  # 1e6 scaling helps with numerical accuracy
            Xch = Xch.reshape(1, -1)
            Xch = Xch.astype('float32')  # signal (nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                kernel = gaussian(Xch, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(Xch,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii, ch, :] = abs(X_analytic).astype('float32')
        print('Spectral Decomposition finished in {} seconds'.format(
            time.time() - start))

        # data: (ndarray) dims: num_times * num_channels * num_bands
        Xp = np.swapaxes(Xp, 0, 2)

        # Spectral band power
        # bands: (DynamicTable) frequency bands that signal was decomposed into
        band_param_0V = VectorData(
            name='filter_param_0',
            description='frequencies for bandpass filters',
            data=band_param_0)
        band_param_1V = VectorData(
            name='filter_param_1',
            description='sigmas (bandwidths) for bandpass filters',
            data=band_param_1)
        bandsTable = DynamicTable(
            name='bands',
            description='Series of filters used for Hilbert transform.',
            columns=[band_param_0V, band_param_1V],
            colnames=['filter_param_0', 'filter_param_1'])
        decs = DecompositionSeries(
            name='DecompositionSeries',
            data=Xp,
            description='Analytic amplitude estimated with Hilbert transform.',
            metric='amplitude',
            unit='V',
            bands=bandsTable,
            rate=rate,
            source_timeseries=lfp)

        # Storage of spectral decomposition on NWB file ------------------------
        ecephys_module = nwb.processing['ecephys']
        ecephys_module.add_data_interface(decs)
        io.write(nwb)
        print('Spectral decomposition saved in ' + block_path)
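

# A usage sketch (the NWB path is hypothetical; assumes numpy is imported as
# np, as in the function above): decompose the preprocessed LFP into four
# Gaussian bands, following the (2, nBands) layout described in the docstring
bands = np.array([[4., 8., 16., 32.],   # filter centers [Hz]
                  [1., 2., 4., 8.]])    # filter sigmas [Hz]
spectral_decomposition('subject01_block1.nwb', bands_vals=bands)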
Example #4
def plotStimuli(nwbFilePath):
    # Input Path Here
    #nwbFilePath = ('V:/LabUsers/chandravadian/NWB Data/p/P9HMH_NOID5.nwb')
    #session = 'P9HMH_NOID5.nwb'  # 'P42HMH.nwb'

    stimuli_to_plot = range(0, 100)  # Learn = [0, 100]; Recog = [101, 200]

    # ===============================================================
    # ===============================================================

    if not os.path.exists(nwbFilePath):
        print('This file does not exist: {}'.format(nwbFilePath))
        sys.exit(-1)

    # Read NWB file
    io = NWBHDF5IO(nwbFilePath, mode='r')
    nwbfile = io.read()

    # ************* Helper FXNs ***************************

    def check(text):
        return int(text) if text.isdigit() else text

    def natural_keys(text):

        return [check(c) for c in re.split(r'(\d+)', text)]

    # *****************************************************

    try:
        # Get stimuli from the NWB file, sorted in natural order
        stimuli = sorted(nwbfile.stimulus.keys(), key=natural_keys)
    except Exception:
        print('Error in getting stimuli keys')
        sys.exit(-1)

    # Load the stimulus images and category names once, outside the loop
    img = np.asarray(nwbfile.stimulus['StimulusPresentation'].data)
    stim = nwbfile.trials['category_name'].data

    for eachStim in stimuli_to_plot:
        # Check matrix dimensions: each image should be (height, width, 3)
        img_size = np.shape(img[eachStim])
        if img_size[-1] != 3:
            img = img.transpose()

        # Show image (create the window before moving it)
        cv2.imshow(stim[eachStim], img[eachStim])
        cv2.moveWindow(stim[eachStim], 100, 100)
        # cv2.moveWindow(stim[eachStim], 910, 0)
        # Wait for a key press
        print(
            "=========  Press any key to move to the next image. Press 'esc' to quit. ================"
        )
        key = cv2.waitKey(0)
        if key == 27:
            cv2.destroyAllWindows()
            sys.exit(0)
        else:
            cv2.destroyAllWindows()
Example #5
# For local field potential data, there are two options. Again, which one you choose depends on what data you
# have available. With both options, you should store your traces with :py:class:`~pynwb.ecephys.ElectricalSeries`
# objects. If you are storing unfiltered local field potential data, you should store
# the :py:class:`~pynwb.ecephys.ElectricalSeries` objects in :py:class:`~pynwb.ecephys.LFP` data interface object(s).
# If you have filtered LFP data, you should store the :py:class:`~pynwb.ecephys.ElectricalSeries` objects in
# :py:class:`~pynwb.ecephys.FilteredEphys` data interface object(s).

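####################
# For example, a minimal sketch for the unfiltered case (assuming
# ``lfp_electrical_series`` is an :py:class:`~pynwb.ecephys.ElectricalSeries`
# built as described above):

from pynwb.ecephys import LFP

lfp = LFP(electrical_series=lfp_electrical_series)  # container for unfiltered LFP
ecephys_module = nwbfile.create_processing_module(
    name='ecephys',
    description='processed extracellular electrophysiology data')  # create once per file
ecephys_module.add(lfp)
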
####################
# .. _ecephys_writing:
#
# Once you have finished adding all of your data to the :py:class:`~pynwb.file.NWBFile`,
# write the file with :py:class:`~pynwb.NWBHDF5IO`.

from pynwb import NWBHDF5IO

with NWBHDF5IO('ecephys_example.nwb', 'w') as io:
    io.write(nwbfile)

####################
# For more details on :py:class:`~pynwb.NWBHDF5IO`, see the :ref:`basic tutorial <basic_writing>`.

####################
# .. _ecephys_reading:
#
# Reading electrophysiology data
# ------------------------------
#
# Now that you have written some electrophysiology data, you can read it back in.

io = NWBHDF5IO('ecephys_example.nwb', 'r')
nwbfile = io.read()
Example #6
from datetime import datetime

import numpy as np
from dateutil import tz
from mpi4py import MPI

from hdmf.data_utils import DataChunkIterator
from pynwb import NWBFile, TimeSeries, NWBHDF5IO

start_time = datetime(2018, 4, 25, 2, 30, 3, tzinfo=tz.gettz('US/Pacific'))
fname = 'test_parallel_pynwb.nwb'
rank = MPI.COMM_WORLD.rank  # The process ID (integer 0-3 for 4-process run)

# Create file on one rank. Here we only instantiate the dataset we want to
# write in parallel but we do not write any data
if rank == 0:
    nwbfile = NWBFile('aa', 'aa', start_time)
    data = DataChunkIterator(data=None, maxshape=(4, ), dtype=np.dtype('int'))

    nwbfile.add_acquisition(
        TimeSeries('ts_name',
                   description='desc',
                   data=data,
                   rate=100.,
                   unit='m'))
    with NWBHDF5IO(fname, 'w') as io:
        io.write(nwbfile)

# write to dataset in parallel
with NWBHDF5IO(fname, 'a', comm=MPI.COMM_WORLD) as io:
    nwbfile = io.read()
    print(rank)
    nwbfile.acquisition['ts_name'].data[rank] = rank

# read from dataset in parallel
with NWBHDF5IO(fname, 'r', comm=MPI.COMM_WORLD) as io:
    print(io.read().acquisition['ts_name'].data[rank])
Example #7
                        f'(ROIs x time), aligned to event_id: {postmat[data_name].eventI}'
                    ))
            img_seg_mod.add_data_interface(dF_F)
        except Exception as e:
            print(
                f'Error adding roi_response_series: {data_name}\n\t\tErrorMsg: {str(e)}\n',
                file=sys.stderr)

    # ------ Behavior processing module ------
    behavior_mod = nwbfile.create_processing_module(
        'Behavior', 'Behavior data (e.g. wheel revolution, lick traces)')
    behavior_epoch = pynwb.behavior.BehavioralTimeSeries(
        name='Epoched_behavioral_series')
    behavior_mod.add_data_interface(behavior_epoch)

    for behavior in ['firstSideTryAl_wheelRev', 'firstSideTryAl_lick']:
        behavior_epoch.create_timeseries(
            name=behavior,
            data=H5DataIO(postmat[behavior].traces, compression=True),
            unit='unknown',
            starting_time=postmat[behavior].time[0] / 1000,  # ms -> seconds
            # time stamps are in ms, so the sampling rate in Hz is
            # 1000 / (time step in ms)
            rate=1000. / (postmat[behavior].time[1] - postmat[behavior].time[0]),
            description=f'(time x trial), aligned to event_id: {postmat[behavior].eventI}')

    with NWBHDF5IO(os.path.join(save_path,
                                mouse_folder + '_' + session + '.nwb'),
                   mode='w') as io:
        io.write(nwbfile)
Example #8
def AddTDTAnalogDataToNWB(tdt_block_dir,
                          nwb_file_name='',
                          signal_info=None,
                          module_name='behavior',
                          verbose=False):
    """
        Copies analog (continuous) data from the specified TDT block to a Neurodata Without Borders (NWB) file.
        This is usually continuous signals about behavior (joystick position, screen refreshes, etc).
        User should provide information about the signals to help end users understand what is in each signal.
        Multiple calls can be used to load multiple data signals from multiple files.

        Typically, the NWB file will already be initialized by calling InitializeNWBFromTDT.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change the TDT extension to 'nwb'
        :param signal_info: [optional] {list} List of dictionaries with information about the signals to save.
        :param module_name: [optional] {str} Name of module to store data.  Usually 'behavior' but could also be 'ecephys' or 'misc'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.splitext(tdt_tsq_files[0])[0] + '.nwb'
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
    except Exception as e:  # report the underlying error
        raise FileNotFoundError("Couldn't open TDT file. Error: {}".format(e))

    # Initialize the NWB file
    nwb_io = []
    try:
        if not path.isfile(nwb_file_name):
            # Initialize NWB file
            if verbose:
                print("NWB file doesn't exist. Creating new one: %s..." %
                      (nwb_file_name))
            InitializeNWBFromTDT(tdt_block_dir, nwb_file_name, verbose=verbose)

        # Append to existing file
        if verbose: print("Opening NWB file %s..." % (nwb_file_name), end='')
        nwb_file_append = True
        nwb_io = NWBHDF5IO(nwb_file_name, mode='a')
        nwb_file = nwb_io.read()
        if verbose: print("done.")
    except Exception as e:  # report the underlying error
        if nwb_io: nwb_io.close()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)

    # Make sure module name is either behavior or misc
    module_name = module_name.lower()
    if (module_name != 'behavior') and (module_name != 'misc'):
        raise ValueError("Module type must either be 'behavior' or 'misc'.")

    # Parse the signal_info list
    if not signal_info:
        raise ValueError("Must specify signals to load.")
    elec_ids = []
    for cur_signal_ind, cur_signal_info in enumerate(signal_info):
        if 'label' not in cur_signal_info.keys():
            raise ValueError(
                "Signal information must have a label for each signal.")
        if 'name' not in cur_signal_info.keys():
            raise ValueError(
                "Signal information must have a name for each signal. (Should be user-understandable)"
            )
        if 'comments' not in cur_signal_info.keys():
            signal_info[cur_signal_ind]['comments'] = ''
        # Find electrode IDs for this signal
        if ('elec_id' not in cur_signal_info.keys()) or (
                not cur_signal_info['elec_id']):
            # Loop through and grab all signals of type 'streams' that aren't RAW data
            signal_info[cur_signal_ind]['elec_id'] = []
            for cur_store in tdt_header.stores.keys():
                # Grab all 'streams' but ignore RAWs
                if (tdt_header.stores[cur_store].type_str
                        == 'streams') and (cur_store[0:3] != 'RAW'):
                    signal_info[cur_signal_ind]['elec_id'].append(cur_store)

    # Create processing module for saving data
    if module_name not in nwb_file.processing.keys():
        if verbose:
            print(
                "Specified processing module (%s) does not exist. Creating." %
                (module_name))
        signal_module = ProcessingModule(
            name=module_name,
            description="Processing module for continuous signal data from %s."
            % (path.split(tdt_tsq_files[0])[1]))
        nwb_file.add_processing_module(signal_module)

    # Create data interface for the analog signals
    signal_info_str = signal_info[0]['name']
    for i in range(1, len(signal_info)):
        signal_info_str = signal_info_str + ", " + signal_info[i]['name']
    if verbose:
        print("Creating %s data interface for signals %s." %
              (module_name, signal_info_str))
    if module_name == 'behavior':
        cur_data_interface = BehavioralTimeSeries(name="Analog signals (" +
                                                  signal_info_str + ")")
    elif module_name == 'misc':
        cur_data_interface = AbstractFeatureSeries(name="Analog signals (" +
                                                   signal_info_str + ")")
    else:
        raise ValueError("Module type must either be 'behavior' or 'misc'.")

    if verbose: print("Adding signals...")
    for cur_signal_ind, cur_signal_info in enumerate(signal_info):
        # Get data from file
        analog_data = []
        analog_fs = []
        analog_start_time = []
        for cur_elec_id in cur_signal_info['elec_id']:
            cur_data = tdt.read_block(tdt_block_dir, store=cur_elec_id)
            cur_analog_data = cur_data.streams[cur_elec_id].data
            cur_analog_data = np.reshape(cur_analog_data,
                                         [len(cur_analog_data), 1])
            if len(analog_data) == 0:
                analog_fs = cur_data.streams[cur_elec_id].fs
                analog_start_time = cur_data.streams[cur_elec_id].start_time
                analog_data = cur_analog_data
            else:
                analog_fs.append(cur_data.streams[cur_elec_id].fs)
                analog_start_time.append(
                    cur_data.streams[cur_elec_id].start_time)
                analog_data = np.concatenate([analog_data, cur_analog_data],
                                             axis=1)

        # Make sure all of the fs and start_times are the same
        analog_start_time = np.unique(analog_start_time)
        analog_fs = np.unique(analog_fs)
        if len(analog_start_time) != 1 or len(analog_fs) != 1:
            raise ValueError(
                'Start time and sampling frequency need to be the same for signals to be combined.'
            )

        # Create time series
        cur_data_interface.create_timeseries(
            name=cur_signal_info['name'],
            data=analog_data,
            comments=cur_signal_info['comments'],
            unit="V",  #TODO: Check that this is correct for TDT
            resolution=1.0,  #TODO: Can we get this from TDT?
            conversion=
            0.001,  #TODO: Check what the correct conversion is for TDT
            starting_time=analog_start_time[0],
            rate=analog_fs[0],
            description="Signal %s from %s." %
            (cur_signal_info['label'], path.split(tdt_tsq_files[0])[1]))
        if verbose: print("\tAdded %s." % (cur_signal_info['label']))

    # Add data interface to module in NWB file
    if verbose: print("Adding data interface to module.")
    nwb_file.processing[module_name].add(cur_data_interface)

    # Write the file
    if verbose: print("Writing NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
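

# A usage sketch (paths and labels are hypothetical; per the checks above,
# 'label' and 'name' are required keys in each signal_info entry, while
# 'comments' and 'elec_id' are optional):
signal_info = [
    {'label': 'ANIn1', 'name': 'joystick_x', 'comments': 'horizontal position'},
    {'label': 'ANIn2', 'name': 'joystick_y'},
]
AddTDTAnalogDataToNWB('/data/tdt/Block-1', signal_info=signal_info, verbose=True)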
Example #9
def load_nwbfile(fullname):
    """Returns an `NWBFile <https://nwb-schema.readthedocs.io/en/latest/format.html#nwbfile>`_ for a given fullname (filepath + filename; string)."""
    io = NWBHDF5IO(fullname, mode="r")
    return io.read()  # the NWBFile object; only valid while `io` remains open
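
# A usage sketch (the path is a placeholder). Because `io` is local to the
# function, the HDF5 handle stays open for the life of the process; returning
# (io, nwbfile) instead would let callers close it explicitly.
nwbfile = load_nwbfile("example_session.nwb")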
Example #10
def InitializeNWBFromTDT(tdt_block_dir,
                         nwb_file_name='',
                         experiment=None,
                         subject=None,
                         electrode_info=None,
                         electrode_group=None,
                         notes=None,
                         overwrite=False,
                         verbose=False):
    """
        Initializes an NWB file to copy over data from a TDT file.  User-specified information is
        added to the NWB file as metadata about the data.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change TDT extension to 'nwb'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :param experiment: [optional] {dict} Dictionary that contains information about experimenter, name, animal, lab, and institution
        :param subject: [optional] {dict} Dictionary that contains information about subject
        :param electrode_info: [optional] {list} List of dictionaries carrying information about electrodes
        :param electrode_group: [optional] {list} List of dictionaries carrying information about electrode groupings
        :param notes: [optional] {str} Notes relevant for the dataset
        :param overwrite: [optional] {bool} Whether to overwrite the NWB file if it already exists
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.join(tdt_block_dir,
                                  path.splitext(tdt_tsq_files[0])[0] + '.nwb')
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file, get header and basic information
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
        tdt_data = tdt.read_block(tdt_block_dir, evtype=['epocs'])
    except Exception as e:  # report the underlying error
        raise FileNotFoundError("Couldn't open TDT file. Error: %s" % e)

    # Process experiment information from inputs
    if (not experiment) or not (type(experiment) is dict):
        if verbose: print("Using default experimental details.")
        nwb_session_description = 'Experimental details not provided'
        experimenter = ''
        lab = ''
        institution = ''
    else:
        if 'name' not in experiment.keys():
            experiment['name'] = ''
        if 'animal' not in experiment.keys():
            experiment['animal'] = ''
        if 'experimenter' not in experiment.keys():
            experiment['experimenter'] = ''
        if 'lab' not in experiment.keys():
            experiment['lab'] = ''
        if 'institution' not in experiment.keys():
            experiment['institution'] = ''
        experimenter = experiment['experimenter']
        lab = experiment['lab']
        institution = experiment['institution']
        nwb_session_description = 'Experiment: ' + experiment['name'] + \
                                  ', Animal: ' + experiment['animal'] + \
                                  ', Date: ' + tdt_data.info.start_date.strftime('%m/%d/%Y')  # description of the recording session
        if verbose:
            print("Experiment description: %s" % (nwb_session_description))

    # Process subject information
    if (not subject) or not (type(subject) is dict):
        cur_subject = None
    else:
        try:
            # Create a subject object from the passed dictionary
            cur_subject = file.Subject(**subject)
        except Exception as e:  # report the underlying error
            raise ValueError("Couldn't create subject object. Error: %s" % (e))

    # Define the NWB file identifier as the filename, as this should be unique
    nwb_identifier = path.split(tdt_tsq_files[0])[1]  # unique file identifier

    # Initialize the NWB file
    nwb_io = []
    try:
        if not overwrite and path.isfile(nwb_file_name):
            raise FileExistsError(
                "Can't update basic attributes of NWB file. Create new NWB file if you need to change attributes."
            )
        else:
            if verbose:
                print("Creating new NWB file %s..." % (nwb_file_name), end='')
            nwb_file_append = False
            nwb_io = NWBHDF5IO(nwb_file_name, mode='w')
            tdt_notes = ''
            if 'Note' in tdt_data.epocs.keys():
                for cur_note in tdt_data.epocs.Note:
                    tdt_notes = tdt_notes + cur_note.notes + '; '
            nwb_file = NWBFile(nwb_session_description,
                               nwb_identifier,
                               tdt_data.info.start_date,
                               file_create_date=datetime.now(tzlocal()),
                               experimenter=experimenter,
                               subject=cur_subject,
                               lab=lab,
                               institution=institution,
                               source_script='ConvertTDTToNWB.py',
                               source_script_file_name='ConvertTDTToNWB.py',
                               notes=(notes or '') + tdt_notes)  # notes may be None
            if verbose: print("done.")
    except Exception as e:  # report the underlying error
        if nwb_io: nwb_io.close()
        tdt_data.clear()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)
    if verbose: print("Created NWB file.")

    # Create device in the NWB file
    device = nwb_file.create_device(name='TDT')
    if verbose: print("Created device.")

    # Make sure electrode input is provided and valid
    if (not electrode_info) or not (type(electrode_info) is list):
        if verbose: print("Creating electrode information from directory.")

        # Grab information about SEV files
        tdt_sev_files = [
            f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.sev'
        ]
        electrode_list = []
        for sev_ind, sev_file in enumerate(tdt_sev_files):
            sev_match = re.search(".+_RAW(\\d+)_Ch(\\d+).sev", sev_file)
            electrode_list.append((int(sev_match.groups()[0]) - 1) * 16 +
                                  int(sev_match.groups()[1]))
        electrode_list = np.sort(electrode_list)

        # Create electrode information for each electrode
        electrode_info = []
        for cur_elec in electrode_list:
            electrode_info.append({
                'x': 0.0,
                'y': 0.0,
                'z': 0.0,
                'impedance': -1.0,
                'location': 'unknown',
                'group': 'electrodes',
                'id': cur_elec,
                'filtering': 'Unknown'
            })
        if verbose: print("\tCreated %d electrodes." % (len(electrode_info)))

    # Make sure electrode group input is provided and valid
    if verbose: print("Processing electrode groups.")
    default_electrode_group = {
        'name': 'default electrode group',
        'description': 'Generic electrode group for ungrouped electrodes.',
        'location': 'Unknown'
    }
    if (not electrode_group) or not (type(electrode_group) is list):
        electrode_group = [default_electrode_group]
    else:
        electrode_group.insert(0, default_electrode_group)

    # Fill in any missing information about electrodes
    ovr_elec_group_list = []  # keeps track of each electrode's group index
    if verbose: print("Processing electrode information.")
    for cur_elec_info_ind, cur_elec_info in enumerate(electrode_info):
        # If id is not provided, then throw an error
        if ('id' not in cur_elec_info.keys()) or (not cur_elec_info['id']):
            tdt_data.clear()
            nwb_io.close()
            raise ValueError(
                "Couldn't process electrode inputs. Must provide IDs for all electrodes or pass empty array."
            )
        # If filtering is not provided, add it from the file (can't with TDT)
        if ('filtering' not in cur_elec_info.keys()) or (
                not cur_elec_info['filtering']):
            electrode_info[cur_elec_info_ind]['filtering'] = 'Unknown'
        # If other variables are not provided, just initialize them to defaults
        if ('x' not in cur_elec_info.keys()) or (not cur_elec_info['x']):
            electrode_info[cur_elec_info_ind]['x'] = 0.0
        if ('y' not in cur_elec_info.keys()) or (not cur_elec_info['y']):
            electrode_info[cur_elec_info_ind]['y'] = 0.0
        if ('z' not in cur_elec_info.keys()) or (not cur_elec_info['z']):
            electrode_info[cur_elec_info_ind]['z'] = 0.0
        if ('location' not in cur_elec_info.keys()) or (
                not cur_elec_info['location']):
            electrode_info[cur_elec_info_ind]['location'] = 'unknown'
        if ('impedance' not in cur_elec_info.keys()) or (
                not cur_elec_info['impedance']):
            electrode_info[cur_elec_info_ind]['impedance'] = float(-1.0)
        if ('group'
                not in cur_elec_info.keys()) or (not cur_elec_info['group']):
            electrode_info[cur_elec_info_ind][
                'group'] = 'default electrode group'
        # Find the index to the electrode group
        grp_elec_ind = 0
        while (grp_elec_ind < len(electrode_group)) and (
                cur_elec_info['group'] !=
                electrode_group[grp_elec_ind]['name']):
            grp_elec_ind = grp_elec_ind + 1
        # If we made it past the end, this electrode group doesn't exist, put it in the default
        if grp_elec_ind >= len(electrode_group):
            grp_elec_ind = -1
        # Save the index into group dictionary for easy reference
        electrode_info[cur_elec_info_ind][
            'electrode_group_index'] = grp_elec_ind
        ovr_elec_group_list.append(grp_elec_ind)

    # Create electrode groups
    nwb_electrode_group = []
    if verbose: print("Creating electrode groups.")
    for cur_elec_group_ind, cur_elec_group in enumerate(electrode_group):
        # Create and add to our list
        nwb_electrode_group.append(
            nwb_file.create_electrode_group(
                cur_elec_group['name'],
                description=cur_elec_group['description'],
                location=cur_elec_group['location'],
                device=device))
    if verbose:
        print("\tCreated %d electrode groups." % (len(electrode_group)))

    # Create electrodes in NWB file
    if verbose: print("Adding electrodes to NWB.")
    nwb_file_elec_list = []
    for cur_elec_ind, cur_elec in enumerate(electrode_info):
        # Add electrode to NWB file
        nwb_file.add_electrode(
            id=cur_elec['id'],
            x=cur_elec['x'],
            y=cur_elec['y'],
            z=cur_elec['z'],
            imp=cur_elec['impedance'],
            location=cur_elec['location'],
            filtering=cur_elec['filtering'],
            group=nwb_electrode_group[cur_elec['electrode_group_index']])
        # Keep track of electrodes entered so we can index for the electrode table
        nwb_file_elec_list.append(cur_elec['id'])
    if verbose: print("\tAdded %d electrodes." % (len(electrode_info)))

    # Clear TDT data
    tdt_data.clear()

    # Write to file and close
    if verbose: print("\tWriting NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
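

# A usage sketch (values are hypothetical; the dictionary keys follow the
# defaults filled in by the function above):
experiment = {'name': 'reach_task', 'animal': 'M1', 'experimenter': 'A. Smith',
              'lab': 'Example Lab', 'institution': 'Example University'}
electrode_group = [{'name': 'array_1', 'description': '16-ch microwire array',
                    'location': 'M1'}]
electrode_info = [{'id': i + 1, 'group': 'array_1'} for i in range(16)]
InitializeNWBFromTDT('/data/tdt/Block-1', 'block1.nwb', experiment=experiment,
                     electrode_info=electrode_info,
                     electrode_group=electrode_group, verbose=True)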
Example #11
def AddTDTRawDataToNWB(tdt_block_dir,
                       nwb_file_name='',
                       verbose=False,
                       elec_ids=None):
    """
        Copies raw electrophysiological data from TDT format to Neurodata Without Borders (NWB) format.
        Typically, the NWB file will already be initialized by calling InitializeNWBFromTDT. Multiple
        electrodes can be added at once or with separate calls to this function.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change the TDT extension to 'nwb'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :param elec_ids: [optional] {list} List of electrode IDs to copy over. If empty, all are copied
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.splitext(tdt_tsq_files[0])[0] + '.nwb'
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
    except Exception as e:  # report the underlying error
        raise FileNotFoundError("Couldn't open TDT file. Error: {}".format(e))

    # Initialize the NWB file
    nwb_io = []
    try:
        if not path.isfile(nwb_file_name):
            # Initialize NWB file
            if verbose:
                print("NWB file doesn't exist. Creating new one: %s..." %
                      (nwb_file_name))
            InitializeNWBFromTDT(tdt_block_dir, nwb_file_name, verbose=verbose)

        # Append to existing file
        if verbose: print("Opening NWB file %s..." % (nwb_file_name), end='')
        nwb_file_append = True
        nwb_io = NWBHDF5IO(nwb_file_name, mode='a')
        nwb_file = nwb_io.read()
        if verbose: print("done.")
    except Exception as e:  # report the underlying error
        if nwb_io: nwb_io.close()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)

    # Validate the elec_ids list
    if not elec_ids:
        # Grab electrode list from NWB file
        elec_ids = nwb_file.electrodes.id[:]

    # Load data for all of the electrodes
    if verbose: print("Loading raw data from TDT file...")
    chan_data = []
    starting_time = 0
    fs = 24414.0625
    for cur_elec_id in elec_ids:
        # Convert electrode id into RAW and channel
        cur_raw = np.floor((cur_elec_id - 1) / 16) + 1
        cur_raw = 'RAW{:1.0f}'.format(cur_raw)
        cur_chan = ((cur_elec_id - 1) % 16) + 1
        print('{:s}_{:1.0f}'.format(cur_raw, cur_chan))

        #Read this channel data in from file
        cur_chan_data = tdt.read_sev(tdt_block_dir,
                                     channel=cur_chan,
                                     event_name=cur_raw)
        fs = cur_chan_data[cur_raw].fs
        starting_time = cur_chan_data.time_ranges[0][0]
        cur_chan_data = cur_chan_data[cur_raw].data
        cur_chan_data = np.reshape(cur_chan_data, [len(cur_chan_data), 1])
        if len(chan_data) == 0:
            chan_data = cur_chan_data
        else:
            chan_data = np.concatenate([chan_data, cur_chan_data], axis=1)

        if verbose: print("\tLoaded electrode %d..." % (cur_elec_id))

    # Get resolution of signal in V
    if verbose: print("Processing electrode information.")
    V_resolution = 0.0  #TODO: See if we can get resolution information from TDT?

    # Create electrode table for new electrodes
    elec_table_ind = np.ones((len(elec_ids), 1)) * np.nan
    for cur_elec_ind, cur_elec in enumerate(elec_ids):
        # In order to create electrode table, we have to have indexes for each channel into electrode list in NWB file
        cur_elec_table_ind = 0
        while (cur_elec_table_ind < len(nwb_file.electrodes)) and (
                nwb_file.electrodes[cur_elec_table_ind, 0] != cur_elec):
            cur_elec_table_ind = cur_elec_table_ind + 1
        if cur_elec_table_ind >= len(nwb_file.electrodes):
            raise ValueError("Couldn't find electrode %d in NWB file list." %
                             (cur_elec))
        elec_table_ind[cur_elec_ind] = cur_elec_table_ind
    elec_table_ind = elec_table_ind.transpose().tolist()[
        0]  # Convert to list for create_electrode_table_region
    electrode_table_region = nwb_file.create_electrode_table_region(
        elec_table_ind, "Electrodes %s" % (elec_ids))

    # Write raw data for the electrode
    if verbose: print("\tAdding to NWB...", end='')
    ephys_ts = ElectricalSeries(
        'Raw Data for Channels %s' % (elec_ids),
        chan_data,
        electrode_table_region,
        starting_time=starting_time,
        rate=fs,
        resolution=V_resolution,
        conversion=float(
            1.0),  #TODO: Check that TDT does read in V, as expected by NWB
        comments="",
        description="Raw data from TDT file.")
    nwb_file.add_acquisition(ephys_ts)
    if verbose: print("done")

    # Write the file
    if verbose: print("\tWriting NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
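

# A usage sketch (path is hypothetical): copy raw data for four electrodes into
# an already-initialized NWB file
# AddTDTRawDataToNWB('/data/tdt/Block-1', 'block1.nwb', elec_ids=[1, 2, 3, 4])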
Example #12
def chang2nwb(blockpath,
              outpath=None,
              session_start_time=None,
              session_description=None,
              identifier=None,
              anin4=False,
              ecog_format='auto',
              external_subject=True,
              include_pitch=False,
              include_intensity=False,
              speakers=True,
              mic=False,
              mini=False,
              hilb=False,
              verbose=False,
              imaging_path=None,
              parse_transcript=False,
              include_cortical_surfaces=True,
              include_electrodes=True,
              include_ekg=True,
              subject_image_list=None,
              rest_period=None,
              load_warped=False,
              **kwargs):
    """

    Parameters
    ----------
    blockpath: str
    outpath: None | str
        if None, output = [blockpath]/[blockname].nwb
    session_start_time: datetime.datetime
        default: datetime(1900, 1, 1)
    session_description: str
        default: blockname
    identifier: str
        default: blockname
    anin4: False | str
        Whether or not to convert ANIN4. ANIN4 is used as an extra channel for
        things like button presses, and is usually unused. If a string is
        supplied, that is used as the name of the timeseries.
    ecog_format: str
        ({'htk'}, 'mat', 'raw')
    external_subject: bool (optional)
        True: (default) cortical mesh is saved in an external file and a link is
            provided to that file. This is useful if you have multiple sessions for a single subject.
        False: cortical mesh is saved normally
    include_pitch: bool (optional)
        add pitch data. Default: False
    include_intensity: bool (optional)
        add intensity data. Default: False
    speakers: bool (optional)
        Default: True
    mic: bool (optional)
        default: False
    mini: only save data stub. Used for testing
    hilb: bool
        include Hilbert Transform data. Default: False
    verbose: bool (optional)
    imaging_path: str (optional)
        None: use IMAGING_DIR
        'local': use subject_dir/Imaging/
        else: use supplied string
    parse_transcript: str (optional)
    include_cortical_surfaces: bool (optional)
    include_electrodes: bool (optional)
    include_ekg: bool (optional)
    subject_image_list: list (optional)
        List of paths of images to include
    rest_period: None | array-like
    kwargs: dict
        passed to pynwb.NWBFile

    Returns
    -------

    """

    behav_module = None

    basepath, blockname = os.path.split(blockpath)
    subject_id = get_subject_id(blockname)
    if identifier is None:
        identifier = blockname

    if session_description is None:
        session_description = blockname

    if outpath is None:
        outpath = blockpath + '.nwb'
    out_base_path = os.path.split(outpath)[0]

    if session_start_time is None:
        session_start_time = datetime(1900, 1, 1).astimezone(timezone('UTC'))

    if imaging_path is None:
        subj_imaging_path = path.join(IMAGING_PATH, subject_id)
    elif imaging_path == 'local':
        subj_imaging_path = path.join(basepath, 'imaging')
    else:
        subj_imaging_path = os.path.join(imaging_path, subject_id)

    # file paths
    bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')
    ecog_path = path.join(blockpath, 'RawHTK')
    ecog400_path = path.join(blockpath, 'ecog400', 'ecog.mat')
    elec_metadata_file = path.join(subj_imaging_path, 'elecs',
                                   'TDT_elecs_all.mat')
    mesh_path = path.join(subj_imaging_path, 'Meshes')
    pial_files = glob.glob(path.join(mesh_path, '*pial.mat'))

    # Create the NWB file object
    nwbfile = NWBFile(session_description,
                      identifier,
                      session_start_time,
                      datetime.now().astimezone(),
                      session_id=identifier,
                      institution='University of California, San Francisco',
                      lab='Chang Lab',
                      **kwargs)

    nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')

    bad_elecs_inds = get_bad_elecs(blockpath)

    if include_electrodes:
        add_electrodes(nwbfile,
                       elec_metadata_file,
                       bad_elecs_inds,
                       load_warped=load_warped)
    else:
        device = nwbfile.create_device('256Grid')
        electrode_group = nwbfile.create_electrode_group(
            name='256Grid electrodes',
            description='auto_group',
            location='location',
            device=device)

        for elec_counter in range(256):
            bad = elec_counter in bad_elecs_inds
            nwbfile.add_electrode(id=elec_counter + 1,
                                  x=np.nan,
                                  y=np.nan,
                                  z=np.nan,
                                  imp=np.nan,
                                  location=' ',
                                  filtering='none',
                                  group=electrode_group,
                                  bad=bad)
    ecog_elecs = list(range(len(nwbfile.electrodes)))
    ecog_elecs_region = nwbfile.create_electrode_table_region(
        ecog_elecs, 'ECoG electrodes on brain')

    # Read electrophysiology data from HTK files and add them to NWB file
    if ecog_format == 'auto':
        ecog_rate, data, ecog_path = auto_ecog(blockpath,
                                               ecog_elecs,
                                               verbose=False)
    elif ecog_format == 'htk':
        if verbose:
            print('reading htk acquisition...', flush=True)
        ecog_rate, data = readhtks(ecog_path, ecog_elecs)
        data = data.squeeze()
        if verbose:
            print('done', flush=True)

    elif ecog_format == 'mat':
        with File(ecog400_path, 'r') as f:
            data = f['ecogDS']['data'][:, ecog_elecs]
            ecog_rate = f['ecogDS']['sampFreq'][:].ravel()[0]
        ecog_path = ecog400_path

    elif ecog_format == 'raw':
        ecog_path = os.path.join(tdt_data_path, subject_id, blockname,
                                 'raw.mat')
        ecog_rate, data = load_wavs(ecog_path)

    else:
        raise ValueError('unrecognized argument: ecog_format')

    ts_desc = "all Wav data"

    if mini:
        data = data[:2000]

    ecog_ts = ElectricalSeries(name='ElectricalSeries',
                               data=H5DataIO(data, compression='gzip'),
                               electrodes=ecog_elecs_region,
                               rate=ecog_rate,
                               description=ts_desc,
                               conversion=0.001)
    nwbfile.add_acquisition(ecog_ts)

    if include_ekg:
        ekg_elecs = find_ekg_elecs(elec_metadata_file)
        if len(ekg_elecs):
            add_ekg(nwbfile, ecog_path, ekg_elecs)

    if mic:
        # Add microphone recording from room
        fs, data = get_analog(blockpath, 1)
        nwbfile.add_acquisition(
            TimeSeries('microphone',
                       data,
                       'audio unit',
                       rate=fs,
                       description="audio recording from microphone in room"))
    if speakers:
        fs, data = get_analog(blockpath, 2)
        # Add audio stimulus 1
        nwbfile.add_stimulus(
            TimeSeries('speaker 1',
                       data,
                       'NA',
                       rate=fs,
                       description="audio stimulus 1"))

        # Add audio stimulus 2
        fs, data = get_analog(blockpath, 3)
        if fs is not None:
            nwbfile.add_stimulus(
                TimeSeries('speaker 2',
                           data,
                           'NA',
                           rate=fs,
                           description='the second stimulus source'))

    if anin4:
        fs, data = get_analog(blockpath, 4)
        nwbfile.add_acquisition(
            TimeSeries(anin4,
                       data,
                       'aux unit',
                       rate=fs,
                       description="aux analog recording"))

    # Add bad time segments
    if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:
        bad_time = sio.loadmat(bad_time_file)['badTimeSegments']
        for row in bad_time:
            nwbfile.add_invalid_time_interval(start_time=row[0],
                                              stop_time=row[1],
                                              tags=('ECoG artifact', ),
                                              timeseries=ecog_ts)

    if rest_period is not None:
        nwbfile.add_epoch_column(name='label', description='label')
        nwbfile.add_epoch(start_time=rest_period[0],
                          stop_time=rest_period[1],
                          label='rest_period')

    if hilb:
        block_hilb_path = os.path.join(hilb_dir, subject_id, blockname,
                                       blockname + '_AA.h5')
        file = File(block_hilb_path, 'r')

        data = transpose_iter(
            file['X'])  # transposes data during iterative write
        filter_center = file['filter_center'][:]
        filter_sigma = file['filter_sigma'][:]

        data = H5DataIO(DataChunkIterator(tqdm(data,
                                               desc='writing hilbert data'),
                                          buffer_size=400 * 20),
                        compression='gzip')

        decomp_series = DecompositionSeries(
            name='LFPDecompositionSeries',
            description='Gaussian band Hilbert transform',
            data=data,
            rate=400.,
            source_timeseries=ecog_ts,
            metric='amplitude')

        for band_mean, band_stdev in zip(filter_center, filter_sigma):
            decomp_series.add_band(band_mean=band_mean, band_stdev=band_stdev)

        hilb_mod = nwbfile.create_processing_module(
            name='ecephys', description='holds hilbert analysis results')
        hilb_mod.add_container(decomp_series)

    if include_cortical_surfaces:
        subject = ECoGSubject(subject_id=subject_id)
        subject.cortical_surfaces = create_cortical_surfaces(
            pial_files, subject_id)
    else:
        subject = Subject(subject_id=subject_id, species='Homo sapiens')

    if subject_image_list is not None:
        subject = add_images_to_subject(subject, subject_image_list)

    if external_subject:
        subj_fpath = path.join(out_base_path, subject_id + '.nwb')
        if not os.path.isfile(subj_fpath):
            subj_nwbfile = NWBFile(session_description=subject_id,
                                   identifier=subject_id,
                                   subject=subject,
                                   session_start_time=datetime(
                                       1900, 1, 1).astimezone(timezone('UTC')))
            with NWBHDF5IO(subj_fpath, manager=manager, mode='w') as subj_io:
                subj_io.write(subj_nwbfile)
        subj_read_io = NWBHDF5IO(subj_fpath, manager=manager, mode='r')
        subj_nwbfile = subj_read_io.read()
        subject = subj_nwbfile.subject

    nwbfile.subject = subject

    if parse_transcript:
        if parse_transcript == 'CV':
            parseout = parse(blockpath, blockname)
            df = make_df(parseout, 0, subject_id, align_pos=1)
            nwbfile.add_trial_column('cv_transition_time',
                                     'time of CV transition in seconds')
            nwbfile.add_trial_column(
                'speak',
                'if True, subject is speaking. If False, subject is listening')
            nwbfile.add_trial_column('condition', 'syllable spoken')
            for _, row in df.iterrows():
                nwbfile.add_trial(start_time=row['start'],
                                  stop_time=row['stop'],
                                  cv_transition_time=row['align'],
                                  speak=row['mode'] == 'speak',
                                  condition=row['label'])
        elif parse_transcript == 'singing':
            parseout = parse(blockpath, blockname)
            df = make_df(parseout, 0, subject_id, align_pos=0)
            if not len(df):
                df = pd.DataFrame(parseout)
                df['mode'] = 'speak'

            df = df.loc[df['label'].astype('bool'), :]  # handle empty labels
            nwbfile.add_trial_column(
                'speak',
                'if True, subject is speaking. If False, subject is listening')
            nwbfile.add_trial_column('condition', 'syllable spoken')
            for _, row in df.iterrows():
                nwbfile.add_trial(start_time=row['start'],
                                  stop_time=row['stop'],
                                  speak=row['mode'] == 'speak',
                                  condition=row['label'])
        elif parse_transcript == 'emphasis':
            parseout = parse(blockpath, blockname)
            try:
                df = make_df(parseout, 0, subject_id, align_pos=0)
            except Exception:
                df = pd.DataFrame(parseout)
            if not len(df):
                df = pd.DataFrame(parseout)
            df = df.loc[df['label'].astype('bool'), :]  # handle empty labels
            nwbfile.add_trial_column('condition', 'word emphasized')
            nwbfile.add_trial_column(
                'speak',
                'if True, subject is speaking. If False, subject is listening')
            for _, row in df.iterrows():
                nwbfile.add_trial(start_time=row['start'],
                                  stop_time=row['stop'],
                                  speak=True,
                                  condition=row['label'])
        elif parse_transcript == 'MOCHA':
            nwbfile = create_transcription(nwbfile, transcript_path, blockname)

    # behavior
    if include_pitch:
        if behav_module is None:
            behav_module = nwbfile.create_processing_module(
                'behavior', 'processing about behavior')
        if os.path.isfile(
                os.path.join(blockpath, 'pitch_' + blockname + '.mat')):
            fs, data = load_pitch(blockpath)
            pitch_ts = TimeSeries(
                data=data,
                rate=fs,
                unit='Hz',
                name='pitch',
                description=
                'Pitch as extracted from Praat. NaNs mark unvoiced regions.')
            behav_module.add_container(
                BehavioralTimeSeries(name='pitch', time_series=pitch_ts))
        else:
            print('No pitch file for ' + blockname)

    if include_intensity:
        if behav_module is None:
            behav_module = nwbfile.create_processing_module(
                'behavior', 'processing about behavior')
        if os.path.isfile(
                os.path.join(blockpath, 'intensity_' + blockname + '.mat')):
            fs, data = load_pitch(blockpath)
            intensity_ts = TimeSeries(
                data=data,
                rate=fs,
                unit='dB',
                name='intensity',
                description='Intensity of speech in dB extracted from Praat.')
            behav_module.add_container(
                BehavioralTimeSeries(name='intensity',
                                     time_series=intensity_ts))
        else:
            print('No intensity file for ' + blockname)

    # Export the NWB file
    with NWBHDF5IO(outpath, manager=manager, mode='w') as io:
        io.write(nwbfile)

    if external_subject:
        subj_read_io.close()

    if hilb:
        file.close()

    # read check
    with NWBHDF5IO(outpath, manager=manager, mode='r') as io:
        io.read()
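

# A usage sketch (block path and options are hypothetical):
# chang2nwb('/data/subjects/EC1/EC1_B1', ecog_format='htk', mini=True, verbose=True)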
Example #13
# This file is for testing the NWB API calls so we can translate them into the webpage with ease
import os
from dateutil.tz import tzlocal
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# nwb stuff
from pynwb import NWBFile
from pynwb import NWBHDF5IO
from std import nwb_metric


io = NWBHDF5IO('flask/nwb_files/test.nwb', 'r')
nwbfile_in = io.read()
print(nwbfile_in)

data = nwbfile_in.acquisition['flow']
data2 = nwbfile_in.acquisition['wheel']
# drop nan from data2
# data2 = data2[np.isfinite(data2)]
# data2 = data2[np.logical_not(np.isnan(data2))]

data3 = nwbfile_in.acquisition['TwoPhotonSeries1']
# drop nan from data3
# data3 = data3[np.isfinite(data3)]

print("flow STD is: " + str(nwb_metric.check_spread(nwb_metric, data.data[:])))
print("flow prob jumps is: " +
      str(nwb_metric.prob_jumps(nwb_metric, data.data[:])))
Example #14
    return spk


def get_avg_spk(spk, defs=((-20, 0), (-60, -40), (-80, -60))):

    hist = {}

    for dur, data in spk.items():
        hist[dur] = {}
        for _def in defs:
            dd, ff = data
            print(dd, ff)
            freqs = ff[np.logical_and(dd >= _def[0], dd < _def[1])]
            hist[dur][_def] = (np.mean(freqs), np.std(freqs))

    return hist


if __name__ == '__main__':
    io = NWBHDF5IO('rebound.nwb', 'r')
    nwbfile = io.read()

    import matplotlib.pyplot as plt
    spk = get_rebound_spk(nwbfile, "control_%g", np.arange(0.025, 0.5, 0.025))
    avgspk = get_avg_spk(spk)

    for dur, nspk in avgspk.items():
        for _def, _nspk in nspk.items():
            __def = (abs(_def[1]), abs(_def[0]))
            print('%g-%g %g %g' % (__def + _nspk))
Example #15
#    However, only *GZIP* is built into HDF5 by default, i.e., while data
#    compressed with *GZIP* can be read by any platform and installation of
#    HDF5, other compressors may not be installed everywhere, so not all
#    users may be able to access those files.
#

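####################
# For example, to stay with the portable default (a sketch; the array here is
# just illustrative data):

from hdmf.backends.hdf5.h5_utils import H5DataIO
import numpy as np

portable_data = H5DataIO(data=np.arange(100.),
                         compression='gzip',  # built into HDF5 everywhere
                         compression_opts=4)  # gzip level 0-9
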
####################
# Writing the data
# ----------------
#
#
# Writing the data now works as usual.

from pynwb import NWBHDF5IO

io = NWBHDF5IO('advanced_io_example.nwb', 'w')
io.write(nwbfile)
io.close()

####################
# Reading the data
# ----------------
#
#
# Again, nothing has changed for read. All of the above advanced I/O features are handled transparently.

io = NWBHDF5IO('advanced_io_example.nwb', 'r')
nwbfile = io.read()

####################
# Now let's have a look to confirm that all our I/O settings were indeed used.
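
####################
# A minimal sketch of such a check (assuming the compressed dataset was stored
# as an acquisition named 'synthetic_timeseries'; after reading, ``data`` is an
# h5py.Dataset, which exposes its storage settings directly):

data = nwbfile.get_acquisition('synthetic_timeseries').data
print("chunks:     ", data.chunks)            # chunk shape in the HDF5 file
print("compression:", data.compression)       # e.g., 'gzip'
print("gzip level: ", data.compression_opts)  # compression options used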
Example #16
            [0].keys() if tag not in acquisition.TrialStimInfo.primary_key
        }
        # Add entry to the trial-table
        for trial in (acquisition.TrialSet.Trial
                      & session_key).fetch(as_dict=True):
            photostim_tag = (acquisition.TrialStimInfo
                             & trial).fetch(as_dict=True)
            trial_tag_value = {
                **trial,
                **photostim_tag[0]
            } if len(photostim_tag) == 1 else {
                **trial,
                **photostim_tag_default
            }
            # rename 'trial_id' to 'id'
            trial_tag_value['id'] = trial_tag_value['trial_id']
            # drop the original primary-key fields (already merged above)
            for k in acquisition.TrialSet.Trial.primary_key:
                trial_tag_value.pop(k)
            nwbfile.add_trial(**trial_tag_value)

    # =============== Write NWB 2.0 file ===============
    save_path = os.path.join('data', 'NWB 2.0')
    save_file_name = ''.join([nwbfile.identifier, '.nwb'])
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    with NWBHDF5IO(os.path.join(save_path, save_file_name), mode='w') as io:
        io.write(nwbfile)
        print(f'Write NWB 2.0 file: {save_file_name}')
Example No. 17
        label_pieces = os.path.split(txt_file)[1].split('_')
        if label_pieces[0] == 'soma':
            compartment_label = 'soma'
        else:
            compartment_label = label_pieces[1] + '_' + label_pieces[2]
        compartment_labels.append(compartment_label)

        all_compartments[-1].append(i)
        mp_data.append(np.loadtxt(txt_file))
    nwbfile.add_unit(compartment_labels=compartment_labels)

mp_data = np.column_stack(mp_data)
if COMPRESS:
    mp_data = H5DataIO(mp_data, compression='gzip')
compartments, compartments_index = create_ragged_array(all_compartments)
cs = CompartmentSeries('membrane_potential',
                       mp_data,
                       unit='mV',
                       rate=np.nan,
                       compartments=compartments,
                       compartments_index=compartments_index,
                       unit_id=np.arange(len(cell_paths), dtype=int))
nwbfile.add_acquisition(cs)

with NWBHDF5IO(run_dir + '.nwb', 'w') as io:
    io.write(nwbfile)

# read data
io_read = NWBHDF5IO(run_dir + '.nwb', 'r')
nwbfile = io_read.read()
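
# Confirm the CompartmentSeries round-tripped (the acquisition name matches
# the write step above), then close the reader.
print(nwbfile.acquisition['membrane_potential'])
io_read.close()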
Example No. 18
# imports needed by this snippet
import os
from datetime import datetime

import numpy as np
from numpy.testing import assert_array_equal
from hdmf.common import DynamicTableRegion
from pynwb import NWBFile, NWBHDF5IO
from pynwb.ecephys import ElectricalSeries
from ndx_bipolar_scheme import BipolarSchemeTable, NdxBipolarScheme


def test_ext():
    nwbfile = NWBFile('description', 'id', datetime.now().astimezone())

    device = nwbfile.create_device('device_name')

    electrode_group = nwbfile.create_electrode_group('electrode_group',
                                                     'desc',
                                                     'loc',
                                                     device=device)

    for i in np.arange(20.):
        nwbfile.add_electrode(i, i, i, np.nan, 'loc', 'filt', electrode_group)

    electrodes = DynamicTableRegion(
        name='electrodes',
        data=np.arange(0, 3),
        description='desc',
        table=nwbfile.electrodes,
    )

    source_ec_series = ElectricalSeries(
        name='source_ec_series',
        description='desc',
        data=np.random.rand(100, 3),
        rate=1000.,
        electrodes=electrodes,
    )

    nwbfile.add_acquisition(source_ec_series)

    bipolar_scheme_table = BipolarSchemeTable(name='bipolar_scheme',
                                              description='desc')

    bipolar_scheme_table.add_row(anodes=[0], cathodes=[1])
    bipolar_scheme_table.add_row(anodes=[0, 1], cathodes=[2, 3])
    bipolar_scheme_table.add_row(anodes=[0, 1], cathodes=[2])

    bipolar_scheme_table['anodes'].target.table = nwbfile.electrodes
    bipolar_scheme_table['cathodes'].target.table = nwbfile.electrodes

    bipolar_scheme_region = DynamicTableRegion(
        name='electrodes',
        data=np.arange(0, 3),
        description='desc',
        table=bipolar_scheme_table,
    )

    ec_series = ElectricalSeries(
        name='dest_ec_series',
        description='desc',
        data=np.random.rand(100, 3),
        rate=1000.,
        electrodes=bipolar_scheme_region,
    )

    nwbfile.add_acquisition(ec_series)

    ndx_bipolar_scheme = NdxBipolarScheme(
        bipolar_scheme_tables=[bipolar_scheme_table], source=source_ec_series)
    nwbfile.add_lab_meta_data(ndx_bipolar_scheme)

    with NWBHDF5IO('test_nwb.nwb', 'w') as io:
        io.write(nwbfile)

    with NWBHDF5IO('test_nwb.nwb', 'r', load_namespaces=True) as io:
        nwbfile = io.read()
        assert_array_equal(
            nwbfile.acquisition['dest_ec_series'].electrodes.table['anodes'][2]
            ['x'], [0., 1.])

    os.remove('test_nwb.nwb')
Example No. 19
    def save(self, session_object):
        # Cannot type session_object due to a circular dependency
        # TODO fix circular dependency and add type

        session_metadata: BehaviorOphysMetadata = \
            session_object.api.get_metadata()

        session_type = session_metadata.session_type

        nwbfile = NWBFile(
            session_description=session_type,
            identifier=str(session_object.ophys_experiment_id),
            session_start_time=session_metadata.date_of_acquisition,
            file_create_date=pytz.utc.localize(datetime.datetime.now()),
            institution="Allen Institute for Brain Science",
            keywords=[
                "2-photon", "calcium imaging", "visual cortex", "behavior",
                "task"
            ],
            experiment_description=get_expt_description(session_type))

        # Add stimulus_timestamps to NWB in-memory object:
        nwb.add_stimulus_timestamps(nwbfile,
                                    session_object.stimulus_timestamps)

        # Add running acquisition ('dx', 'v_sig', 'v_in') data to NWB
        # This data should be saved to NWB but not accessible directly from
        # Sessions
        nwb.add_running_acquisition_to_nwbfile(
            nwbfile, session_object.api.get_running_acquisition_df())

        # Add running data to NWB in-memory object:
        nwb.add_running_speed_to_nwbfile(nwbfile,
                                         session_object.running_speed,
                                         name="speed",
                                         from_dataframe=True)
        nwb.add_running_speed_to_nwbfile(nwbfile,
                                         session_object.raw_running_speed,
                                         name="speed_unfiltered",
                                         from_dataframe=True)

        # Add stimulus template data to NWB in-memory object:
        # Use the semi-private _stimulus_templates attribute because it is
        # a StimulusTemplate object. The public stimulus_templates property
        # of the session_object returns a DataFrame.
        session_stimulus_templates = session_object._stimulus_templates
        self._add_stimulus_templates(
            nwbfile=nwbfile,
            stimulus_templates=session_stimulus_templates,
            stimulus_presentations=session_object.stimulus_presentations)

        # search for omitted rows and add stop_time before writing to NWB file
        set_omitted_stop_time(
            stimulus_table=session_object.stimulus_presentations)

        # Add stimulus presentations data to NWB in-memory object:
        nwb.add_stimulus_presentations(nwbfile,
                                       session_object.stimulus_presentations)

        # Add trials data to NWB in-memory object:
        nwb.add_trials(nwbfile, session_object.trials,
                       TRIAL_COLUMN_DESCRIPTION_DICT)

        # Add licks data to NWB in-memory object:
        if len(session_object.licks) > 0:
            nwb.add_licks(nwbfile, session_object.licks)

        # Add rewards data to NWB in-memory object:
        if len(session_object.rewards) > 0:
            nwb.add_rewards(nwbfile, session_object.rewards)

        # Add max_projection image data to NWB in-memory object:
        nwb.add_max_projection(nwbfile, session_object.max_projection)

        # Add average_image image data to NWB in-memory object:
        nwb.add_average_image(nwbfile, session_object.average_projection)

        # Add segmentation_mask_image image data to NWB in-memory object:
        nwb.add_segmentation_mask_image(nwbfile,
                                        session_object.segmentation_mask_image)

        # Add metadata to NWB in-memory object:
        nwb.add_metadata(nwbfile, session_object.metadata, behavior_only=False)

        # Add task parameters to NWB in-memory object:
        nwb.add_task_parameters(nwbfile, session_object.task_parameters)

        # Add roi metrics to NWB in-memory object:
        nwb.add_cell_specimen_table(nwbfile,
                                    session_object.cell_specimen_table,
                                    session_object.metadata)

        # Add dff to NWB in-memory object:
        nwb.add_dff_traces(nwbfile, session_object.dff_traces,
                           session_object.ophys_timestamps)

        # Add corrected_fluorescence to NWB in-memory object:
        nwb.add_corrected_fluorescence_traces(
            nwbfile, session_object.corrected_fluorescence_traces)

        # Add motion correction to NWB in-memory object:
        nwb.add_motion_correction(nwbfile, session_object.motion_correction)

        # Add eye tracking and rig geometry to NWB in-memory object
        # if eye_tracking data exists.
        if session_object.eye_tracking is not None:
            self.add_eye_tracking_data_to_nwb(
                nwbfile, session_object.eye_tracking,
                session_object.eye_tracking_rig_geometry)

        # Add events
        self.add_events(nwbfile=nwbfile, events=session_object.events)

        # Write the file:
        with NWBHDF5IO(self.path, 'w') as nwb_file_writer:
            nwb_file_writer.write(nwbfile)

        return nwbfile
Example No. 20
device = nwbfile.create_device(name='trodes_rig123')
electrode_name = 'tetrode1'
description = "an example tetrode"
location = "somewhere in the hippocampus"

electrode_group = nwbfile.create_electrode_group(electrode_name,
                                                 description=description,
                                                 location=location,
                                                 device=device)

for idx in [1, 2, 3, 4]:
    nwbfile.add_electrode(id=idx,
                          x=1.0,
                          y=2.0,
                          z=3.0,
                          imp=float(-idx),
                          location='CA1',
                          filtering='none',
                          group=electrode_group)
electrode_table_region = nwbfile.create_electrode_table_region(
    [0, 2], 'the first and third electrodes')

eseries = tdt.extract_tdt('ECoG', None, electrode_table_region)

nwbfile.add_acquisition(eseries)

# Write the data to file
io = NWBHDF5IO('test.nwb', 'w')
io.write(nwbfile)
io.close()
Example No. 21

####################
# .. _basic_writing:
#
# Writing an NWB file
# -------------------
#
# NWB I/O is carried out using the :py:class:`~pynwb.NWBHDF5IO` class [#]_. This class is responsible
# for mapping an :py:class:`~pynwb.file.NWBFile` object into HDF5 according to the NWB schema.
#
# To write an :py:class:`~pynwb.file.NWBFile`, use the :py:func:`~pynwb.form.backends.io.FORMIO.write` method.

from pynwb import NWBHDF5IO

io = NWBHDF5IO('basic_example.nwb', mode='w')
io.write(nwbfile)
io.close()

####################
# You can also use :py:class:`~pynwb.NWBHDF5IO` as a context manager:

with NWBHDF5IO('basic_example.nwb', 'w') as io:
    io.write(nwbfile)

####################
# .. _basic_reading:
#
# Reading an NWB file
# -------------------
#
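# A minimal sketch of the read path, mirroring the write examples above:
# open the same file with mode ``'r'`` and call ``read``.

with NWBHDF5IO('basic_example.nwb', 'r') as io:
    nwbfile_in = io.read()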
Example No. 22
    def check_metadata_write(self, metadata: dict, nwbfile_path: Path, recording: se.RecordingExtractor):
        standard_metadata = get_nwb_metadata(recording=recording)
        device_defaults = dict(  # from the individual add_devices function
            name="Device",
            description="no description"
        )
        electrode_group_defaults = dict(  # from the individual add_electrode_groups function
            name="Electrode Group",
            description="no description",
            location="unknown",
            device="Device"
        )

        with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io:
            nwbfile = io.read()

            device_source = metadata["Ecephys"].get("Device", standard_metadata["Ecephys"]["Device"])
            self.assertEqual(len(device_source), len(nwbfile.devices))
            for device in device_source:
                device_name = device.get("name", device_defaults["name"])
                self.assertIn(device_name, nwbfile.devices)
                self.assertEqual(
                    device.get("description", device_defaults["description"]), nwbfile.devices[device_name].description
                )
                self.assertEqual(device.get("manufacturer"), nwbfile.devices[device_name].manufacturer)

            electrode_group_source = metadata["Ecephys"].get(
                "ElectrodeGroup",
                standard_metadata["Ecephys"]["ElectrodeGroup"]
            )
            self.assertEqual(len(electrode_group_source), len(nwbfile.electrode_groups))
            for group in electrode_group_source:
                group_name = group.get("name", electrode_group_defaults["name"])
                self.assertIn(group_name, nwbfile.electrode_groups)
                self.assertEqual(
                    group.get("description", electrode_group_defaults["description"]),
                    nwbfile.electrode_groups[group_name].description
                )
                self.assertEqual(
                    group.get("location", electrode_group_defaults["location"]),
                    nwbfile.electrode_groups[group_name].location
                )
                device_name = group.get("device", electrode_group_defaults["device"])
                self.assertIn(device_name, nwbfile.devices)
                self.assertEqual(nwbfile.electrode_groups[group_name].device, nwbfile.devices[device_name])

            n_channels = len(recording.get_channel_ids())
            electrode_source = metadata["Ecephys"].get("Electrodes", [])
            self.assertEqual(n_channels, len(nwbfile.electrodes))
            for column in electrode_source:
                column_name = column["name"]
                self.assertIn(column_name, nwbfile.electrodes)
                self.assertEqual(column["description"], getattr(nwbfile.electrodes, column_name).description)
                if column_name in ["x", "y", "z", "rel_x", "rel_y", "rel_z"]:
                    for j in range(n_channels):
                        self.assertEqual(column["data"][j], getattr(nwbfile.electrodes[j], column_name).values[0])
                else:
                    for j in range(n_channels):
                        self.assertTrue(
                            column["data"][j] == getattr(nwbfile.electrodes[j], column_name).values[0]
                            or (
                                np.isnan(column["data"][j])
                                and np.isnan(getattr(nwbfile.electrodes[j], column_name).values[0])
                            )
                        )
Example No. 23
    "This data was randomly generated with numpy, using 1234 as the seed",
    description="Random numbers generated with numpy.random.rand")
nwbfile.add_acquisition(ts)

####################
# .. note::
#
#     For more information on writing :py:class:`~pynwb.ecephys.ElectricalSeries`,
#     see :ref:`ecephys_tutorial`.
#
# Now that we have some data, let's write our file. You can choose not to cache the spec
# by setting ``cache_spec=False`` in :py:meth:`~hdmf.backends.hdf5.h5tools.HDF5IO.write`.

from pynwb import NWBHDF5IO

io = NWBHDF5IO('cache_spec_example.nwb', mode='w')
io.write(nwbfile)
io.close()
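
####################
# A sketch of the opt-out (the file name is an assumption): pass
# ``cache_spec=False`` to skip caching the specification in the file.

with NWBHDF5IO('no_cache_spec_example.nwb', mode='w') as io:
    io.write(nwbfile, cache_spec=False)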

####################
# .. note::
#
#     For more information on writing NWB files, see :ref:`basic_writing`.

####################
# By default, PyNWB does not use the namespaces cached in a file; you must
# request this explicitly with the *load_namespaces* argument to the
# :py:class:`~pynwb.NWBHDF5IO` constructor.

io = NWBHDF5IO('cache_spec_example.nwb', mode='r', load_namespaces=True)
nwbfile = io.read()
Example No. 24
import luigi
from pynwb import NWBHDF5IO
from src.pipeline.process import NWB

tasks = [NWB(patient_id=1)]

luigi.build(tasks, local_scheduler=True, workers=4)

io = NWBHDF5IO(tasks[0].output().path, 'r')
nwb = io.read()
print(nwb)
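
# Close the reader when finished to release the HDF5 file handle.
io.close()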
Example No. 25
def from_nwb_pupil(nwb_file, nwb_format, fs=20, with_pupil=False, running_speed=False, as_dict=True):
# def from_nwb(cls, nwb_file, nwb_format, with_pupil=False, fs=20):
    """
    The NWB (Neurodata Without Borders) format is a unified data format developed by the Allen Brain Institute.
    Data is stored as an HDF5 file, with the format varying depending on how the data was saved.

    References:
      - https://nwb.org
      - https://pynwb.readthedocs.io/en/latest/index.html

    :param nwb_file: path to the nwb file
    :param nwb_format: specifier for how the data is saved in the container
    :param int fs: sampling rate; will match for all signals
    :param bool with_pupil: whether to include the pupil signal in the recording
    :param bool running_speed: whether to include the running-speed signal in the recording
    :param bool as_dict: if True, return a dictionary of recording objects, one per unit/neuron;
                         otherwise return a single recording object with each unit as a channel
                         in the pointprocess signal
    :return: a recording object, or a dict of them
    """
    #log.info(f'Loading NWB file with format "{nwb_format}" from "{nwb_file}".')

    # add in supported nwb formats here
    assert nwb_format in ['neuropixel'], f'"{nwb_format}" not a supported NWB file format.'

    nwb_filepath = Path(nwb_file)
    if not nwb_filepath.exists():
        raise FileNotFoundError(f'"{nwb_file}" could not be found.')

    if nwb_format == 'neuropixel':
        """
        In neuropixel ecephys nwb files, data is stored in several attributes of the container: 
          - units: individual cell metadata, a dataframe
          - epochs: timing of the stimuli, series of arrays
          - lab_meta_data: metadata about the experiment, such as specimen details
          
        Spike times are saved as arrays in the 'spike_times' column of the units dataframe.
        The sampling frequency defaults to match the pupil signal; if no pupil data is
        retrieved, it is set to the chosen value (the previous default was 1250).
          
        Refs:
          - https://allensdk.readthedocs.io/en/latest/visual_coding_neuropixels.html
          - https://allensdk.readthedocs.io/en/latest/_static/examples/nb/ecephys_quickstart.html
          - https://allensdk.readthedocs.io/en/latest/_static/examples/nb/ecephys_data_access.html
        """
        try:
            from pynwb import NWBHDF5IO
            from allensdk.brain_observatory.ecephys import nwb  # needed for ecephys format compat
        except ImportError:
            m = 'The "allensdk" library is required to work with neuropixel nwb formats, available on PyPI.'
            #log.error(m)
            raise ImportError(m)

        session_name = nwb_filepath.stem
        with NWBHDF5IO(str(nwb_filepath), 'r') as nwb_io:
            nwbfile = nwb_io.read()

            units = nwbfile.units
            epochs = nwbfile.epochs
           
            spike_times = dict(zip(units.id[:].astype(str), units['spike_times'][:]))

            # extract the metadata and convert to dict
            metadata = nwbfile.lab_meta_data['metadata'].to_dict()
            metadata['uri'] = str(nwb_filepath)  # add in uri
            # add invalid-times data to meta as a DataFrame if it exists (includes times and probe id)
            if nwbfile.invalid_times is not None:
                invalid_times = nwbfile.invalid_times
                invalid_times =  np.array([invalid_times[col][:] for col in invalid_times.colnames])
                metadata['invalid_times'] = pd.DataFrame(invalid_times.transpose(),columns=['start_time', 'stop_time', 'tags'])
                
            # build the units metadata
            units_data = {
                col.name: col.data for col in units.columns
                if col.name not in ['spike_times', 'spike_times_index', 'spike_amplitudes',
                                    'spike_amplitudes_index', 'waveform_mean', 'waveform_mean_index']
            }

            # needs to be a dict
            units_meta = pd.DataFrame(units_data, index=units.id[:])
            #add electrode info to units meta
            electrodes=nwbfile.electrodes
            e_data = {col.name: col.data for col in electrodes.columns}
            e_meta = pd.DataFrame(e_data,index=electrodes.id[:])
            units_meta=pd.merge(units_meta,e_meta,left_on=units_meta.peak_channel_id,right_index=True, 
                                suffixes=('_unit','_channel')).drop(['key_0','group'],axis=1).to_dict('index')# needs to be a dict    

            # build the epoch dataframe
            epoch_data = {
                col.name: col.data for col in epochs.columns
                if col.name not in ['tags', 'timeseries', 'tags_index', 'timeseries_index']
            }

            epoch_df = pd.DataFrame(epoch_data, index=epochs.id[:]).rename({
                'start_time': 'start',
                'stop_time': 'end',
                'stimulus_name': 'name'
            }, axis='columns')

 
            # rename epochs to correspond to the different natural scene/movie frames
            has_frame = epoch_df['frame'].notna()
            epoch_df.loc[has_frame, 'name'] = (epoch_df.loc[has_frame, 'name'] + '_' +
                                               epoch_df.loc[has_frame, 'frame'].astype(int).astype(str))
            
            
            # save the full stimulus info to meta, then drop the extra columns
            metadata['epochs'] = epoch_df
            epoch_df = epoch_df.drop([col for col in epoch_df.columns if col not in ['start', 'end', 'name']], axis=1)

            # rename natural-scene epochs to work with the demo
            df_copy = epoch_df[epoch_df.name.str.contains('natural_scene')].copy()
            df_copy.loc[:, 'name'] = 'REFERENCE'

            # DataFrame.append was removed in pandas 2.0; concat is the equivalent
            epoch_df = pd.concat([epoch_df, df_copy], ignore_index=True)
            #expand epoch bounds epochs will overlap to test evoked potential
#            to_adjust=epoch_df.loc[:,['start','end']].to_numpy()
#            epoch_df.loc[:,['start','end']] = nems.epoch.adjust_epoch_bounds(to_adjust,-0.1,0.1)
            
            
            # save the spike times as a point-process signal; frequency set to match the other signals
            pp = PointProcess(fs, spike_times, name='resp', recording=session_name, epochs=epoch_df,
                              chans=[str(c) for c in nwbfile.units.id[:]],meta=units_meta)
            #dict to pass to recording
            #signal_dict = {pp.name: pp}

          #  log.info('Successfully loaded nwb file.')
            from scipy.interpolate import interp1d
            # save pupil data as a rasterized signal
            if with_pupil:
                try:
                    pupil = nwbfile.modules['eye_tracking'].data_interfaces['pupil_ellipse_fits']
                    t = pupil['timestamps'][:]
                    pupil = pupil['width'][:].reshape(1,-1) #only 1 dimension - or get 'height'
                    
                     #interpolate to set sampling rate
                    f = interp1d(t,pupil,bounds_error=False,fill_value=np.nan)

                    new_t = np.arange(0.0,(t.max()+1/fs),1/fs)#pupil data starting at timepoint 0.0 (nan filler)
                    pupil = f(new_t)
                    
                    pupil_signal = RasterizedSignal(fs=fs,data=pupil,recording=session_name,name='pupil',
                                                    epochs=epoch_df,chans=['pupil']) #for all data list(pupil_data.colnames[0:5])
                    
                # if no pupil data for this session, still return the spike data
                except KeyError:
                    print(session_name + ' has no pupil data.')
                    with_pupil = False  # fall back: skip adding the missing pupil signal below

            
            if running_speed:
                running = nwbfile.modules['running'].data_interfaces['running_speed']
                t = running.timestamps[:][1]#data has start and end timestamps, here only end used
                running = running.data[:].reshape(1,-1)

                f = interp1d(t,running)
                #new_t = np.arange(np.min(t),np.max(t),1/fs)
                new_t = np.arange(epoch_df.start.min(),epoch_df.end.max(),1/fs)
                running = f(new_t)
                running=RasterizedSignal(fs=fs,data=running,name='running',recording=session_name,epochs=epoch_df)



            if as_dict:
                # each unit gets a separate recording in the dict
                rec_dict={}
                for c in pp.chans:
                    unit_signal=pp.extract_channels([c])
                    rec=Recording({'resp':unit_signal},meta=metadata)
                    if with_pupil:
                        rec.add_signal(pupil_signal)
                    if running_speed:
                        rec.add_signal(running)
                    rec_dict[c]=rec
                return rec_dict
            
            else:
                rec=Recording({'resp':pp},meta=metadata)
                if with_pupil:
                    rec.add_signal(pupil_signal)
                if running_speed:
                    rec.add_signal(running)
                return rec
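
# Hypothetical usage sketch (the file path is an assumption):
# recs = from_nwb_pupil('session.nwb', 'neuropixel', fs=20,
#                       with_pupil=True, running_speed=True, as_dict=True)
# for unit_id, rec in recs.items():
#     print(unit_id, list(rec.signals.keys()))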
Example No. 26
def test_nwb_converter(tmp_path, build_converter):
    nwbfileloc = str(tmp_path/'test.nwb')
    if isinstance(build_converter, Exception):
        print(str(build_converter))
        return
    full_metadata = build_converter.complete_metadata
    save_raw = False
    save_camera_raw = False
    converter_nwb1 = Alyx2NWBConverter(
        nwb_metadata_file=build_converter.complete_metadata,
        saveloc=nwbfileloc,
        save_raw=save_raw,
        save_camera_raw=save_camera_raw)
    converter_nwb2 = Alyx2NWBConverter(
        metadata_obj=build_converter,
        saveloc=nwbfileloc,
        save_raw=save_raw,
        save_camera_raw=save_camera_raw)
    converter_nwb1.run_conversion()
    converter_nwb1.write_nwb()
    with NWBHDF5IO(nwbfileloc, 'r') as io:
        nwbfile = io.read()
        # test nwbfile fields:
        for i, j in full_metadata['NWBFile'].items():
            assert getattr(nwbfile, i, False) is not False
            if i != 'session_start_time':
                if isinstance(getattr(nwbfile, i), h5py.Dataset):
                    assert all(getattr(nwbfile, i)[()] == j)
                else:
                    if i == 'experimenter':
                        assert list(getattr(nwbfile, i)) == j
                    else:
                        assert getattr(nwbfile, i) == j
        # test iblsubject:
        for i, j in full_metadata['IBLSubject'].items():
            assert getattr(nwbfile.subject, i, False) is not False
            if 'date' not in i:
                if isinstance(getattr(nwbfile.subject, i), h5py.Dataset):
                    assert all(getattr(nwbfile.subject, i)[()] == j)
                else:
                    assert getattr(nwbfile.subject, i) == j
        # test iblsessions:
        for i, j in full_metadata['IBLSessionsData'].items():
            assert nwbfile.lab_meta_data['Ibl_session_data'].fields.get(i) is not None
            if isinstance(nwbfile.lab_meta_data['Ibl_session_data'].fields.get(i), h5py.Dataset):
                assert all(nwbfile.lab_meta_data['Ibl_session_data'].fields.get(i)[()] == j)
            else:
                if i not in ['json', 'extended_qc']:
                    assert nwbfile.lab_meta_data['Ibl_session_data'].fields.get(i) == j
                else:
                    assert len(set(j).difference(set(nwbfile.lab_meta_data['Ibl_session_data'].fields.get(i)))) == 0
        # test probes:
        if full_metadata['Ecephys']['Ecephys'].get('Device'):
            device_dict = full_metadata['Ecephys']['Ecephys']['Device']
            name = device_dict.pop('name')
            assert name in nwbfile.devices
            assert nwbfile.devices[name].fields == device_dict
        for probe in full_metadata['Probes']:
            name = probe.pop('name')
            assert name in nwbfile.devices
            traj_est = nwbfile.devices[name].fields.pop('trajectory_estimate')
            traj_est0 = probe.pop('trajectory_estimate')
            assert all(traj_est[()] == traj_est0)
            assert nwbfile.devices[name].fields == probe
        # test trials:
        for trlcol in full_metadata['Trials']:
            assert trlcol['name'] in nwbfile.trials.colnames
            dtcol = [nwbfile.trials.columns[no] for no, i in enumerate(nwbfile.trials.colnames)
                     if trlcol['name'] == i and isinstance(nwbfile.trials.columns[no], VectorData)][0]
            if trlcol['data'].split('.')[-1] != 'intervals':
                data = converter_nwb1._one_data.loaded_datasets.get(trlcol['data']).data[0]
                assert_array_equal(data, dtcol.data[()])
        # test units:
        unit_data_len = sum(converter_nwb1._one_data.data_attrs_dump['unit_table_length'])
        dt_column_names = [getattr(nwbfile.units, i['name']).name for i in full_metadata['Units']]
        for no, unitcol in enumerate(full_metadata['Units']):
            assert unitcol['name'] == dt_column_names[no]
        assert nwbfile.units.id.shape[0] == unit_data_len
        # test electrode group:
        for group in full_metadata['Ecephys']['ElectrodeGroup']:
            name = group.pop('name')
            assert name in nwbfile.electrode_groups
            electrode_group_dict = nwbfile.electrode_groups[name].fields
            if electrode_group_dict.get('device'):
                electrode_group_dict['device'] = electrode_group_dict['device'].name
            assert group == electrode_group_dict
        # test electrode table:
        elec_tbl_len = sum(converter_nwb1._one_data.data_attrs_dump['electrode_table_length'])
        for electrode in full_metadata['ElectrodeTable']:
            assert electrode['name'] in nwbfile.electrodes.colnames
        assert nwbfile.electrodes.id.shape[0] == elec_tbl_len
        # test timeseries ephys:
        ephys_datasets = nwbfile.processing['ecephys'].data_interfaces
        for i, j in full_metadata['Ecephys']['Ecephys'].items():
            for j1 in j:
                assert j1['data'] in converter_nwb1._one_data.data_attrs_dump
                field_names = converter_nwb1._one_data.data_attrs_dump[j1['data']]
                for k in field_names:
                    assert k in ephys_datasets
                    if 'Spectrum' in i:
                        assert isinstance(ephys_datasets[k], Spectrum)
                    else:
                        assert isinstance(ephys_datasets[k], TimeSeries)
        # test behavior:
        ephys_datasets = nwbfile.processing['behavior'].data_interfaces
        for i, j in full_metadata['Behavior'].items():
            for i1, j1 in j.items():
                for j11 in j1:
                    assert i in ephys_datasets
                    if j11['name'] != 'camera_dlc':
                        assert j11['name'] in getattr(ephys_datasets[i], i1)
                        if i == 'Position':
                            assert isinstance(getattr(ephys_datasets[i], i1)[j11['name']], SpatialSeries)
                        else:
                            assert isinstance(getattr(ephys_datasets[i], i1)[j11['name']], TimeSeries)
        # test acquisition: test only presence of names of datasets:
        if save_raw and save_camera_raw:
            acq_datasets = nwbfile.acquisition
            for i, j in full_metadata['Acquisition'].items():
                for j1 in j:
                    assert any([True for h in acq_datasets.keys() if j1['name'] in h])
Example No. 27
def preprocess_raw_data(block_path, config):
    """
    Takes raw data and runs:
    1) CAR
    2) notch filters
    3) Downsampling

    Parameters
    ----------
    block_path : str
        subject file path
    config : dictionary
        'referencing' - tuple specifying electrode referencing (type, options)
            ('CAR', N_channels_per_group)
            ('CMR', N_channels_per_group)
            ('bipolar', INCLUDE_OBLIQUE_NBHD)
        'Notch' - Main frequency (Hz) for notch filters (default=60)
        'Downsample' - Downsampling frequency (Hz, default= 400)

    Returns
    -------
    Saves preprocessed signals (LFP) in the current NWB file, but only if
    containers for these data do not already exist there.
    """
    subj_path, block_name = os.path.split(block_path)
    block_name = os.path.splitext(block_name)[0]
    start = time.time()

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()

        # Storage of processed signals on NWB file ----------------------------
        if 'ecephys' in nwb.processing:
            ecephys_module = nwb.processing['ecephys']
        else:  # creates ecephys ProcessingModule
            ecephys_module = ProcessingModule(
                name='ecephys',
                description='Extracellular electrophysiology data.')
            # Add module to NWB file
            nwb.add_processing_module(ecephys_module)
            print('Created ecephys')

        # LFP: Downsampled and power line signal removed ----------------------
        if 'LFP' in nwb.processing['ecephys'].data_interfaces:
            # An 'LFP' container already exists in this file; nothing to do.
            pass
        else:  # creates LFP data interface container
            lfp = LFP()

            # Data source
            source_list = [
                acq for acq in nwb.acquisition.values()
                if type(acq) == ElectricalSeries
            ]
            assert len(source_list) == 1, (
                'Not precisely one ElectricalSeries in acquisition!')
            source = source_list[0]
            nChannels = source.data.shape[1]

            # Downsampling
            if config['Downsample'] is not None:
                print("Downsampling signals to " + str(config['Downsample']) +
                      " Hz.")
                print("Please wait, this might take around 30 minutes.")
                start = time.time()
                # zeros to pad to make signal length a power of 2
                nBins = source.data.shape[0]
                extraBins0 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins0)
                rate = config['Downsample']

                # malloc
                T = int(np.ceil((nBins + extraBins0) * rate / source.rate))
                X = np.zeros((source.data.shape[1], T))

                # One channel at a time, to improve memory usage for long signals
                for ch in np.arange(nChannels):
                    # 1e6 scaling helps with numerical accuracy
                    Xch = source.data[:, ch] * 1e6
                    # Make length a power of 2, improves performance
                    Xch = np.append(Xch, extraZeros)
                    X[ch, :] = resample(Xch, rate, source.rate)
                print(
                    'Downsampling finished in {} seconds'.format(time.time() -
                                                                 start))
            else:  # No downsample
                extraBins0 = 0
                rate = source.rate
                X = source.data[()].T * 1e6

            # re-reference the (scaled by 1e6!) data
            electrodes = source.electrodes
            if config['referencing'] is not None:
                if config['referencing'][0] == 'CAR':
                    print(
                        "Computing and subtracting Common Average Reference in "
                        + str(config['referencing'][1]) + " channel blocks.")
                    start = time.time()
                    X = subtract_CAR(X, b_size=config['referencing'][1])
                    print('CAR subtract time for {}: {} seconds'.format(
                        block_name,
                        time.time() - start))
                elif config['referencing'][0] == 'bipolar':
                    X, bipolarTable, electrodes = get_bipolar_referenced_electrodes(
                        X, electrodes, rate, grid_step=1)

                    # add data interface for the metadata for saving
                    ecephys_module.add_data_interface(bipolarTable)
                    print('bipolarElectrodes stored for saving in ' +
                          block_path)
                else:
                    print('UNRECOGNIZED REFERENCING SCHEME; ', end='')
                    print('SKIPPING REFERENCING!')

            # Apply Notch filters
            if config['Notch'] is not None:
                print("Applying notch filtering of " + str(config['Notch']) +
                      " Hz")
                # zeros to pad to make signal length a power of 2
                nBins = X.shape[1]
                extraBins1 = 2**(np.ceil(np.log2(nBins)).astype('int')) - nBins
                extraZeros = np.zeros(extraBins1)
                start = time.time()
                for ch in np.arange(nChannels):
                    Xch = np.append(X[ch, :], extraZeros).reshape(1, -1)
                    Xch = linenoise_notch(Xch,
                                          rate,
                                          notch_freq=config['Notch'])
                    if ch == 0:
                        X2 = Xch.reshape(1, -1)
                    else:
                        X2 = np.append(X2, Xch.reshape(1, -1), axis=0)
                print('Notch filter time for {}: {} seconds'.format(
                    block_name,
                    time.time() - start))

                X = np.copy(X2)
                del X2
            else:
                extraBins1 = 0

            # Remove excess bins (from the zero padding in previous steps);
            # note X[:, 0:-0] would be empty, so guard against excessBins == 0
            excessBins = int(
                np.ceil(extraBins0 * rate / source.rate) + extraBins1)
            if excessBins > 0:
                X = X[:, :-excessBins]
            X = X.astype('float32')  # signal (nChannels,nSamples)
            X /= 1e6  # Scales signals back to volts

            # Add preprocessed downsampled signals as an electrical_series
            referencing = 'None' if config['referencing'] is None else config[
                'referencing'][0]
            notch = 'None' if config['Notch'] is None else str(config['Notch'])
            downs = 'No' if config['Downsample'] is None else 'Yes'
            config_comment = ('referencing:' + referencing + ',Notch:' +
                              notch + ', Downsampled:' + downs)

            # create an electrical series for the LFP and store it in lfp
            lfp_ts = lfp.create_electrical_series(name='preprocessed',
                                                  data=X.T,
                                                  electrodes=electrodes,
                                                  rate=rate,
                                                  description='',
                                                  comments=config_comment)
            ecephys_module.add_data_interface(lfp)

            # Write LFP to NWB file
            io.write(nwb)
            print('LFP saved in ' + block_path)
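
# Hypothetical usage sketch (path and parameter values are assumptions):
# config = {'referencing': ('CAR', 16), 'Notch': 60, 'Downsample': 400}
# preprocess_raw_data('EC1_B1.nwb', config)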
Example No. 28
####################
# If you are interested in all PatchClampSeries with a given sweep number,
# use the :py:meth:`~pynwb.icephys.SweepTable.get_series` method.

series = nwbfile.sweep_table.get_series(15)

####################
# .. _icephys_writing:
#
# Once you have finished adding all of your data to the :py:class:`~pynwb.file.NWBFile`,
# write the file with :py:class:`~pynwb.NWBHDF5IO`.

from pynwb import NWBHDF5IO

io = NWBHDF5IO('icephys_example.nwb', 'w')
io.write(nwbfile)
io.close()

####################
# For more details on :py:class:`~pynwb.NWBHDF5IO`, see the :ref:`basic tutorial <basic_writing>`.

####################
# .. _icephys_reading:
#
# Reading electrophysiology data
# ------------------------------
#
# Now that you have written some intracellular electrophysiology data, you can read it back in.

io = NWBHDF5IO('icephys_example.nwb', 'r')
nwbfile = io.read()
Example No. 29
def high_gamma_estimation(block_path, bands_vals, new_file=''):
    """
    Takes preprocessed LFP data and calculates High-Gamma power from the
    averaged power of standard Hilbert transform on 70~150 Hz bands.

    Parameters
    ----------
    block_path : str
        subject file path
    bands_vals : [2,nBands] numpy array with Gaussian filter parameters, where:
        bands_vals[0,:] = filter centers [Hz]
        bands_vals[1,:] = filter sigmas [Hz]
    new_file : str
        if this argument is of form 'path/to/new_file.nwb', High Gamma power
        will be saved in a new file. If it is an empty string, '', High Gamma
        power will be saved in the current NWB file.

    Returns
    -------
    Saves High Gamma power (TimeSeries) in the current or a new NWB file,
    but only if a container for these data does not already exist there.
    """

    # Get filter parameters
    band_param_0 = bands_vals[0, :]
    band_param_1 = bands_vals[1, :]

    with NWBHDF5IO(block_path, 'r+', load_namespaces=True) as io:
        nwb = io.read()
        lfp = nwb.processing['ecephys'].data_interfaces[
            'LFP'].electrical_series['preprocessed']
        rate = lfp.rate

        nBands = len(band_param_0)
        nSamples = lfp.data.shape[0]
        nChannels = lfp.data.shape[1]
        Xp = np.zeros(
            (nBands, nChannels, nSamples))  #power (nBands,nChannels,nSamples)

        # Apply Hilbert transform ---------------------------------------------
        print('Running High Gamma estimation...')
        start = time.time()
        for ch in np.arange(nChannels):
            Xch = lfp.data[:,
                           ch] * 1e6  # 1e6 scaling helps with numerical accuracy
            Xch = Xch.reshape(1, -1)
            Xch = Xch.astype('float32')  # signal (nChannels,nSamples)
            X_fft_h = None
            for ii, (bp0, bp1) in enumerate(zip(band_param_0, band_param_1)):
                kernel = gaussian(Xch, rate, bp0, bp1)
                X_analytic, X_fft_h = hilbert_transform(Xch,
                                                        rate,
                                                        kernel,
                                                        phase=None,
                                                        X_fft_h=X_fft_h)
                Xp[ii, ch, :] = abs(X_analytic).astype('float32')
        print(
            'High Gamma estimation finished in {} seconds'.format(time.time() -
                                                                  start))

        # data: (ndarray) dims: num_times * num_channels * num_bands
        Xp = np.swapaxes(Xp, 0, 2)
        HG = np.mean(Xp, 2)  # average of high gamma bands

        # Storage of High Gamma on NWB file -----------------------------
        if new_file == '' or new_file is None:  # on current file
            # make electrodes table
            nElecs = HG.shape[1]
            ecephys_module = nwb.processing['ecephys']

            # first check for a table among the file's data_interfaces
            ####
            if lfp.electrodes.table.name in ecephys_module.data_interfaces:
                LFP_dynamic_table = ecephys_module.data_interfaces[
                    lfp.electrodes.table.name]
            else:
                # otherwise use the file's electrodes table
                LFP_dynamic_table = nwb.electrodes
            ####
            elecs_region = LFP_dynamic_table.create_region(
                name='electrodes',
                region=list(range(nElecs)),
                description='all electrodes')
            hg = ElectricalSeries(name='high_gamma',
                                  data=HG,
                                  electrodes=elecs_region,
                                  rate=rate,
                                  description='')

            ecephys_module.add_data_interface(hg)
            io.write(nwb)
            print('High Gamma power saved in ' + block_path)
        else:  # on new file
            with NWBHDF5IO(new_file, 'r+', load_namespaces=True) as io_new:
                nwb_new = io_new.read()
                # make electrodes table
                nElecs = HG.shape[1]
                elecs_region = nwb_new.electrodes.create_region(
                    name='electrodes',
                    region=np.arange(nElecs).tolist(),
                    description='all electrodes')
                hg = ElectricalSeries(name='high_gamma',
                                      data=HG,
                                      electrodes=elecs_region,
                                      rate=rate,
                                      description='')

                try:  # if ecephys module already exists
                    ecephys_module = nwb_new.processing['ecephys']
                except KeyError:  # otherwise create the ecephys ProcessingModule
                    ecephys_module = ProcessingModule(
                        name='ecephys',
                        description='Extracellular electrophysiology data.')
                    nwb_new.add_processing_module(ecephys_module)

                ecephys_module.add_data_interface(hg)
                io_new.write(nwb_new)
                print('High Gamma power saved in ' + new_file)
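
# Hypothetical usage sketch: eight Gaussian bands spanning roughly 70-150 Hz
# (the centers and sigmas here are assumptions, not prescribed values).
# import numpy as np
# centers = np.linspace(70, 150, 8)
# sigmas = np.full(8, 5.)
# high_gamma_estimation('EC1_B1.nwb', np.vstack([centers, sigmas]))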
Example No. 30
nwbfile.create_epoch('the second epoch', 6.0, 8.0, ['second', 'example'], [test_ts, mod_ts])

####################
# .. _basic_writing:
#
# Writing an NWB file
# -------------------
#
# NWB I/O is carried out using the :py:class:`~pynwb.NWBHDF5IO` class [#]_. This class is responsible
# for mapping an :py:class:`~pynwb.file.NWBFile` object into HDF5 according to the NWB schema.
#
# To write an :py:class:`~pynwb.file.NWBFile`, use the :py:func:`~pynwb.form.backends.io.FORMIO.write` method.

from pynwb import NWBHDF5IO

io = NWBHDF5IO('basic_example.nwb', 'w')
io.write(nwbfile)
io.close()

####################
# You can also use :py:class:`~pynwb.NWBHDF5IO` as a context manager:

with NWBHDF5IO('basic_example.nwb', 'w') as io:
    io.write(nwbfile)

####################
# .. _basic_reading:
#
# Reading an NWB file
# -------------------
#