Example #1
def read_tdt_block(binPath, t1=None, t2=None, store=None, channel=None):
    """Wrapper arount tdt.read_block that avoids bug in the function.

    tdt.read_block returns "channel 1 not found in store" if first channel
    of a single-channel store.
    """
    try:
        return tdt.read_block(binPath,
                              t1=t1,
                              t2=t2,
                              store=store,
                              channel=channel)
    except Exception as e:
        # tdt.read_block raises "channel 1 not found in store" when channel 1
        # of a single-channel store is requested.
        single_chan_store = tdt.read_block(
            binPath, t2=1.0, nodata=True
        ).streams[store].data.ndim == 1  # data is 1-D if the store has a single channel, 2-D otherwise
        if single_chan_store and channel == 1:
            print(f"`{store}` has only 1 channel -> working around tdt.read_block bug")
            return tdt.read_block(
                binPath, t1=t1, t2=t2, store=store,
                channel=0)  # channel=0 returns "all" channels of the single-channel store
        raise e
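
A hedged usage sketch (the block path and store name are illustrative, not from the original): requesting channel 1 of a hypothetical single-channel store 'EMGs' through the wrapper falls back to channel=0 instead of raising.

blk = read_tdt_block('path/to/block', t1=0.0, t2=10.0,
                     store='EMGs', channel=1)  # 'EMGs' is a hypothetical store name
emg = blk.streams['EMGs'].data  # 1-D array for a single-channel store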
Example #2
    def __init__(self, path, channels=None):
        self.path = path
        self.channels = channels

        if channels is None:
            self.tdt_obj = tdt.read_block(path)
        else:
            self.tdt_obj = tdt.read_block(path, channel=channels)

        self.streams = self.get_streams()
        self.block_name = self.tdt_obj['info']['blockname']
        self.start_time = self.tdt_obj['info']['utc_start_time']

        logger.debug('Streams: ' + ', '.join(self.streams))
Example #3
    def run_conversion(
        self,
        block_path,
        electrode_table_path,
        session_id,
        session_description="test read",
    ):
        # Converts TDT block to NWB data

        # read in TDT file
        print("Reading {} at {}".format(session_id, block_path))
        tdt_data = tdt.read_block(block_path)

        # initialize nwb file, requires a session description, id, and start time
        self.nwb = NWBFile(
            session_description=session_description,  # required
            identifier=session_id,  # required
            session_start_time=tdt_data.info.start_date,
        )
        electrode_group = self.create_electrode_devices()
        electrode_df = self.read_electrode_table(electrode_table_path)
        self.populate_electrode_tables(electrode_df, electrode_group)
        electrode_table_region = self.create_electrode_region(electrode_df)
        self.add_electrical_series(electrode_table_region, tdt_data)
        self.add_audio(tdt_data)
        self.write_nwb("{}.nwb".format(session_id))
Example #4
    def __extract_from_tdt_files(self, device_name, dev_conf):
        tdt_path = self.tdt_path
        header = tdt.read_block(tdt_path, headers=1)
        tdt_tmp = tdt.read_block(tdt_path, channel=1)
        num_channels = len(header.stores[device_name].chan)
        num_samples = len(tdt_tmp.streams[device_name].data)
        # Iteratively write one channel at a time
        data = DataChunkIterator(data=self.__iter_tdt_channels(
            tdt_path, device_name, header, num_channels),
                                 maxshape=(num_samples, num_channels),
                                 buffer_size=BUFFER_SIZE)
        tdt_params = {}
        tdt_params['start_time'] = header.start_time[0]  # numeric, since that's what NWB wants for an ElectricalSeries
        tdt_params['stop_time'] = header.stop_time[0]
        #tdt_params['stop_time'] = dt.datetime.fromtimestamp(header.stop_time[0])
        tdt_params['sample_rate'] = header.stores[device_name].fs
        return data, tdt_params
Example #5
    def get_tdt_object(self):
        '''
        gets TDT meta data object

        Returns:
        - tdt_object: (struct) TDT meta data object
        '''
        tdt_object = tdt.read_block(self.tdt_path, headers=1)
        return tdt_object
Example #6
def make_ppp_video_dict(rat, diet, box, tank, cam, vidfile, posfile, scorer,
                        cas_ttl, malt_ttl, casxy, maltxy):
    """
    Function to make dictionary with position and cue information for rats in PPP
    experiment.

    Parameters
    ----------
    tank : String
        Path to TDT tank.
    posfile : String
        Path to hd5 file generated by DeepLabCut.
    scorer : String
        Scorer string from DeepLabCut file. Can be generated automatically in future.
    cas_ttl : String
        TTL for casein trial times from TDT file.
    malt_ttl : String
        TTL for maltodextrin trial times.
    casxy : Tuple
        Position of casein cue light.
    maltxy : Tuple
        Position of maltodextrin cue light.

    Returns
    -------
    data : Dictionary
        Includes position data, cue data and other information needed for further analysis.

    """

    epocs = (tdt.read_block(tank, evtype=['epocs'])).epocs

    cap = cv.VideoCapture(vidfile)
    fps = int(cap.get(cv.CAP_PROP_FPS))
    success, image = cap.read()
    print(success, vidfile)
    cv.imwrite(Path(vidfile).stem + ".jpg", image)
    cap.release()

    df = pd.read_hdf(posfile)

    return {
        'rat': rat,
        'diet': diet,
        'box': box,
        'nose': df[scorer, 'nose'],
        'L_ear': df[scorer, 'L_ear'],
        'R_ear': df[scorer, 'R_ear'],
        'fps': fps,
        'cas_cue': getattr(epocs, cas_ttl).onset,
        'malt_cue': getattr(epocs, malt_ttl).onset,
        'cas_pos': casxy,
        'malt_pos': maltxy
    }
Example #7

def loaddata(tdtfile, SigBlue, SigUV):

    tdtfile = raw_datafolder + tdtfile  # raw_datafolder is a module-level global

    try:
        tmp = tdt.read_block(tdtfile, evtype=['streams'], store=[SigBlue])
        data = getattr(tmp.streams, SigBlue)['data']
        fs = getattr(tmp.streams, SigBlue)['fs']

        tmp = tdt.read_block(tdtfile, evtype=['streams'], store=[SigUV])
        dataUV = getattr(tmp.streams, SigUV)['data']

        ttls = tdt.read_block(tdtfile, evtype=['epocs']).epocs
    except Exception:
        print('Unable to load data properly.')
        data = []
        dataUV = []
        ttls = []
        fs = []

    return data, dataUV, fs, ttls
Example #8
    def __iter_tdt_channels(self, tdt_path, stream, header, num_channels):
        # This is inefficient because it loads all streams. As far as I can tell,
        # there's no way to select which streams to load.
        for ch in range(num_channels):
            tdt_ch = ch + 1
            tdt_struct = tdt.read_block(tdt_path,
                                        channel=tdt_ch,
                                        headers=header,
                                        evtype=['streams'])
            ch_data = tdt_struct.streams[stream].data
            ch_data = np.reshape(
                ch_data,
                (1, -1))  # quirk to make data samples x channels, since it's transposed downstream
            yield ch_data
        return
Example #9
def load_stimulation_data(path):
    all_data = {}
    block_names = os.listdir(path)
    for block in block_names:
        full_path_tdt = os.path.join(path, block)
        if os.path.isdir(full_path_tdt) and "Block" in block:
            data = tdt.read_block(full_path_tdt,
                                  evtype=['epocs', 'snips', 'scalars'],
                                  nodata=1)
            block_number = int(block.split('-')[1])
            all_data[block_number] = {
                "epocs": data.epocs,
                "snips": data.snips,
                "eNeu": data.snips.eNeu
            }
    return all_data
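
A hedged usage sketch (the tank path is illustrative): load every 'Block-*' directory under a tank and inspect one block's epocs; keys are the block numbers parsed from the folder names.

all_data = load_stimulation_data('path/to/tank')  # illustrative path
print(sorted(all_data.keys()))  # block numbers, e.g. [1, 2, 3]
print(all_data[1]['epocs'])     # epocs for Block-1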
Example #10
def get_data(blocks, channels):
    # extracts data from TDT tank and stores it in a MultiIndex pandas dataframe
    dframe = []
    for i in range(len(blocks)):
        for c in channels:
            print(i)
            print(c)
            if i in (25, 371, 382, 436, 443, 444, 466, 666):  # skip known-bad blocks
                continue
            data = read_block(blocks[i],
                              evtype=['streams', 'epocs'],
                              channel=c)

            nblock = int(data.info.blockname.partition("-")[2])  # gets block #
            nepocs = pd.DataFrame(data.epocs).columns.values.tolist()
            if 'PROT' in nepocs: nepocs.remove('PROT')
            # df = pd.DataFrame([nepocs]).T
            # df.columns = [nblock]
            # dframe.merge(df)

            epoc_list = ['TLvl', 'FDur', 'vdur',
                         'Nois']  # use epoc_list to automate PARAM1 selection
            PARAM1 = next(iter(set(nepocs).intersection(epoc_list)))
            number_epocs = len(nepocs)

            filtered_data = epoc_filter(data, PARAM1, t=TRANGE)
            all_signals = pd.DataFrame(
                data=extract_lfp(filtered_data, ARTIFACT))
            tankname = filtered_data.info.tankpath.split()[4].partition(
                "/")[2].partition("_")
            all_signals['catname'] = tankname[0]
            all_signals['tanknumber'] = tankname[2]
            all_signals['channel'] = c
            all_signals['blockname'] = str_blocknames(nblock, PARAM1)
            for e in range(len(nepocs)):
                all_signals[str(nepocs[e])] = data.epocs[nepocs[e]].data
            col_names = [
                x for x in all_signals.columns.values.tolist()
                if not isinstance(x, int)
            ]
            # df['column_new_1'], df['column_new_2'], df['column_new_3'] = [np.nan, 'dogs', 3]
            id_signals = all_signals.set_index(
                col_names) * 1e6  # index the df and convert data to microvolts
            dframe.append(id_signals)
    df = pd.concat(dframe, ignore_index=False)
    return df
Example #11

def extract_data_object_photometry_raw_traces(Mouse_Date_FileName, path_to_import, path_to_save):

    l_mouse = list(Mouse_Date_FileName["mouse"]) 
    l_photo_day = list(Mouse_Date_FileName["file name"]) 

    raw_photo_data_per_trial_channels_data_set = []
    counter = 1 
    
    
    number_of_sessions_dataset = len(l_mouse)

    for (mouse, photo_day) in zip(l_mouse, l_photo_day):
        print(f"session number {counter} was imported (out of {number_of_sessions_dataset})")
        
        photometry_data = tdt.read_block(path_to_import+"/"+mouse+"/"+photo_day)
        photometry_data = reading_in_photometryData_all_Channels(photometry_data)
        raw_photo_data_per_trial_channels_data_set.append(photometry_data)
        
        counter += 1 
        
    return raw_photo_data_per_trial_channels_data_set
Example #12
    def __extract_stream_data(self, device_name, dev_conf):
        '''
        extracts TDT stream

        Args:
        - device_name: (str) stream name (e.g., 'ECoG' or 'Poly')
        - dev_conf: (dict) metadata for the device.
                           nwb_builder.metadata['device'][device_name]

        Returns:
        - data: (numpy array) [samples] by [channels]
        - parameters (dict) includes stream meta data
        '''
        tdt_struct = tdt.read_block(self.tdt_path, store=device_name)
        stream = tdt_struct.streams[device_name]
        data = stream.data.T
        parameters = {
            'start_time': stream.start_time,
            'sample_rate': stream.fs
        }
        return data, parameters
Example #13
print(read_sev.__doc__)
doc.add_text(stdoutput)  # handout: exclude
doc.show()  # handout: exclude
"""
Download demo data from the TDT website
"""

download_demo_data()
"""
This example uses our [example data sets](https://www.tdt.com/files/examples/TDTExampleData.zip). To import your own data, replace BLOCK_PATH with the full path to your own data block.

In Synapse, you can find the block path in the database. Go to Menu > History. Find your block, then Right-Click > Copy path to clipboard.
"""

BLOCK_PATH = 'data/Algernon-180308-130351'
data = read_block(BLOCK_PATH)
print(data)
doc.add_text(stdoutput)  # handout: exclude
doc.show()  # handout: exclude
"""
**read_block** returns a structured object. It is a Python dictionary but also allows you to use the dot syntax like in Matlab, so you can access fields within the structure with either method. These two ways of looking at the block info field are equivalent:
"""

print(data.info)
doc.add_text(stdoutput)  # handout: exclude
doc.show()  # handout: exclude
print(data['info'])
doc.add_text(stdoutput)  # handout: exclude
doc.show()  # handout: exclude
"""
These three methods to access the 'Wav1' store sampling rate are equivalent:
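(The original snippet is cut off here; the reconstruction below shows the three equivalent accessors.)
"""

fs1 = data.streams.Wav1.fs
fs2 = data['streams'].Wav1.fs
fs3 = data['streams']['Wav1']['fs']
print(fs1, fs2, fs3)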
Example #14
def AddTDTRawDataToNWB(tdt_block_dir,
                       nwb_file_name='',
                       verbose=False,
                       elec_ids=None):
    """
        Copies raw electrophysiological data from TDT format to Neurodata Without Borders (NWB) format.
        Typically, the NWB file will already be initialized by calling InitializeNWBFromTDT. Multiple
        electrodes can be added at once or with separate calls to this function.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change the TDT extension to 'nwb'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :param elec_ids: [optional] {list} List of electrode IDs to copy over. If empty, all are copied
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.splitext(tdt_tsq_files[0])[0] + '.nwb'
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
    except Exception as e:  # catch *all* exceptions
        raise FileNotFoundError(
            "Couldn't open TDT file. Error: {}".format(e))

    # Initialize the NWB file
    nwb_io = []
    try:
        if not path.isfile(nwb_file_name):
            # Initialize NWB file
            if verbose:
                print("NWB file doesn't exist. Creating new one: %s..." %
                      (nwb_file_name))
            InitializeNWBFromTDT(tdt_block_dir, nwb_file_name, verbose=verbose)

        # Append to existing file
        if verbose: print("Opening NWB file %s..." % (nwb_file_name), end='')
        nwb_file_append = True
        nwb_io = NWBHDF5IO(nwb_file_name, mode='a')
        nwb_file = nwb_io.read()
        if verbose: print("done.")
    except Exception as e:  # catch *all* exceptions
        if nwb_io: nwb_io.close()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)

    # Validate the elec_ids list
    if not elec_ids:
        # Grab electrode list from NWB file
        elec_ids = nwb_file.electrodes.id[:]

    # Load data for all of the electrodes
    if verbose: print("Loading raw data from TDT file...")
    chan_data = []
    starting_time = 0
    fs = 24414.0625
    for cur_elec_id in elec_ids:
        # Convert electrode id into RAW and channel
        cur_raw = np.floor((cur_elec_id - 1) / 16) + 1
        cur_raw = 'RAW{:1.0f}'.format(cur_raw)
        cur_chan = ((cur_elec_id - 1) % 16) + 1
        print('{:s}_{:1.0f}'.format(cur_raw, cur_chan))

        #Read this channel data in from file
        cur_chan_data = tdt.read_sev(tdt_block_dir,
                                     channel=cur_chan,
                                     event_name=cur_raw)
        fs = cur_chan_data[cur_raw].fs
        starting_time = cur_chan_data.time_ranges[0][0]
        cur_chan_data = cur_chan_data[cur_raw].data
        cur_chan_data = np.reshape(cur_chan_data, [len(cur_chan_data), 1])
        if len(chan_data) == 0:
            chan_data = cur_chan_data
        else:
            chan_data = np.concatenate([chan_data, cur_chan_data], axis=1)

        if verbose: print("\tLoaded electrode %d..." % (cur_elec_id))

    # Get resolution of signal in V
    if verbose: print("Processing electrode information.")
    V_resolution = 0.0  #TODO: See if we can get resolution information from TDT?

    # Create electrode table for new electrodes
    elec_table_ind = np.ones((len(elec_ids), 1)) * np.NaN
    for cur_elec_ind, cur_elec in enumerate(elec_ids):
        # In order to create electrode table, we have to have indexes for each channel into electrode list in NWB file
        cur_elec_table_ind = 0
        while (cur_elec_table_ind < len(nwb_file.electrodes)) and (
                nwb_file.electrodes[cur_elec_table_ind, 0] != cur_elec):
            cur_elec_table_ind = cur_elec_table_ind + 1
        if cur_elec_table_ind >= len(nwb_file.electrodes):
            raise ValueError("Couldn't find electrode %d in NWB file list." %
                             (cur_elec))
        elec_table_ind[cur_elec_ind] = cur_elec_table_ind
    elec_table_ind = elec_table_ind.transpose().tolist()[
        0]  # Convert to list for create_electrode_table_region
    electrode_table_region = nwb_file.create_electrode_table_region(
        elec_table_ind, "Electrodes %s" % (elec_ids))

    # Write raw data for the electrode
    if verbose: print("\tAdding to NWB...", end='')
    ephys_ts = ElectricalSeries(
        'Raw Data for Channels %s' % (elec_ids),
        chan_data,
        electrode_table_region,
        starting_time=starting_time,
        rate=fs,
        resolution=V_resolution,
        conversion=float(
            1.0),  #TODO: Check that TDT does read in V, as expected by NWB
        comments="",
        description="Raw data from TDT file.")
    nwb_file.add_acquisition(ephys_ts)
    if verbose: print("done")

    # Write the file
    if verbose: print("\tWriting NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
Example #15
def InitializeNWBFromTDT(tdt_block_dir,
                         nwb_file_name='',
                         experiment=None,
                         subject=None,
                         electrode_info=None,
                         electrode_group=None,
                         notes=None,
                         overwrite=False,
                         verbose=False):
    """
        Initializes an NWB file to copy over data from a TDT file.  User-specified information is
        added to the NWB file as metadata about the data.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change TDT extension to 'nwb'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :param experiment: [optional] {dict} Dictionary that contains information about experimenter, name, animal, lab, and institution
        :param subject: [optional] {dict} Dictionary that contains information about subject
        :param electrode_info: [optional] {list} List of dictionaries carrying information about electrodes
        :param electrode_group: [optional] {list} List of dictionaries carrying information about electrode groupings
        :param notes: [optional] {str} Notes relevant for the dataset
        :param overwrite: [optional] {bool} Whether to overwrite the NWB file if it already exists
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.join(tdt_block_dir,
                                  path.splitext(tdt_tsq_files[0])[0] + '.nwb')
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file, get header and basic information
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
        tdt_data = tdt.read_block(tdt_block_dir, evtype=['epocs'])
    except Exception as e:  # catch *all* exceptions
        raise FileNotFoundError("Couldn't open TDT file. Error: %s" % e)

    # Process experiment information from inputs
    if (not experiment) or not (type(experiment) is dict):
        if verbose: print("Using default experimental details.")
        nwb_session_description = 'Experimental details not provided'
        experimenter = ''
        lab = ''
        institution = ''
    else:
        if 'name' not in experiment.keys():
            experiment['name'] = ''
        if 'animal' not in experiment.keys():
            experiment['animal'] = ''
        if 'experimenter' not in experiment.keys():
            experiment['experimenter'] = ''
        if 'lab' not in experiment.keys():
            experiment['lab'] = ''
        if 'institution' not in experiment.keys():
            experiment['institution'] = ''
        experimenter = experiment['experimenter']
        lab = experiment['lab']
        institution = experiment['institution']
        nwb_session_description = 'Experiment: ' + experiment['name'] + \
                                  ', Animal: ' + experiment['animal'] + \
                                  ', Date: ' + tdt_data.info.start_date.strftime('%m/%d/%Y')  # description of the recording session
        if verbose:
            print("Experiment description: %s" % (nwb_session_description))

    # Process subject information
    if (not subject) or not (type(subject) is dict):
        cur_subject = None
    else:
        try:
            # Create a subject object from the passed dictionary
            cur_subject = file.Subject(**subject)
        except Exception as e:  # catch *all* exceptions
            raise ValueError("Couldn't create subject object. Error: %s" % (e))

    # Define the NWB file identifier as the filename, as this should be unique
    nwb_identifier = path.split(tdt_tsq_files[0])[1]  # unique file identifier

    # Initialize the NWB file
    nwb_io = []
    try:
        if not overwrite and path.isfile(nwb_file_name):
            raise FileExistsError(
                "Can't update basic attributes of NWB file. Create new NWB file if you need to change attributes."
            )
        else:
            if verbose:
                print("Creating new NWB file %s..." % (nwb_file_name), end='')
            nwb_file_append = False
            nwb_io = NWBHDF5IO(nwb_file_name, mode='w')
            tdt_notes = ''
            if 'Note' in tdt_data.epocs.keys():
                for cur_note in tdt_data.epocs.Note:
                    tdt_notes = tdt_notes + cur_note.notes + '; '
            nwb_file = NWBFile(nwb_session_description,
                               nwb_identifier,
                               tdt_data.info.start_date,
                               file_create_date=datetime.now(tzlocal()),
                               experimenter=experimenter,
                               subject=cur_subject,
                               lab=lab,
                               institution=institution,
                               source_script='ConvertTDTToNWB.py',
                               source_script_file_name='ConvertTDTToNWB.py',
                               notes=(notes or '') + tdt_notes)  # guard against notes=None
            if verbose: print("done.")
    except Exception as e:  # catch *all* exceptions
        if nwb_io: nwb_io.close()
        tdt_data.clear()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)
    if verbose: print("Created NWB file.")

    # Create device in the NWB file
    device = nwb_file.create_device(name='TDT')
    if verbose: print("Created device.")

    # Make sure electrode input is provided and valid
    if (not electrode_info) or not (type(electrode_info) is list):
        if verbose: print("Creating electrode information from directory.")

        # Grab information about SEV files
        tdt_sev_files = [
            f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.sev'
        ]
        electrode_list = []
        for sev_ind, sev_file in enumerate(tdt_sev_files):
            sev_match = re.search(".+_RAW(\\d+)_Ch(\\d+).sev", sev_file)
            electrode_list.append((int(sev_match.groups()[0]) - 1) * 16 +
                                  int(sev_match.groups()[1]))
        electrode_list = np.sort(electrode_list)

        # Create electrode information for each electrode
        electrode_info = []
        for cur_elec in electrode_list:
            electrode_info.append({
                'x': 0.0,
                'y': 0.0,
                'z': 0.0,
                'impedance': -1.0,
                'location': 'unknown',
                'group': 'electrodes',
                'id': cur_elec,
                'filtering': 'Unknown'
            })
        if verbose: print("\tCreated %d electrodes." % (len(electrode_info)))

    # Make sure electrode group input is provided and valid
    if verbose: print("Processing electrode groups.")
    default_electrode_group = {
        'name': 'default electrode group',
        'description': 'Generic electrode group for ungrouped electrodes.',
        'location': 'Unknown'
    }
    if (not electrode_group) or not (type(electrode_group) is list):
        electrode_group = [default_electrode_group]
    else:
        electrode_group.insert(0, default_electrode_group)

    # Fill in any missing information about electrodes
    ovr_elec_group_list = [
    ]  # will be used to keep track of electrode group list
    if verbose: print("Processing electrode information.")
    for cur_elec_info_ind, cur_elec_info in enumerate(electrode_info):
        # If id is not provided, then throw an error
        if ('id' not in cur_elec_info.keys()) or (not cur_elec_info['id']):
            tdt_data.clear()
            nwb_io.close()
            raise ValueError(
                "Couldn't process electrode inputs. Must provide IDs for all electrodes or pass empty array."
            )
        # If filtering is not provided, add it from the file (can't with TDT)
        if ('filtering' not in cur_elec_info.keys()) or (
                not cur_elec_info['filtering']):
            electrode_info[cur_elec_info_ind]['filtering'] = 'Unknown'
        # If other variables are not provided, just initialize them to defaults
        if ('x' not in cur_elec_info.keys()) or (not cur_elec_info['x']):
            electrode_info[cur_elec_info_ind]['x'] = 0.0
        if ('y' not in cur_elec_info.keys()) or (not cur_elec_info['y']):
            electrode_info[cur_elec_info_ind]['y'] = 0.0
        if ('z' not in cur_elec_info.keys()) or (not cur_elec_info['z']):
            electrode_info[cur_elec_info_ind]['z'] = 0.0
        if ('location' not in cur_elec_info.keys()) or (
                not cur_elec_info['location']):
            electrode_info[cur_elec_info_ind]['location'] = 'unknown'
        if ('impedance' not in cur_elec_info.keys()) or (
                not cur_elec_info['impedance']):
            electrode_info[cur_elec_info_ind]['impedance'] = float(-1.0)
        if ('group'
                not in cur_elec_info.keys()) or (not cur_elec_info['group']):
            electrode_info[cur_elec_info_ind][
                'group'] = 'default electrode group'
        # Find the index to the electrode group
        grp_elec_ind = 0
        while (grp_elec_ind < len(electrode_group)) and (
                cur_elec_info['group'] !=
                electrode_group[grp_elec_ind]['name']):
            grp_elec_ind = grp_elec_ind + 1
        # If we made it past the end, this electrode group doesn't exist, put it in the default
        if grp_elec_ind >= len(electrode_group):
            grp_elec_ind = -1
        # Save the index into group dictionary for easy reference
        electrode_info[cur_elec_info_ind][
            'electrode_group_index'] = grp_elec_ind
        ovr_elec_group_list.append(grp_elec_ind)

    # Create electrode groups
    nwb_electrode_group = []
    if verbose: print("Creating electrode groups.")
    for cur_elec_group_ind, cur_elec_group in enumerate(electrode_group):
        # Create and add to our list
        nwb_electrode_group.append(
            nwb_file.create_electrode_group(
                cur_elec_group['name'],
                description=cur_elec_group['description'],
                location=cur_elec_group['location'],
                device=device))
    if verbose:
        print("\tCreated %d electrode groups." % (len(electrode_group)))

    # Create electrodes in NWB file
    if verbose: print("Adding electrodes to NWB.")
    nwb_file_elec_list = []
    for cur_elec_ind, cur_elec in enumerate(electrode_info):
        # Add electrode to NWB file
        nwb_file.add_electrode(
            id=cur_elec['id'],
            x=cur_elec['x'],
            y=cur_elec['y'],
            z=cur_elec['z'],
            imp=cur_elec['impedance'],
            location=cur_elec['location'],
            filtering=cur_elec['filtering'],
            group=nwb_electrode_group[cur_elec['electrode_group_index']])
        # Keep track of electrodes entered so we can index for the electrode table
        nwb_file_elec_list.append(cur_elec['id'])
    if verbose: print("\tAdded %d electrodes." % (len(electrode_info)))

    # Clear the TDT data from memory
    tdt_data.clear()

    # Write to file and close
    if verbose: print("\tWriting NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
Example #16
def AddTDTAnalogDataToNWB(tdt_block_dir,
                          nwb_file_name='',
                          signal_info=None,
                          module_name='behavior',
                          verbose=False):
    """
        Copies analog (continuous) data from the specified TDT block to a Neurodata Without Borders (NWB) file.
        This is usually continuous signals about behavior (joystick position, screen refreshes, etc).
        The user should provide information about the signals to help end users understand what is in each signal.
        Multiple calls can be used to load multiple data signals from multiple files.

        Typically, the NWB file will already be initialized by calling InitializeNWBFromTDT.

        :param tdt_block_dir: {str} full path of TDT files to convert to NWB. If empty, will open dialog.
        :param nwb_file_name: [optional] {str} full path of NWB file to export to. Default is to change the TDT extension to 'nwb'
        :param signal_info: [optional] {list} List of dictionaries with information about the signals to save.
        :param module_name: [optional] {str} Name of module to store data.  Usually 'behavior' but could also be 'ecephys' or 'misc'
        :param verbose: [optional] {bool} whether to print updates while converting. Default is false.
        :return: {str} filename of NWB file (empty if error)
        """
    # Check to see if user specified a TDT filename
    if not tdt_block_dir:  # no file name passed
        # Ask user to specify a file
        if 'app' not in locals():
            app = QApplication([])
        tdt_block_dir = QFileDialog.getExistingDirectory(
            QFileDialog(), 'Select Directory', getcwd())

    # Check to see if valid nwb_file_name is passed
    tdt_tsq_files = [
        f for f in listdir(tdt_block_dir) if path.splitext(f)[1] == '.tsq'
    ]
    if not nwb_file_name:
        nwb_file_name = path.splitext(tdt_tsq_files[0])[0] + '.nwb'
    if verbose: print("Writing to NWB data file %s" % (nwb_file_name))

    # Initialize the TDT file
    try:
        # Requires the raw data to be imported
        tdt_header = tdt.read_block(tdt_block_dir, headers=1)
    except Exception as e:  # catch *all* exceptions
        raise FileNotFoundError(
            "Couldn't open TDT file. Error: {}".format(e))

    # Initialize the NWB file
    nwb_io = []
    try:
        if not path.isfile(nwb_file_name):
            # Initialize NWB file
            if verbose:
                print("NWB file doesn't exist. Creating new one: %s..." %
                      (nwb_file_name))
            InitializeNWBFromTDT(tdt_block_dir, nwb_file_name, verbose=verbose)

        # Append to existing file
        if verbose: print("Opening NWB file %s..." % (nwb_file_name), end='')
        nwb_file_append = True
        nwb_io = NWBHDF5IO(nwb_file_name, mode='a')
        nwb_file = nwb_io.read()
        if verbose: print("done.")
    except Exception as e:  # catch *all* exceptions
        if nwb_io: nwb_io.close()
        raise FileExistsError("Couldn't open NWB file. Error: %s" % e)

    # Make sure module name is either behavior or misc
    module_name = module_name.lower()
    if (module_name != 'behavior') and (module_name != 'misc'):
        raise ValueError("Module type must either be 'behavior' or 'misc'.")

    # Parse the signal_info list
    if not signal_info:
        raise ValueError("Must specify signals to load.")
    elec_ids = []
    for cur_signal_ind, cur_signal_info in enumerate(signal_info):
        if 'label' not in cur_signal_info.keys():
            raise ValueError(
                "Signal information must have a label for each signal.")
        if 'name' not in cur_signal_info.keys():
            raise ValueError(
                "Signal information must have a name for each signal. (Should be user-understandable)"
            )
        if 'comments' not in cur_signal_info.keys():
            signal_info[cur_signal_ind]['comments'] = ''
        # Find electrode IDs for this signal
        if ('elec_id' not in cur_signal_info.keys()) or (
                not cur_signal_info['elec_id']):
            # Loop through and grab all signals of type 'streams' that aren't RAW data
            signal_info[cur_signal_ind]['elec_id'] = []
            for cur_store in tdt_header.stores.keys():
                # Grab all 'streams' but ignore RAWs
                if (tdt_header.stores[cur_store].type_str
                        == 'streams') and (cur_store[0:3] != 'RAW'):
                    signal_info[cur_signal_ind]['elec_id'].append(cur_store)

    # Create processing module for saving data
    if module_name not in nwb_file.processing.keys():
        if verbose:
            print(
                "Specified processing module (%s) does not exist. Creating." %
                (module_name))
        signal_module = ProcessingModule(
            name=module_name,
            description="Processing module for continuous signal data from %s."
            % (path.split(tdt_tsq_files[0])[1]))
        nwb_file.add_processing_module(signal_module)

    # Create data interface for the analog signals
    signal_info_str = signal_info[0]['name']
    for i in range(1, len(signal_info)):
        signal_info_str = signal_info_str + ", " + signal_info[i]['name']
    if verbose:
        print("Creating %s data interface for signals %s." %
              (module_name, signal_info_str))
    if module_name == 'behavior':
        cur_data_interface = BehavioralTimeSeries(name="Analog signals (" +
                                                  signal_info_str + ")")
    elif module_name == 'misc':
        cur_data_interface = AbstractFeatureSeries(name="Analog signals (" +
                                                   signal_info_str + ")")
    else:
        raise ValueError("Module type must either be 'behavior' or 'misc'.")

    if verbose: print("Adding signals...")
    for cur_signal_ind, cur_signal_info in enumerate(signal_info):
        # Get data from file
        analog_data = []
        analog_fs = []
        analog_start_time = []
        for cur_elec_id in cur_signal_info['elec_id']:
            cur_data = tdt.read_block(tdt_block_dir, store=cur_elec_id)
            cur_analog_data = cur_data.streams[cur_elec_id].data
            cur_analog_data = np.reshape(cur_analog_data,
                                         [len(cur_analog_data), 1])
            if len(analog_data) == 0:
                analog_fs = cur_data.streams[cur_elec_id].fs
                analog_start_time = cur_data.streams[cur_elec_id].start_time
                analog_data = cur_analog_data
            else:
                analog_fs.append(cur_data.streams[cur_elec_id].fs)
                analog_start_time.append(
                    cur_data.streams[cur_elec_id].start_time)
                analog_data = np.concatenate([analog_data, cur_analog_data],
                                             axis=1)

        # Make sure all of the fs and start_times are the same
        analog_start_time = np.unique(analog_start_time)
        analog_fs = np.unique(analog_fs)
        if len(analog_start_time) != 1 or len(analog_fs) != 1:
            raise ValueError(
                'Start time and sampling frequency need to be the same for signals to be combined.'
            )

        # Create time series
        cur_data_interface.create_timeseries(
            name=cur_signal_info['name'],
            data=analog_data,
            comments=cur_signal_info['comments'],
            unit="V",  #TODO: Check that this is correct for TDT
            resolution=1.0,  #TODO: Can we get this from TDT?
            conversion=0.001,  #TODO: Check what the correct conversion is for TDT
            starting_time=analog_start_time[0],
            rate=analog_fs[0],
            description="Signal %s from %s." %
            (cur_signal_info['label'], path.split(tdt_tsq_files[0])[1]))
        if verbose: print("\tAdded %s." % (cur_signal_info['label']))

    # Add data interface to module in NWB file
    if verbose: print("Adding data interface to module.")
    nwb_file.processing[module_name].add(cur_data_interface)

    # Write the file
    if verbose: print("Writing NWB file and closing.")
    nwb_io.write(nwb_file)
    nwb_io.close()

    return nwb_file_name
Example #17
        fid.write(os.linesep)
fid.close()
print("Closing...")

# Extract video timing information
print("trying to get timing for video...")
# OLD VERSION
#tank = '\\'.join(re.split('\\\\', params['tank'])[0:-1])
#block = re.split('\\\\', params['tank'])[-1]
#import win32com.client
#h = win32com.client.Dispatch('matlab.application')
#cmd  =  "data = TDTbin2mat(fullfile('%s' ,'%s'),'Type',{'epocs'}); onset=data.epocs.Cam1.onset; offset=data.epocs.Cam1.offset; save(fullfile('%s', '%s','video_timing.mat'), 'onset', 'offset')" % (tank,block, tank,block)
#h.Execute(cmd)
# END [OLD VERSION] ####################
tank = params['tank']
ep_data = tdt.read_block(tank, evtype=['epocs'])
onset = ep_data.epocs.Cam1.onset
offset = ep_data.epocs.Cam1.offset
so.savemat(os.path.join(tank, 'video_timing.mat'), {
    'onset': onset,
    'offset': offset
})

# extract movie frames
ffmpeg_path = r'C:\Users\ChungWeberPC_04\Documents\Ground\ffmpeg-20180227-fa0c9d6-win64-static\bin\ffmpeg'
video_file = [
    f for f in os.listdir(params['tank']) if re.match(r'^.*\.avi$', f)
][0]
if not (os.path.isdir(os.path.join(params['tank'], 'Stack'))):
    os.mkdir(os.path.join(params['tank'], 'Stack'))
Example #18
Set up the variables for the data you want to extract. We will extract channel 3 from the eNe1 snippet data store, created by the PCA Sorting gizmo, and use our PulseGen epoc event `PC0/` as our stimulus onset.
"""

REF_EPOC = 'PC0/'
SNIP_STORE = 'eNe1'
SORTID = 'TankSort'
CHANNEL = 3
SORTCODE = 0  # set to 0 to use all sorts
TRANGE = [-0.3, 0.8]
"""
Now read the specified data from our block into a Python structure. The `nodata` flag means that we are only interested in the snippet timestamps, not the actual snippet waveforms, in this example.
"""

data = read_block(BLOCK_PATH,
                  evtype=['epocs', 'snips', 'scalars'],
                  sortname=SORTID,
                  channel=CHANNEL,
                  nodata=1)
"""
## Use epoc_filter to extract data around our epoc event
Using the `t` parameter extracts data only from the time range around our epoc event.
"""

raster_data = epoc_filter(data, REF_EPOC, t=TRANGE)
"""
Adding the `tref` flag makes all of the timestamps relative to the epoc event, which is ideal for generating histograms.
"""

hist_data = epoc_filter(data, REF_EPOC, t=TRANGE, tref=1)
"""
And that's it! Your data is now in Python. The rest of the code is a simple plotting example. First, we'll find matching timestamps for our selected sort code (unit).
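A hedged sketch of that step (the original is truncated here; reconstructed from the variables above, with numpy assumed imported as np): keep only snippet timestamps whose sort code matches SORTCODE, unless SORTCODE is 0.
"""

ts = raster_data.snips[SNIP_STORE].ts
if SORTCODE != 0:
    # keep timestamps of the selected unit only
    i = np.where(raster_data.snips[SNIP_STORE].sortcode == SORTCODE)[0]
    ts = ts[i]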
Example #19
download_demo_data()
BLOCK_PATH = 'data/Subject1-180426-120951'
"""
### Set up the variables for the data you want to extract.
We will extract channel 1 from the EEG1 stream data store.
"""

STORE = 'EEG1'
CHANNEL = 1
ONSET = [-3]  # relative onset, in seconds, from the note timestamp
"""
Now read the specified data from our block into a Python structure
"""

data = read_block(BLOCK_PATH, channel=CHANNEL)
"""
All user notes are stored in a special epoc event called 'Note'
"""

# find all the unique note values
notes, counts = np.unique(data.epocs.Note.notes, return_counts=True)

# find the highest number of occurrences (to inform our plot)
maxOccur = np.max(counts)
"""
### Loop through the notes for plotting
"""

# some useful variables
num_notes = len(notes)
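
"""
A hedged sketch of the loop body (the original is truncated here; field names follow the tdt package, the windowing logic is illustrative): collect the data segment starting ONSET seconds before each occurrence of each note.
"""

fs = data.streams[STORE].fs  # sampling rate of the EEG1 store
for ind, note in enumerate(notes):
    # onset timestamps of every occurrence of this note text
    note_onsets = data.epocs.Note.onset[data.epocs.Note.notes == note]
    for ts in note_onsets:
        start = max(0, int((ts + ONSET[0]) * fs))  # sample ONSET s before the note
        segment = data.streams[STORE].data[start:int(ts * fs)]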
Example #20
# import the primary functions from the tdt library only
from tdt import read_block, download_demo_data
"""
### Importing the Data
This example uses our [example data sets](https://www.tdt.com/files/examples/TDTExampleData.zip). To import your own data, replace BLOCK_PATH with the full path to your own data block.

In Synapse, you can find the block path in the database. Go to Menu > History. Find your block, then Right-Click > Copy path to clipboard.
"""

download_demo_data()
BLOCK_PATH = 'data/Algernon-180308-130351'
"""
Now read all epocs and just channel 1 from all stream data into a Python structure called 'data'
"""

data = read_block(BLOCK_PATH, evtype=['streams', 'epocs'], channel=1)
"""
And that's it! Your data is now in Python. The rest of the code is a simple plotting example.

### Stream Store Plotting
Let's create time vectors for each stream store for plotting in time.
"""

len_Wav1 = len(data.streams.Wav1.data)
len_LFP1 = len(data.streams.LFP1.data)
len_pNe1 = len(data.streams.pNe1.data)
time_Wav1 = np.linspace(1, len_Wav1, len_Wav1) / data.streams.Wav1.fs
time_LFP1 = np.linspace(1, len_LFP1, len_LFP1) / data.streams.LFP1.fs
time_pNe1 = np.linspace(1, len_pNe1, len_pNe1) / data.streams.pNe1.fs
"""
### Plot five seconds of data from each store
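A hedged plotting sketch (the original is truncated here; matplotlib, imported below, is an assumption rather than part of the original excerpt): overlay the first five seconds of each store using the time vectors built above.
"""

import matplotlib.pyplot as plt  # assumption: matplotlib is installed

for name, t, y in [('Wav1', time_Wav1, data.streams.Wav1.data),
                   ('LFP1', time_LFP1, data.streams.LFP1.data),
                   ('pNe1', time_pNe1, data.streams.pNe1.data)]:
    sel = t <= 5  # boolean mask over the first five seconds
    plt.plot(t[sel], y[sel], label=name)
plt.xlabel('Time (s)')
plt.legend()
plt.show()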
Example #21
def read_TDT(binPath,
             downSample=None,
             tStart=None,
             tEnd=None,
             chanList=None,
             ds_method='interpolation'):
    """Load TDT data using the tdt python package.

    Args:
        binPath (str | pathlib.Path): Path to block

    Kwargs:
        downSample (int | float | None): Frequency in Hz at which the data is
            subsampled. No subsampling if None. (default None)
        tStart (float | None): Time in seconds from start of recording of first
            loaded sample. Default 0.0
        tEnd (float | None): Time in seconds from start of recording of last
            loaded sample. Duration of recording by default
        chanList (list(str)): List of loaded channels. Should be non-empty.
            The provided list should be formatted as follows::
                    [<score_name>-<channel_index>, ...]
            where channels are 1-indexed, (IMPORTANT) not 0-indexed, for
            consistency with tdt methods,
                eg: [LFPs-1, LFPs-2, EEGs-1, EEGs-94, EMGs-1...]
        ds_method (str): Method for resampling. Passed to
            ``resample.signal_resample``. 'poly' is more accurate,
            'interpolation' is faster (default 'interpolation')

    Returns:
        data (np.ndarray): The raw data of shape (n_channels, n_points)
        downsample (float): The down-sampling frequency used.
        chanList (list(str)): List of channels
    """
    import tdt

    print(f"Load TDT block at {binPath}")

    if tStart is None:
        tStart = 0.0
    if tEnd is None:
        tEnd = 0.0  # tdt convention: t2=0.0 means "until the end of the recording"
    print(f"tStart = {tStart}, tEnd={tEnd}")

    def validate_chan(s):
        return isinstance(s, str) and len(s.split('-')) == 2

    def parse_chan(score_chan):
        store, chan = [s.strip(' ') for s in score_chan.split('-')]
        return store, int(chan)

    # Check length and formatting of the `chanList` parameter
    chanList_error_msg = (
        "`chanList` should be a non-empty list of strings and formatted as "
        "follows:\n         [<score_name>-<channel_index>, ...], \n"
        "where channel indices are 1-indexed (not 0-indexed). eg: \n"
        "       [LFPs-1, LFPs-2, EEGs-1, EEGs-94, EMGs-1...] \n"
        f"Currently chanList = {chanList}")
    if chanList is None:
        raise ValueError(chanList_error_msg)
    if not (len(chanList) > 0 and all([validate_chan(s) for s in chanList])
            and all([parse_chan(s)[1] > 0 for s in chanList])):
        raise ValueError(chanList_error_msg)

    # Load and downsample data for all requested channels
    chan_dat_list = []  # List of channel data arrays to concatenate
    chan_ts_list = []  # List of timestamps for each channel
    # Iterate on channels:
    for store_chan in chanList:
        store, chan = parse_chan(store_chan)
        print(f"Load channel {chan} from store {store}", end=", ")
        blk = read_tdt_block(binPath,
                             t1=tStart,
                             t2=tEnd,
                             store=store,
                             channel=chan)

        # Check that the requested data is actually there
        if store not in blk.streams.keys():
            stores = tdt.read_block(binPath, t2=1.0,
                                    nodata=True).streams.keys()
            raise Exception(f"Store `{store}` not found in data."
                            f" Existing stores = {stores}")

        sRate = blk.streams[store].fs
        chandat = blk.streams[store].data  # 1-D array of nSamples values

        # Downsample the data
        if downSample is None or downSample == sRate:
            downSample = sRate
            chan_dat_ds = chandat
        else:
            if downSample > sRate:
                print(
                    f"Warning: The resampling rate ({downSample}) is greater "
                    f"than the original sampling rate ({sRate})")
            print(f"-> Resampling from {sRate}Hz to {downSample}Hz using "
                  f"'{ds_method}' method")
            if not chan_dat_list:
                # First channel: downsample to target
                chan_dat_ds = resample.signal_resample(
                    chandat,
                    sampling_rate=sRate,
                    desired_sampling_rate=downSample,
                    method=ds_method,
                )
            else:
                # next channels: downsample to match first channel's length
                chan_dat_ds = resample.signal_resample(
                    chandat,
                    sampling_rate=sRate,
                    desired_length=len(chan_dat_list[0]),
                    method=ds_method,
                )

        # Add data and timestamps
        chan_ts_ds = [
            blk.streams[store].start_time + i / downSample
            for i in range(len(chan_dat_ds))
        ]
        chan_dat_list.append(chan_dat_ds)
        chan_ts_list.append(chan_ts_ds)

    # Check same number of samples for all channels
    assert all([dat.shape == chan_dat_list[0].shape for dat in chan_dat_list])
    # Check data is aligned for all channels (~same timestamps for each channel)
    MAX_DIFF = 0.001  # (s)
    ts_diff = [max(ts_list) - min(ts_list) for ts_list in zip(*chan_ts_list)]
    assert all([v <= MAX_DIFF for v in ts_diff])

    return np.stack(chan_dat_list), downSample, chanList
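
A hedged usage sketch (the block path and channel names are illustrative, not from the original): load two LFP channels and one EMG channel, downsampled to 1000 Hz.

data, ds_freq, chans = read_TDT(
    'path/to/block',  # illustrative block path
    downSample=1000,
    chanList=['LFPs-1', 'LFPs-2', 'EMGs-1'],  # hypothetical store/channel names
)
print(data.shape)  # (3, n_points)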
Example #22
"""
### Set up the variables for the data you want to extract.
We will extract channel 3 from the LFP1 stream data store, created by the Neural Stream Processor gizmo, and use our PulseGen epoc event ('PC0/') as our stimulus onset.
"""

REF_EPOC = 'PC0/'
STREAM_STORE = 'LFP1'
ARTIFACT = np.inf  # optionally set an artifact rejection level
CHANNEL = 3
TRANGE = [-0.3, 0.8]  # [start time relative to epoc onset, window duration]
"""
### Now read the specified data from our block into a Python structure
"""

data = read_block(BLOCK_PATH,
                  evtype=['epocs', 'scalars', 'streams'],
                  channel=CHANNEL)
"""
## Use epoc_filter to extract data around our epoc event

Using the `t` parameter extracts data only from the time range around our epoc event. For stream events, the chunks of data are stored in a list.
"""

data = epoc_filter(data, 'PC0/', t=TRANGE)
"""
Optionally remove artifacts
"""

art1 = np.array(
    [np.any(x > ARTIFACT) for x in data.streams[STREAM_STORE].filtered],
    dtype=bool)
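
"""
A hedged sketch of the remaining artifact-rejection step (the original is truncated here; reconstructed, not verbatim): also flag sweeps that cross the negative threshold, then keep only the clean sweeps.
"""

art2 = np.array(
    [np.any(x < -ARTIFACT) for x in data.streams[STREAM_STORE].filtered],
    dtype=bool)
good = np.logical_not(art1) & np.logical_not(art2)
data.streams[STREAM_STORE].filtered = [
    x for x, keep in zip(data.streams[STREAM_STORE].filtered, good) if keep
]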