def test_event_onsets(self):
        eventFn = 'all_channels.events'
        spikesFn = 'Tetrode2.spikes'
        eventFile = os.path.join(testDataDir, eventFn)
        spikesFile = os.path.join(testDataDir, spikesFn)
        eventData = loadopenephys.Events(eventFile)
        dataSpikes = loadopenephys.DataSpikes(spikesFile)
        spikeTimestamps = dataSpikes.timestamps
        eventOnsetTimes = eventData.get_event_onset_times()

        #convert to seconds
        samplingRate = eventData.samplingRate
        spikeTimestamps = spikeTimestamps / samplingRate
        eventOnsetTimes = eventOnsetTimes / samplingRate
        assert len(eventOnsetTimes) == 513

        timeRange = [-0.5, 1.0]
        #Remove all events except the first pulse in each laser train
        eventOnsetTimes = spikesanalysis.minimum_event_onset_diff(
            eventOnsetTimes, 0.5)
        assert len(eventOnsetTimes) == 103

        (spikeTimesFromEventOnset, trialIndexForEachSpike,
         indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
             spikeTimestamps, eventOnsetTimes, timeRange)

        plt.plot(spikeTimesFromEventOnset, trialIndexForEachSpike, '.')
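# A minimal sketch of what minimum_event_onset_diff could compute, assuming it
# keeps only events whose onset is at least minEventOnsetDiff seconds after the
# previous one (the same np.diff pattern appears inline in Examples #10 and #11).
# Not the actual jaratoolbox implementation.
import numpy as np

def keep_first_pulses(eventOnsetTimes, minEventOnsetDiff=0.5):
    # Prepend a large gap so the very first event is always kept.
    gaps = np.r_[minEventOnsetDiff + 1, np.diff(eventOnsetTimes)]
    return eventOnsetTimes[gaps > minEventOnsetDiff]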
Example #2
 def load_data(self, animalName, ephysSession, tetrode):
     print('Loading data...')
     self.dataTT = loadopenephys.DataSpikes(
         self.tetrodeFile)  #,readWaves=True)
     self.nSpikes = self.dataTT.samples.shape[
         0]  # FIXME: this is specific to the OpenEphys format
     self.dataTT.samples = self.dataTT.samples.astype(
         float) - 2**15  # FIXME: this is specific to OpenEphys
     print('Aligning to peak...')
     #self.dataTT.samples = spikesorting.align_waveforms(self.dataTT.samples)
     self.align_spikes()
     # FIXME: This assumes the gain is the same for all channels and records
     self.dataTT.samples = (1000.0 /
                            self.dataTT.gain[0, 0]) * self.dataTT.samples
     self.dataTT.timestamps = self.dataTT.timestamps / self.dataTT.samplingRate
     # -- Load clusters if required --
     self.clustersFile = os.path.join(self.clustersDir,
                                      'Tetrode%d.clu.1' % tetrode)
     if os.path.isfile(self.clustersFile):
         self.dataTT.set_clusters(self.clustersFile)
         self.assign_clusters()
     else:
         print('Clusters file does not exist for this tetrode: {0}'.format(
             self.clustersFile))
     self.set_attributes()
Example #3
    def load_all_waveforms(self):
        for ind, session in enumerate(self.sessionList):
            if session:  #Skip entries where the session is None
                ephysDir = os.path.join(settings.EPHYS_PATH, self.animalName,
                                        session)
                spikeFile = os.path.join(
                    ephysDir, 'Tetrode{0}.spikes'.format(self.tetrode))
                dataSpkObj = loadopenephys.DataSpikes(spikeFile)
                numSpikes = dataSpkObj.nRecords

                #Add ind to a vector of zeros to indicate which recording each spike came from.
                sessionVector = np.zeros(numSpikes) + ind

                samplesThisSession = dataSpkObj.samples.astype(
                    float) - 2**15  # FIXME: this is specific to OpenEphys
                samplesThisSession = (
                    1000.0 / dataSpkObj.gain[0, 0]) * samplesThisSession
                timestampsThisSession = dataSpkObj.timestamps / self.SAMPLING_RATE

                #Set the values when working with the first session, then append for the other sessions.
                if ind == 0:
                    self.samples = samplesThisSession
                    self.timestamps = timestampsThisSession
                    self.recordingNumber = sessionVector
                else:
                    self.samples = np.concatenate(
                        [self.samples, samplesThisSession])
                    self.timestamps = np.concatenate(
                        [self.timestamps, timestampsThisSession])
                    self.recordingNumber = np.concatenate(
                        [self.recordingNumber, sessionVector])

        self.nSpikes = len(self.timestamps)
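# A design note on the loop above: the first-iteration special case can be
# avoided by collecting per-session arrays in lists and concatenating once at
# the end, which also avoids repeatedly reallocating the growing arrays. A
# sketch with hypothetical names:
import numpy as np

def concat_sessions(samplesPerSession, timestampsPerSession):
    # Tag each spike with the index of the session it came from.
    recordingNumber = np.concatenate(
        [np.full(len(ts), ind) for ind, ts in enumerate(timestampsPerSession)])
    samples = np.concatenate(samplesPerSession)
    timestamps = np.concatenate(timestampsPerSession)
    return samples, timestamps, recordingNumber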
Example #4
    def get_session_ephys_data(self,
                               session,
                               tetrode,
                               convert_to_seconds=True):
        '''
        Method to retrieve the ephys data for a session/tetrode. Automatically loads the 
        clusters if clustering has been done for the session

        '''

        SAMPLING_RATE = 30000.0

        ephysSession = self.get_session_name(session)

        ephysDir = os.path.join(self.localEphysDir, ephysSession)
        plotTitle = ephysDir
        event_filename = os.path.join(ephysDir, 'all_channels.events')

        eventData = loadopenephys.Events(event_filename)

        spikeFilename = os.path.join(ephysDir,
                                     'Tetrode{}.spikes'.format(tetrode))
        spikeData = loadopenephys.DataSpikes(spikeFilename)

        if convert_to_seconds:
            spikeData.timestamps = spikeData.timestamps / SAMPLING_RATE

        #If clustering has been done for the tetrode, add the clusters to the spikedata object
        clustersDir = os.path.join(
            settings.EPHYS_PATH, '%s/%s_kk/' % (self.animalName, ephysSession))
        clustersFile = os.path.join(clustersDir, 'Tetrode%d.clu.1' % tetrode)
        if os.path.isfile(clustersFile):
            spikeData.set_clusters(clustersFile)

        return spikeData, eventData, plotTitle
Example #5
def get_session_ephys(cell, sessiontype):
    '''
    Load the spikes and events from a cell for a single session.
    Args:
        cell (pandas.Series): One row from a pandas cell database created using generate_cell_database or by
                              manually constructing a pandas.Series object that contains the required fields.
        sessiontype (str): The type of session
    Returns:
        spikeData (jaratoolbox.loadopenephys.DataSpikes): The spike data for the session
        eventData (jaratoolbox.loadopenephys.Events): The event data for the session
    '''
    sessionInds = get_session_inds(cell, sessiontype)
    sessionInd = sessionInds[0]  #FIXME: Just takes the first one for now
    ephysSession = cell['ephys'][sessionInd]
    ephysBaseDir = os.path.join(settings.EPHYS_PATH, cell['subject'])
    tetrode = int(cell['tetrode'])
    eventFilename = os.path.join(ephysBaseDir, ephysSession,
                                 'all_channels.events')
    spikesFilename = os.path.join(ephysBaseDir, ephysSession,
                                  'Tetrode{}.spikes'.format(tetrode))
    eventData = loadopenephys.Events(eventFilename)
    spikeData = loadopenephys.DataSpikes(spikesFilename)
    if spikeData.timestamps is not None:
        clustersDir = os.path.join(ephysBaseDir, '{}_kk'.format(ephysSession))
        clustersFile = os.path.join(clustersDir,
                                    'Tetrode{}.clu.1'.format(tetrode))
        spikeData.set_clusters(clustersFile)
        spikeData.samples = spikeData.samples[spikeData.clusters ==
                                              cell['cluster']]
        spikeData.timestamps = spikeData.timestamps[spikeData.clusters ==
                                                    cell['cluster']]
        spikeData = convert_openephys(spikeData)
    eventData = convert_openephys(eventData)
    return spikeData, eventData
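# A hypothetical call to get_session_ephys, constructing a minimal pandas.Series
# by hand. The 'subject', 'ephys', 'tetrode', and 'cluster' fields are the ones
# used in the function body; all values (and any field get_session_inds needs,
# such as 'sessiontype' here) are made-up assumptions.
import pandas as pd

cell = pd.Series({'subject': 'test089',
                  'ephys': ['2015-07-31_14-40-40'],
                  'sessiontype': ['laserpulse'],
                  'tetrode': 3,
                  'cluster': 4})
spikeData, eventData = get_session_ephys(cell, 'laserpulse')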
Example #6
    def get_session_spike_data_one_tetrode(self, session, tetrode, convert_to_seconds=True):
        '''
        Method to retrieve the spike data for a session/tetrode. Automatically loads the 
        clusters if clustering has been done for the session. This method converts the spike 
        timestamps to seconds by default. 

        '''
        
        ephysSession = self.get_session_name(session)
        ephysDir=os.path.join(self.localEphysDir, ephysSession)
        spikeFilename = os.path.join(ephysDir, 'Tetrode{}.spikes'.format(tetrode))
        spikeData = loadopenephys.DataSpikes(spikeFilename)
        
        if convert_to_seconds and hasattr(spikeData, 'timestamps'):
            spikeData.timestamps = spikeData.timestamps/self.SAMPLING_RATE
        else:
            spikeData.timestamps = np.array([])

        #If clustering has been done for the tetrode, add the clusters to the spikedata object
        clustersDir = os.path.join(settings.EPHYS_PATH,'%s/%s_kk/'%(self.animalName,ephysSession))
        clustersFile = os.path.join(clustersDir,'Tetrode%d.clu.1'%tetrode)
        if os.path.isfile(clustersFile):
            spikeData.set_clusters(clustersFile)

        return spikeData
Example #7
def load_remote_2afc_spikes(oneCell,
                            behavDir=BEHAVDIR_MOUNTED,
                            ephysDir=EPHYSDIR_MOUNTED):
    '''
    Given a CellInfo object and remote behavior and ephys directories, this function
    loads the 2afc spikes for one isolated cluster from the mounted jarastore drive.
    Returns a spikeData object.
    '''
    ### Get spike data for just this cluster ###
    spikeFilename = os.path.join(ephysDir, oneCell.animalName,
                                 oneCell.ephysSession,
                                 'Tetrode{}.spikes'.format(oneCell.tetrode))
    spikeData = loadopenephys.DataSpikes(spikeFilename)
    spikeData.timestamps = spikeData.timestamps / EPHYS_SAMPLING_RATE
    clustersDir = os.path.join(ephysDir, oneCell.animalName,
                               oneCell.ephysSession) + '_kk'
    clusterFilename = os.path.join(clustersDir,
                                   'Tetrode{}.clu.1'.format(oneCell.tetrode))
    clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]
    spikeData.timestamps = spikeData.timestamps[clusters == oneCell.cluster]
    spikeData.samples = spikeData.samples[clusters == oneCell.cluster, :, :]
    spikeData.samples = spikeData.samples.astype(
        float) - 2**15  # FIXME: this is specific to OpenEphys
    # FIXME: This assumes the gain is the same for all channels and records
    spikeData.samples = (1000.0 / spikeData.gain[0, 0]) * spikeData.samples
    #spikeData = ephyscore.CellData(oneCell) #This defaults to settings ephys path
    return spikeData
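# The [1:] above drops the first entry of the .clu file: in the KlustaKwik
# format, the first value is the number of clusters and the rest are one
# cluster label per spike. A minimal sketch of reading such a file:
import numpy as np

def read_clu_file(cluFilename):
    values = np.fromfile(cluFilename, dtype='int32', sep=' ')
    nClusters, clusterEachSpike = values[0], values[1:]
    return nClusters, clusterEachSpike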
Example #8
    def get_session_spike_data_one_tetrode(self,
                                           session,
                                           tetrode,
                                           convert_to_seconds=True,
                                           convert_to_mV=True):
        '''
        Get the spike data for one session, one tetrode.

        Method to retrieve the spike data for a session/tetrode. Automatically loads the
        clusters if clustering has been done for the session. This method converts the spike
        timestamps to seconds by default.

        Args:

        session (str or int): If a string, then must be either the full name of the ephys file (e.g. '2015-06-24_12-24-03')
        or just the timestamp portion of the file ('12-24-03'), in which case self.date will be used to construct the
        full file name. If an int, then the files that were recorded on self.date will be sorted, and the value provided
        will be used to index the sorted list. Therefore, -1 will return the session with the latest timestamp on the recording day.

        tetrode (int): The tetrode number to retrieve

        convert_to_seconds (bool): Whether or not to divide by the value stored in self.SAMPLING_RATE before returning spike
        timestamps

        Returns:

        spikeData (object of type jaratoolbox.loadopenephys.DataSpikes)

        '''

        ephysSession = self.get_session_name(session)
        ephysDir = os.path.join(self.localEphysDir, ephysSession)
        spikeFilename = os.path.join(ephysDir,
                                     'Tetrode{}.spikes'.format(tetrode))
        spikeData = loadopenephys.DataSpikes(spikeFilename)

        if convert_to_mV and hasattr(spikeData, 'samples'):
            spikeData.samples = spikeData.samples.astype(
                float) - 2**15  # FIXME: this is specific to OpenEphys
            spikeData.samples = (1000.0 /
                                 spikeData.gain[0, 0]) * spikeData.samples
        else:
            spikeData.samples = np.array([])

        if convert_to_seconds and hasattr(spikeData, 'timestamps'):
            spikeData.timestamps = spikeData.timestamps / self.SAMPLING_RATE
        else:
            spikeData.timestamps = np.array([])

        print "FIXME: getting spike data without converting values will result in no timestamps or samples being returned"

        #If clustering has been done for the tetrode, add the clusters to the spikedata object
        clustersDir = os.path.join(
            settings.EPHYS_PATH, '%s/%s_kk/' % (self.animalName, ephysSession))
        clustersFile = os.path.join(clustersDir, 'Tetrode%d.clu.1' % tetrode)
        if os.path.isfile(clustersFile):
            spikeData.set_clusters(clustersFile)

        return spikeData
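# A hypothetical call, assuming `loader` is an instance of the class defining
# this method, and using the int form of `session` described in the docstring:
spikeData = loader.get_session_spike_data_one_tetrode(-1, tetrode=6)  # latest session that day
spikeTimestamps = spikeData.timestamps  # already in seconds (convert_to_seconds=True)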
Example #9
    def get_session_spikes(self, session, tetrode, cluster=None, electrodeName='Tetrode'):
        '''
        Get the spike data for one session, one tetrode.

        Method to retrieve the spike data for a session/tetrode. Automatically loads the
        clusters if clustering has been done for the session. This method converts the spike
        timestamps to seconds by default.

        Args:
            session (str or int): If a string, then must be either the full name of the ephys file (e.g. '2015-06-24_12-24-03') or just the timestamp portion of the file ('12-24-03'), in which case self.date will be used to construct the full file name. If an int, then the files that were recorded on self.date will be sorted, and the value provided will be used to index the sorted list. Therefore, -1 will return the session with the latest timestamp on the recording day.

            tetrode (int): The tetrode number to retrieve

            electrodeName (str): The name preceding the electrode number as saved by Open Ephys, e.g. 'Tetrode6.spikes' needs 'Tetrode'

        Returns:
            spikeData (object of type jaratoolbox.loadopenephys.DataSpikes)
        '''
        if self.mode=='online':
            ephysSession = self.get_session_filename(session)
            spikeFilename = os.path.join(self.onlineEphysPath, ephysSession, '{0}{1}.spikes'.format(electrodeName, tetrode))

        elif self.mode=='offline': #The session should already be relative to the mouse
            spikeFilename = os.path.join(settings.EPHYS_PATH, session, '{0}{1}.spikes'.format(electrodeName, tetrode))

        spikeData = loadopenephys.DataSpikes(spikeFilename)

        #Make samples an empty array if there are no spikes
        if not hasattr(spikeData, 'samples'):
            spikeData.samples = np.array([])

        #Convert the spike samples to mV
        spikeData.samples = spikeData.samples.astype(float)-2**15# FIXME: this is specific to OpenEphys
        spikeData.samples = (1000.0/spikeData.gain[0,0]) * spikeData.samples

        #Make timestamps an empty array if it does not exist
        if not hasattr(spikeData, 'timestamps'):
            spikeData.timestamps = np.array([])

        #Convert the timestamps to seconds
        spikeData.timestamps = spikeData.timestamps/self.EPHYS_SAMPLING_RATE

        #If clustering has been done for the tetrode, add the clusters to the spikedata object
        if self.mode=='online':
            clustersDir = os.path.join(settings.EPHYS_PATH,'%s/%s_kk/'%(self.animalName,ephysSession)) #FIXME: Change to python 3 compatible format
            clustersFile = os.path.join(clustersDir,'Tetrode%d.clu.1'%tetrode)
        elif self.mode=='offline':
            clustersFile = os.path.join(settings.EPHYS_PATH, '{}_kk/'.format(session), 'Tetrode{}.clu.1'.format(tetrode))

        if os.path.isfile(clustersFile):
            spikeData.set_clusters(clustersFile)

        if cluster:
            spikeData.samples=spikeData.samples[spikeData.clusters==cluster]
            spikeData.timestamps=spikeData.timestamps[spikeData.clusters==cluster]
            
        return spikeData
Example #10
    def plot_array_raster(self,
                          session,
                          replace=0,
                          SAMPLING_RATE=30000.0,
                          timeRange=[-0.5, 1],
                          numTetrodes=4,
                          tetrodeIDs=[3, 4, 5, 6]):

        ephysSession = self.get_session_name(session)

        ephysDir = os.path.join(self.localEphysDir, ephysSession)

        event_filename = os.path.join(ephysDir, 'all_channels.events')

        #Load event data and convert event timestamps to seconds
        ev = loadopenephys.Events(event_filename)
        eventTimes = np.array(ev.timestamps) / SAMPLING_RATE
        evID = np.array(ev.eventID)
        evChannel = np.array(ev.eventChannel)
        eventOnsetTimes = eventTimes[(evID == 1) & (evChannel == 0)]

        evdiff = np.r_[1.0, np.diff(eventOnsetTimes)]
        eventOnsetTimes = eventOnsetTimes[evdiff > 0.5]

        if replace:
            clf()
        else:
            figure()

        for ind, tetrodeID in enumerate(tetrodeIDs):
            spike_filename = os.path.join(
                ephysDir, 'Tetrode{0}.spikes'.format(tetrodeID))
            sp = loadopenephys.DataSpikes(spike_filename)
            try:
                spkTimeStamps = np.array(sp.timestamps) / SAMPLING_RATE
                (spikeTimesFromEventOnset, trialIndexForEachSpike,
                 indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
                     spkTimeStamps, eventOnsetTimes, timeRange)

                subplot(numTetrodes, 1, ind + 1)

                plot(spikeTimesFromEventOnset,
                     trialIndexForEachSpike,
                     '.',
                     ms=1)
                axvline(x=0, ymin=0, ymax=1, color='r')
                if ind == 0:
                    title(ephysDir)
                #title('Channel {0} spikes'.format(ind+1))
            except AttributeError:  #Spikes files without any spikes will throw an error
                pass

        xlabel('time(sec)')
        #tight_layout()
        draw()
        show()
Example #11
    def plot_clustered_raster(self, session, tetrode, clustersToPlot):

        ephysSession = self.get_session_name(session)

        animalName = self.animalName
        SAMPLING_RATE = 30000.0
        timeRange = [
            -0.5, 1
        ]  #FIXME: These should be object methods, not just specific to this function
        spike_filename = os.path.join(settings.EPHYS_PATH, animalName,
                                      ephysSession,
                                      'Tetrode{0}.spikes'.format(tetrode))
        sp = loadopenephys.DataSpikes(spike_filename)
        clustersDir = os.path.join(settings.EPHYS_PATH,
                                   '%s/%s_kk/' % (animalName, ephysSession))
        clustersFile = os.path.join(clustersDir, 'Tetrode%d.clu.1' % tetrode)
        sp.set_clusters(clustersFile)
        event_filename = os.path.join(settings.EPHYS_PATH, animalName,
                                      ephysSession, 'all_channels.events')
        ev = loadopenephys.Events(event_filename)

        eventTimes = np.array(ev.timestamps) / SAMPLING_RATE
        evID = np.array(ev.eventID)
        evChannel = np.array(ev.eventChannel)
        eventOnsetTimes = eventTimes[(evID == 1) & (evChannel == 0)]

        evdiff = np.r_[1.0, np.diff(eventOnsetTimes)]
        eventOnsetTimes = eventOnsetTimes[evdiff > 0.5]

        #Already divided by the sampling rate in spikesorting
        allSpkTimestamps = np.array(sp.timestamps) / SAMPLING_RATE
        #allSpkTimestamps = np.array(oneTT.dataTT.timestamps)
        spkClusters = sp.clusters

        figure()
        for ind, clusterNum in enumerate(clustersToPlot):
            clusterspikes = allSpkTimestamps[spkClusters == clusterNum]

            spkTimeStamps = clusterspikes

            (spikeTimesFromEventOnset, trialIndexForEachSpike,
             indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
                 spkTimeStamps, eventOnsetTimes, timeRange)

            subplot(len(clustersToPlot), 1, ind + 1)

            plot(spikeTimesFromEventOnset, trialIndexForEachSpike, '.', ms=1)
            title('Cluster {}'.format(clusterNum))
            axvline(x=0, ymin=0, ymax=1, color='r')

        xlabel('Time (sec)')
        #tight_layout()
        draw()
        show()
Example #12
 def __init__(self, onecell):
     self.animalName = onecell.animalName
     self.ephysSession = onecell.ephysSession
     self.behavSession = onecell.behavSession
     self.tetrode = onecell.tetrode
     self.cluster = onecell.cluster
     self.filename = onecell.get_filename()
     self.spikes = loadopenephys.DataSpikes(self.filename)
     self.spikes.timestamps = self.spikes.timestamps / self.spikes.samplingRate
     self.load_clusters()
     self.select_cluster()  # Remove all spikes from other clusters
Example #13
 def load_waveforms(self):
     '''
     https://github.com/open-ephys/GUI/wiki/Data-format
     Since the samples are saved as unsigned integers, converting them to microvolts
     involves subtracting 32768, dividing by the gain, and multiplying by 1000.
     '''
     print('Loading data...')
     self.dataTT = loadopenephys.DataSpikes(self.tetrodeFile) #,readWaves=True)
     self.nSpikes = self.dataTT.nRecords  # FIXME: this is specific to the OpenEphys format
     self.dataTT.samples = self.dataTT.samples.astype(float) - 2**15  # FIXME: this is specific to OpenEphys
     # FIXME: This assumes the gain is the same for all channels and records
     self.dataTT.samples = (1000.0/self.dataTT.gain[0,0]) * self.dataTT.samples
     self.dataTT.timestamps = self.dataTT.timestamps/self.dataTT.samplingRate
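# Written as a standalone function, the conversion described in the docstring
# above; a sketch of the same arithmetic used throughout these examples (the
# comments variously call the result mV or microvolts), not a jaratoolbox API:
def convert_raw_samples(samples, gain):
    # Center the unsigned 16-bit samples at zero, then scale by 1000/gain.
    return (1000.0 / gain) * (samples.astype(float) - 2**15)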
Example #14
    def get_session_spikes(self, sessionDir, tetrode, cluster=None, electrodeName='Tetrode'):
        '''
        Get the spike data for one session, one tetrode.

        Method to retrieve the spike data for a session/tetrode. Automatically loads the
        clusters if clustering has been done for the session. This method converts the spike
        timestamps to seconds by default.

        Args:
            sessionDir (str): The ephys directory
            tetrode (int): The tetrode number to retrieve
            electrodeName (str): The name preceding the electrode number as saved by Open Ephys, e.g. 'Tetrode6.spikes' needs 'Tetrode'

        Returns:
            spikeData (object of type jaratoolbox.loadopenephys.DataSpikes)
        '''
        spikeFilename = os.path.join(self.ephysPath, sessionDir, '{}{}.spikes'.format(electrodeName, tetrode))

        spikeData = loadopenephys.DataSpikes(spikeFilename)

        #TODO: Why do we need this?
        #Make samples an empty array if there are no spikes
        if spikeData.samples is None:
            spikeData.samples = np.array([])

        #TODO: Make this an option
        #Convert the spike samples to mV
        spikeData.samples = spikeData.samples.astype(float)-2**15# FIXME: this is specific to OpenEphys
        if spikeData.gain is not None:
            spikeData.samples = (1000.0/spikeData.gain[0,0]) * spikeData.samples

        #Make timestamps an empty array if it does not exist
        if spikeData.timestamps is None:
            spikeData.timestamps = np.array([])

        #Convert the timestamps to seconds
        if self.convertSeconds:
            spikeData.timestamps = spikeData.timestamps/self.EPHYS_SAMPLING_RATE

        #If clustering has been done for the tetrode, add the clusters to the spikedata object
        clustersDir = os.path.join(self.ephysPath, '{}_kk'.format(sessionDir))
        clustersFile = os.path.join(clustersDir,'{}{}.clu.1'.format(electrodeName, tetrode))
        # print clustersFile #NOTE: For debugging
        if os.path.isfile(clustersFile):
            spikeData.set_clusters(clustersFile)

        if cluster:
            spikeData.samples=spikeData.samples[spikeData.clusters==cluster]
            spikeData.timestamps=spikeData.timestamps[spikeData.clusters==cluster]

        return spikeData
Example #15
def load_remote_tuning_data(oneCell,
                            behavDir=BEHAVDIR_MOUNTED,
                            ephysDir=EPHYSDIR_MOUNTED):
    '''
    Given a CellInfo object and remote behavior and ephys directories, this function loads the associated tuning ephys and tuning behav data from the mounted jarastore drive. Returns eventOnsetTimes, spikeTimestamps, and bData objects.
    '''

    ### Get behavior data associated with tuning curve ###
    behavFileName = '{0}_{1}_{2}.h5'.format(oneCell.animalName, 'tuning_curve',
                                            oneCell.tuningBehavior)
    behavFile = os.path.join(behavDir, oneCell.animalName, behavFileName)
    bData = loadbehavior.BehaviorData(behavFile, readmode='full')

    ### Get events data ###
    fullEventFilename = os.path.join(ephysDir, oneCell.animalName,
                                     oneCell.tuningSession,
                                     'all_channels.events')
    eventData = loadopenephys.Events(fullEventFilename)

    ### Get event onset times ###
    eventTimestamps = np.array(
        eventData.timestamps
    ) / EPHYS_SAMPLING_RATE  #hard-coded ephys sampling rate!!
    evID = np.array(eventData.eventID)
    eventOnsetTimes = eventTimestamps[(evID == 1)]

    ### Get spike data for just this cluster ###
    spikeFilename = os.path.join(ephysDir, oneCell.animalName,
                                 oneCell.tuningSession,
                                 'Tetrode{}.spikes'.format(oneCell.tetrode))
    spikeData = loadopenephys.DataSpikes(spikeFilename)
    spikeData.timestamps = spikeData.timestamps / EPHYS_SAMPLING_RATE
    clustersDir = os.path.join(ephysDir, oneCell.animalName,
                               oneCell.tuningSession) + '_kk'
    clusterFilename = os.path.join(clustersDir,
                                   'Tetrode{}.clu.1'.format(oneCell.tetrode))
    clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]
    spikeData.timestamps = spikeData.timestamps[clusters == oneCell.cluster]
    spikeData.samples = spikeData.samples[clusters == oneCell.cluster, :, :]
    spikeData.samples = spikeData.samples.astype(
        float) - 2**15  # FIXME: this is specific to OpenEphys
    # FIXME: This assumes the gain is the same for all channels and records
    spikeData.samples = (1000.0 / spikeData.gain[0, 0]) * spikeData.samples
    #spikeData = ephyscore.CellData(oneCell)
    #spikeTimestamps=spikeData.spikes.timestamps

    return (eventOnsetTimes, spikeData.timestamps, bData)
Example #16
def loadEphys(subject, ephysSession, tetrodeID):
    ############################################################
    #PARAMETERS
    ############################################################

    ephysRoot = '/home/billywalker/data/ephys/'
    #ephysSession = '2014-12-24_17-11-53'
    #tetrodeID is which tetrode to plot
    ############################################################
    ############################################################

    # -- Global variables --
    SAMPLING_RATE = 30000.0

    ephysRoot = ephysRoot + subject + '/' + 'psyCurve/'

    # -- Load event data and convert event timestamps to seconds --
    ephysDir = os.path.join(ephysRoot, ephysSession)
    eventFilename = os.path.join(ephysDir, 'all_channels.events')
    events = loadopenephys.Events(eventFilename)  # Load events data
    eventTimes = np.array(
        events.timestamps
    ) / SAMPLING_RATE  #timestamps of each event, converted to seconds by dividing by the sampling rate (Hz); aligns with eventID and eventChannel
    multipleEventOnset = np.array(
        events.eventID
    )  #state of each event: 1 if the event channel went on, 0 if it went off
    eventChannel = np.array(
        events.eventChannel
    )  #channel ID of each event; for example, 0 is the sound event, 1 is the trial event, ...

    # -- Load spike data and convert spike timestamps to seconds --
    spike_filename = os.path.join(
        ephysDir, 'Tetrode{0}.spikes'.format(tetrodeID)
    )  #make a path to ephys spike data of specified tetrode tetrodeID
    spikeData = loadopenephys.DataSpikes(
        spike_filename)  #load spike data from specified tetrode tetrodeID
    spkTimeStamps = np.array(
        spikeData.timestamps
    ) / SAMPLING_RATE  #timestamps of each spike in seconds (hence the division by the sampling rate)

    return [eventTimes, multipleEventOnset, eventChannel, spkTimeStamps]
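# Hypothetical usage of loadEphys; subject, session, and tetrode values are
# made up. Channel 0 is the sound event channel per the comments above.
eventTimes, multipleEventOnset, eventChannel, spkTimeStamps = loadEphys(
    'billy001', '2014-12-24_17-11-53', tetrodeID=3)
soundOnsetTimes = eventTimes[(multipleEventOnset == 1) & (eventChannel == 0)]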
Example #17
def load_spike_data(animal, ephysSession, tetrode, cluster):
    '''
    Load spike data for just one isolated cluster.
    :param animal: String containing the animal name.
    :param ephysSession: String with the full filename of the ephys session, in {date}_XX-XX-XX format.
    :param tetrode: Integer in range(1,9) for the tetrode number.
    :param cluster: Integer for the cluster number.
    :return: spikeData object (as defined in loadopenephys).
    '''
    spikeFilename = os.path.join(EPHYS_PATH,animal,ephysSession, 'Tetrode{}.spikes'.format(tetrode))
    spikeData = loadopenephys.DataSpikes(spikeFilename)
    spikeData.timestamps = spikeData.timestamps/EPHYS_SAMPLING_RATE
    clustersDir = os.path.join(EPHYS_PATH,animal,ephysSession)+'_kk'
    clusterFilename = os.path.join(clustersDir, 'Tetrode{}.clu.1'.format(tetrode))
    clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]
    spikeData.timestamps = spikeData.timestamps[clusters==cluster]
    spikeData.samples = spikeData.samples[clusters==cluster, :, :]
    spikeData.samples = spikeData.samples.astype(float)-2**15# FIXME: this is specific to OpenEphys
    # FIXME: This assumes the gain is the same for all channels and records
    spikeData.samples = (1000.0/spikeData.gain[0,0]) * spikeData.samples
    return spikeData
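# Hypothetical usage of load_spike_data; all argument values are made up, and
# the module-level EPHYS_PATH and EPHYS_SAMPLING_RATE constants must be set.
spikeData = load_spike_data('test089', '2015-07-31_14-40-40', tetrode=3, cluster=4)
print(len(spikeData.timestamps), 'spikes in cluster 4')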
Example #18
def calculate_avg_waveforms(subject,
                            ephysSession,
                            tetrode,
                            clustersPerTetrode=12,
                            wavesize=160):
    '''
    NOTE: This method should look through sessions, not clusters.
          The idea is to compare clusters within a tetrode, and then across sessions
          but still within a tetrode.
    NOTE: This method is inefficient because it loads the spikes file for each cluster.
    '''

    # DONE: Load data for one tetrodes and calculate average for each cluster.
    #ephysFilename = ???
    ephysDir = os.path.join(settings.EPHYS_PATH, subject, ephysSession)
    ephysFilename = os.path.join(ephysDir, 'Tetrode{}.spikes'.format(tetrode))
    spikes = loadopenephys.DataSpikes(ephysFilename)

    # DONE: Load cluster file
    #kkDataDir = os.path.dirname(self.filename)+'_kk'
    #fullPath = os.path.join(kkDataDir,clusterFilename)
    clustersDir = '{}_kk'.format(ephysDir)
    clusterFilename = os.path.join(clustersDir,
                                   'Tetrode{}.clu.1'.format(tetrode))
    clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]

    # DONE: loop through clusters
    allWaveforms = np.empty((clustersPerTetrode, wavesize))
    for indc in range(clustersPerTetrode):
        print('Estimating average waveform for {0} T{1}c{2}'.format(
            ephysSession, tetrode, indc + 1))

        # DONE: get waveforms for one cluster
        #Add 1 to the cluster index because clusters start from 1
        waveforms = spikes.samples[clusters == indc + 1, :, :]

        alignedWaveforms = spikesorting.align_waveforms(waveforms)
        meanWaveforms = np.mean(alignedWaveforms, axis=0)
        allWaveforms[indc, :] = meanWaveforms.flatten()
    return allWaveforms
Example #19
def calculate_avg_waveforms(subject, cellDB, ephysSession, tetrode,  wavesize=160):
    '''
    NOTE: This method should look through sessions, not clusters.
          The idea is to compare clusters within a tetrode, and then across sessions
          but still within a tetrode.
    NOTE: This method is inefficient because it loads the spikes file for each cluster.
    '''
    
    date = ephysSession.split('_')[0]
    #passingClusters = np.array(np.repeat(0,12), dtype=bool) #default to all false
    cells = cellDB.loc[(cellDB.date==date) & (cellDB.tetrode==tetrode)]
    if len(cells) == 0: #This tetrode doesn't exist in this session
        allWaveforms = None
    else:
        # DONE: Load data for one tetrodes and calculate average for each cluster.
        #ephysFilename = ???
        ephysDir = os.path.join(settings.EPHYS_PATH_REMOTE, subject, ephysSession)
        ephysFilename = os.path.join(ephysDir, 'Tetrode{}.spikes'.format(tetrode))
        spikes = loadopenephys.DataSpikes(ephysFilename)

        # DONE: Load cluster file
        #kkDataDir = os.path.dirname(self.filename)+'_kk'
        #fullPath = os.path.join(kkDataDir,clusterFilename)
        clustersDir = '{}_kk'.format(ephysDir)
        clusterFilename = os.path.join(clustersDir, 'Tetrode{}.clu.1'.format(tetrode))
        clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]
        clustersThisSession = np.unique(clusters)
        numClustersThisTetrode = len(cells)  #clustersThisSession may not include every possible cluster on this tetrode; on rare occasions, cells may not either
        # DONE: loop through clusters
        allWaveforms = np.empty((numClustersThisTetrode,wavesize))
        for indc, cluster in enumerate(cells.cluster.values): #clustersThisSession):
            print('Estimating average waveform for {0} T{1}c{2}'.format(ephysSession, tetrode, cluster))

            # DONE: get waveforms for one cluster
            waveforms = spikes.samples[clusters==cluster, :, :]
            alignedWaveforms = spikesorting.align_waveforms(waveforms)
            meanWaveforms = np.mean(alignedWaveforms,axis=0)
            allWaveforms[indc,:] = meanWaveforms.flatten()
    return allWaveforms
Example #20
def load_ephys_data(subject, session, tetrode, cluster=None):
    ephysBaseDir = os.path.join(settings.EPHYS_PATH, subject)
    eventFilename=os.path.join(ephysBaseDir,
                               session,
                               'all_channels.events')
    spikesFilename=os.path.join(ephysBaseDir,
                                session,
                                'Tetrode{}.spikes'.format(tetrode))
    eventData=loadopenephys.Events(eventFilename)
    spikeData = loadopenephys.DataSpikes(spikesFilename)
    clustersDir = os.path.join(ephysBaseDir, '{}_kk'.format(session))
    clustersFile = os.path.join(clustersDir,'Tetrode{}.clu.1'.format(tetrode))
    spikeData.set_clusters(clustersFile)
    if cluster is not None:
        spikeData.samples=spikeData.samples[spikeData.clusters==cluster]
        spikeData.timestamps=spikeData.timestamps[spikeData.clusters==cluster]
    
    # convert to seconds and millivolts
    spikeData.samples = spikeData.samples.astype(float)-2**15
    spikeData.samples = (1000.0/spikeData.gain[0,0]) * spikeData.samples
    spikeData.timestamps = spikeData.timestamps/spikeData.samplingRate
    eventData.timestamps = eventData.timestamps/eventData.samplingRate
    return eventData, spikeData
Example #21
def get_session_ephys(cell, sessiontype):
    sessionInds = get_session_inds(cell, sessiontype)
    sessionInd = sessionInds[0] #FIXME: Just takes the first one for now
    ephysSession = cell['ephys'][sessionInd]
    ephysBaseDir = os.path.join(settings.EPHYS_PATH, cell['subject'])
    tetrode=int(cell['tetrode'])
    eventFilename=os.path.join(ephysBaseDir,
                               ephysSession,
                               'all_channels.events')
    spikesFilename=os.path.join(ephysBaseDir,
                                ephysSession,
                                'Tetrode{}.spikes'.format(tetrode))
    eventData=loadopenephys.Events(eventFilename)
    spikeData = loadopenephys.DataSpikes(spikesFilename)
    if spikeData.timestamps is not None:
        clustersDir = os.path.join(ephysBaseDir, '{}_kk'.format(ephysSession))
        clustersFile = os.path.join(clustersDir,'Tetrode{}.clu.1'.format(tetrode))
        spikeData.set_clusters(clustersFile)
        spikeData.samples=spikeData.samples[spikeData.clusters==cell['cluster']]
        spikeData.timestamps=spikeData.timestamps[spikeData.clusters==cell['cluster']]
        spikeData = convert_openephys(spikeData)
    eventData = convert_openephys(eventData)
    return spikeData, eventData
Example #22
tetrodes = range(1, 9)
# sessions = inforec.test098.experiments[0].sites[0].sessions

celldb = celldatabase.NewCellDB()

for experiment in inforec.experiments:
    for site in experiment.sites:
        for session in site.sessions:
            for tetrode in tetrodes:
                fullPath = session.full_ephys_path()
                fullFn = os.path.join(fullPath,
                                      'Tetrode{}.spikes'.format(tetrode))
                #print fullFn
                fullBehav = session.full_behav_filename()
                #print fullBehav
                dataSpikes = loadopenephys.DataSpikes(fullFn)
                ephysSession = '{}_{}'.format(session.date, session.timestamp)
                features = ['peak', 'valleyFirstHalf']
                oneTT = spikesorting.TetrodeToCluster(session.subject,
                                                      ephysSession, tetrode,
                                                      features)

                oneTT.load_waveforms()
                clusterFile = os.path.join(oneTT.clustersDir,
                                           'Tetrode%d.clu.1' % oneTT.tetrode)
                if os.path.isfile(clusterFile):
                    oneTT.dataTT.clusters = np.fromfile(clusterFile,
                                                        dtype='int32',
                                                        sep=' ')[1:]
                else:
                    oneTT.create_fet_files()
Example #23
# -- Workaround for bug (as of 2014-07-08) --
#freqEachTrial = freqEachTrial[1:]
#freqEachTrial = np.roll(freqEachTrial,-1)

possibleFreq = np.unique(freqEachTrial)

sortedTrials = []
for indf, oneFreq in enumerate(possibleFreq):
    indsThisFreq = np.flatnonzero(freqEachTrial == oneFreq)
    sortedTrials = np.concatenate((sortedTrials, indsThisFreq))
sortingInds = argsort(sortedTrials)

# -- Load ephys data --

ev = loadopenephys.Events(event_filename)
sp = loadopenephys.DataSpikes(spike_filename)

spkTimeStamps = np.array(sp.timestamps) / SAMPLING_RATE
eventTimes = np.array(ev.timestamps) / SAMPLING_RATE
evID = np.array(ev.eventID)
eventOnsetTimes = eventTimes[evID == 1]

# -- Remove last started (but not finished) trial --
eventOnsetTimes = eventOnsetTimes[:-1]

timeRange = [-0.5, 1]
(spikeTimesFromEventOnset, trialIndexForEachSpike,
 indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
     spkTimeStamps, eventOnsetTimes, timeRange)

sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike]
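# The sorting trick above, worked on a tiny made-up example: sortedTrials lists
# trial indices grouped by frequency, and argsort inverts that mapping so
# sortingInds[trial] gives the trial's row in the frequency-sorted raster.
import numpy as np

freqEachTrial = np.array([8000, 4000, 8000, 4000])
possibleFreq = np.unique(freqEachTrial)  # [4000, 8000]
sortedTrials = np.concatenate(
    [np.flatnonzero(freqEachTrial == f) for f in possibleFreq])  # [1, 3, 0, 2]
sortingInds = np.argsort(sortedTrials)  # [2, 0, 3, 1]
# Trial 1 (4000 Hz) plots on row 0; trial 0 (8000 Hz) on row 2.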
Example #24
def load_ephys(subject,
               paradigm,
               sessionDir,
               tetrode,
               cluster=None,
               useModifiedClusters=False):

    #Setup path and filenames
    ephysBaseDir = os.path.join(settings.EPHYS_PATH, subject)
    eventsFilename = os.path.join(ephysBaseDir, sessionDir,
                                  'all_channels.events')
    spikesFilename = os.path.join(ephysBaseDir, sessionDir,
                                  'Tetrode{}.spikes'.format(tetrode))

    #Load the spikes and events
    eventData = loadopenephys.Events(eventsFilename)
    spikeData = loadopenephys.DataSpikes(spikesFilename)

    #Fail if there are no spikes for the tetrode
    if spikeData.timestamps is None:
        raise ValueError('File {} contains no spikes.'.format(spikesFilename))

    #Set clusters and limits spikes and samples to one cluster
    if cluster is not None:
        clustersDir = os.path.join(ephysBaseDir, '{}_kk'.format(sessionDir))
        # Get clusters file name, load and set the clusters
        if useModifiedClusters is False:
            # Always use the original .clu.1 file
            clustersFile = os.path.join(clustersDir,
                                        'Tetrode{}.clu.1'.format(tetrode))
            spikeData.set_clusters(clustersFile)
        elif useModifiedClusters is True:
            # Use the modified .clu file if it exists, otherwise use the original one
            clustersFileModified = os.path.join(
                clustersDir, 'Tetrode{}.clu.modified'.format(tetrode))
            if os.path.exists(clustersFileModified):
                print("Loading modified .clu file for session {}".format(
                    spikesFilename))
                spikeData.set_clusters(clustersFileModified)
            else:
                print(
                    "Modified .clu file does not exist, loading standard .clu file for session {}"
                    .format(spikesFilename))
                clustersFile = os.path.join(clustersDir,
                                            'Tetrode{}.clu.1'.format(tetrode))
                spikeData.set_clusters(clustersFile)
        spikeData.samples = spikeData.samples[spikeData.clusters == cluster]
        spikeData.timestamps = spikeData.timestamps[spikeData.clusters ==
                                                    cluster]

    #Convert to real units
    spikeData = loadopenephys.convert_openephys(spikeData)
    eventData = loadopenephys.convert_openephys(eventData)

    #Choose channel map based on paradigm
    channelMap = CHANNELMAPS[paradigm]
    eventDict = {}
    for channelType, channelID in channelMap.items():
        thisChannelOn = eventData.get_event_onset_times(eventID=1,
                                                        eventChannel=channelID)
        thisChannelOff = eventData.get_event_onset_times(
            eventID=0, eventChannel=channelID)
        eventDict.update({
            '{}On'.format(channelType): thisChannelOn,
            '{}Off'.format(channelType): thisChannelOff
        })
    ephysData = {
        'spikeTimes': spikeData.timestamps,
        'samples': spikeData.samples,
        'events': eventDict
    }
    return ephysData
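# loadopenephys.convert_openephys is not shown in these examples; based on the
# inline conversions elsewhere (Example #20 performs the same steps by hand),
# an equivalent sketch might be the following. Not the actual jaratoolbox code.
def convert_openephys_sketch(dataObj):
    # Samples to mV (OpenEphys-specific offset and gain), timestamps to seconds.
    if getattr(dataObj, 'samples', None) is not None:
        dataObj.samples = (1000.0 / dataObj.gain[0, 0]) * (
            dataObj.samples.astype(float) - 2**15)
    if getattr(dataObj, 'timestamps', None) is not None:
        dataObj.timestamps = dataObj.timestamps / dataObj.samplingRate
    return dataObj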
Example #25
oneTT = spikesorting.TetrodeToCluster(animalName, ephysSession, tetrode)
#oneTT.load_waveforms()

# --- Test ---
from jaratoolbox import loadopenephys
from importlib import reload  # reload is not a builtin in Python 3
reload(loadopenephys)
from pylab import *
N_CHANNELS = 4
SAMPLES_PER_SPIKE = 40

dataDir = os.path.join(settings.EPHYS_PATH,
                       '%s/%s/' % (animalName, ephysSession))
tetrodeFile = os.path.join(dataDir, 'Tetrode%d.spikes' % tetrode)

dataTT = loadopenephys.DataSpikes(tetrodeFile)
dataTT.timestamps = dataTT.timestamps / 0.03  # in microsec
dataTT.samples = dataTT.samples.astype(float) - 2**15
dataTT.set_clusters('/tmp/TT2.clu.1')

crep = spikesorting.ClusterReportFromData(dataTT)
'''
dataTT.samples = dataTT.samples.reshape((-1,N_CHANNELS,SAMPLES_PER_SPIKE),order='C')

fetArray = spikesorting.calculate_features(dataTT.samples,['peak','valley'])

spikesorting.write_fet_file('/tmp/TT2.fet.1',fetArray)
'''
'''
plot(dataTT.samples[:10,:].T,'.-')
draw()
'''
Example #26
###Testing on spike data
from jaratoolbox import loadopenephys
from jaratoolbox import spikesorting
from jaratest.nick import clustercutting
from matplotlib import pyplot as plt
import os
animalName='pinp013'
ephysLoc = '/home/nick/data/ephys/'
ephysPath = os.path.join(ephysLoc, animalName)
ephysFn='2016-05-27_14-13-26'
tetrode=3
spikesFn = os.path.join(ephysPath, ephysFn, 'Tetrode{}.spikes'.format(tetrode))


dataSpikes = loadopenephys.DataSpikes(spikesFn)

GAIN = 5000.0
SAMPLING_RATE=30000.0
dataSpikes.samples = ((dataSpikes.samples - 32768.0) / GAIN) * 1000.0
dataSpikes.timestamps = dataSpikes.timestamps/SAMPLING_RATE

(numSpikes, numChans, numSamples) = dataSpikes.samples.shape

allWaves = dataSpikes.samples.reshape(numSpikes, numChans*numSamples)

#Testing larger data sizes
### Looks like even with 3 times the spikes the algo is still fast
# allWaves = vstack((allWaves, allWaves, allWaves))

msz0 = 30
Example #27
    def plot_tc_psth(self, session, tetrode, behavFileIdentifier, cluster=None):

        #FIXME: This method needs a lot of work
        
        SAMPLING_RATE = 30000.0
        PLOTTING_WINDOW = 0.1 #Window to plot, in seconds
        
        ephysSession = self.get_session_name(session)

        ephysDir=os.path.join(self.localEphysDir, ephysSession)
        event_filename=os.path.join(ephysDir, 'all_channels.events')

        behaviorDir=os.path.join(self.localBehavPath, self.animalName)
        fullBehavFilename = ''.join([self.behavFileBaseName, behavFileIdentifier, '.h5'])
        behavDataFileName=os.path.join(behaviorDir, fullBehavFilename)

        #Extract the frequency presented each trial from the behavior data
        bdata = loadbehavior.BehaviorData(behavDataFileName,readmode='full')
        freqEachTrial = bdata['currentFreq']
        intensityEachTrial = bdata['currentIntensity']

        possibleFreq = np.unique(freqEachTrial) 
        possibleIntensity = np.unique(intensityEachTrial)

        #Get the event timestamps from openEphys
        ev=loadopenephys.Events(event_filename)
        evID=np.array(ev.eventID)
        eventOnsetTimes=ev.timestamps[evID==1] #The timestamps of all of the stimulus onsets

        #tetrode = 6  #FIXME: this hard-coded value would override the tetrode argument
        spikeFilename = os.path.join(ephysDir, 'Tetrode{}.spikes'.format(tetrode))
        spikeData = loadopenephys.DataSpikes(spikeFilename)
        
        if cluster:
            clustersDir = os.path.join(settings.EPHYS_PATH,'%s/%s_kk/'%(self.animalName,ephysSession))
            clustersFile = os.path.join(clustersDir,'Tetrode%d.clu.1'%tetrode)
            spikeData.set_clusters(clustersFile)

            spikeTimestamps = spikeData.timestamps[spikeData.clusters==cluster]
        else:
            spikeTimestamps = spikeData.timestamps
            


        allSettingsSpikes = defaultdict(dict) #2D dictionary to hold the spiketimes arrays organized by frequency and intensity

        for indFreq, currentFreq in enumerate(possibleFreq):
            for indIntensity, currentIntensity in enumerate(possibleIntensity):

                #Determine which trials this setting was presented on. 
                trialsThisSetting = np.flatnonzero((freqEachTrial == currentFreq) & (intensityEachTrial == currentIntensity))

                #Get the onset timestamp for each of the trials of this setting. 
                timestampsThisSetting = eventOnsetTimes[trialsThisSetting]

                spikesAfterThisSetting = np.array([])
                #Loop through all of the trials for this setting, extracting the trace after each presentation
                for indts, eventTimestamp in enumerate(timestampsThisSetting):
                    spikes = spikeTimestamps[(spikeTimestamps >= eventTimestamp) & (spikeTimestamps <= eventTimestamp + SAMPLING_RATE * PLOTTING_WINDOW)]
                    spikes = spikes - eventTimestamp
                    spikes = spikes / 30 #Spikes in ms after the stimulus

                    spikesAfterThisSetting = np.concatenate((spikesAfterThisSetting, spikes))
                allSettingsSpikes[indFreq][indIntensity] = spikesAfterThisSetting #Put the spikes into the 2d dict
        figure()

        maxBinCount = []
        for indI, intensity in enumerate(possibleIntensity):
            for indF, frequency in enumerate(possibleFreq):
                h, bin_edges = histogram(allSettingsSpikes[indF][indI]) #Dict is ordered by freq and then by Int.
                maxBinCount.append(max(h))

        maxNumSpikesperBin = max(maxBinCount)

        for intensity in range(len(possibleIntensity)):
            #Subplot2grid plots from top to bottom, but we need to plot from bottom to top
            #on the intensity scale. So we make an array of reversed intensity indices.
            intensPlottingInds = range(len(possibleIntensity))[::-1]
            for frequency in range(len(possibleFreq)):
                if (intensity == len(possibleIntensity) - 1) & (frequency == len(possibleFreq) -1):
                    ax2 = subplot2grid((len(possibleIntensity), len(possibleFreq)), (intensPlottingInds[intensity], frequency))
                    if len(allSettingsSpikes[frequency][intensity]) != 0:
                        ax2.hist(allSettingsSpikes[frequency][intensity])
                    else:
                        pass

                    ax2.set_ylim([0, maxNumSpikesperBin])
                    ax2.get_xaxis().set_ticks([])
                else:
                    ax = subplot2grid((len(possibleIntensity), len(possibleFreq)), (intensPlottingInds[intensity], frequency))
                    if len(allSettingsSpikes[frequency][intensity]) != 0:
                        ax.hist(allSettingsSpikes[frequency][intensity])
                    else:
                        pass
                    ax.set_ylim([0, maxNumSpikesperBin])
                    ax.set_axis_off()

        def getXlabelpoints(n):
            rawArray = array(range(1, n+1))/float(n+1) #The positions in a perfect (0,1) world
            diffFromCenter = rawArray - 0.6
            partialDiffFromCenter = diffFromCenter * 0.175 #Percent change has to be determined empirically
            finalArray = rawArray - partialDiffFromCenter
            return finalArray

        #Not sure yet if similar modification to the locations will be necessary. 
        def getYlabelpoints(n):
            rawArray = array(range(1, n+1))/float(n+1) #The positions in a perfect (0,1) world
            return rawArray

        freqLabelPositions = getXlabelpoints(len(possibleFreq))
        for indp, position in enumerate(freqLabelPositions):
            figtext(position, 0.065, "%.1f"% (possibleFreq[indp]/1000), ha = 'center')

        intensLabelPositions = getYlabelpoints(len(possibleIntensity))
        for indp, position in enumerate(intensLabelPositions):
            figtext(0.075, position, "%d"% possibleIntensity[indp])

        figtext(0.525, 0.025, "Frequency (kHz)", ha = 'center')
        figtext(0.025, 0.5, "Intensity (dB SPL)", va = 'center', rotation = 'vertical')
        show()
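# The division by 30 above works because the event timestamps in this method
# are left in sample units: at 30000 samples/s there are 30 samples per ms.
# The same conversion written explicitly:
def samples_to_ms(timesInSamples, samplingRate=30000.0):
    return timesInSamples / (samplingRate / 1000.0)  # 30 samples = 1 ms at 30 kHz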
Example #28
    selectedTrials = (intensityEachTrial==selectedIntensity)

freqEachTrial = freqEachTrial[selectedTrials]
trialsEachFreq = behavioranalysis.find_trials_each_type(freqEachTrial,possibleFreq)
###trialsEachFreq &= selectedTrials[:,np.newaxis]
numTrialsEachFreq = trialsEachFreq.sum(axis=0)
sortedTrials = np.nonzero(trialsEachFreq.T)[1] # The second array contains the sorted indexes
sortingInds = np.argsort(sortedTrials) # gives array of indices that would sort the sortedTrials

eventOnsetTimes = eventOnsetTimes[selectedTrials]


for indt,tetrodeID in enumerate(tetrodes):

    spikesFilename = os.path.join(fullephysDir, 'Tetrode{0}.spikes'.format(tetrodeID)) # make a path to ephys spike data of specified tetrode tetrodeID
    sp = loadopenephys.DataSpikes(spikesFilename) # load spike data from specified tetrode tetrodeID
    #spkTimeStamps = np.array(sp.timestamps)/SAMPLING_RATE # array of timestamps for each spike in seconds (that's why you divide by the sampling rate)

    # -- Load clusters --
    kkDataDir = os.path.dirname(spikesFilename)+'_kk'
    clusterFilename = 'Tetrode{0}.clu.1'.format(tetrodeID)
    fullPath = os.path.join(kkDataDir,clusterFilename)
    clusters = np.fromfile(fullPath,dtype='int32',sep=' ')[1:]

    clustersToPlot = range(2,10)
    for indc,clusterID in enumerate(clustersToPlot):
        spkTimeStamps = np.array(sp.timestamps[clusters==clusterID])/SAMPLING_RATE
        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \
            spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)
        sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike]
    ### Get events data ###
    fullEventFilename = os.path.join(EPHYS_PATH, oneCell.animalName,
                                     oneCell.ephysSession,
                                     'all_channels.events')
    eventData = loadopenephys.Events(fullEventFilename)
    ##### Get event onset times #####
    eventData.timestamps = np.array(
        eventData.timestamps
    ) / EPHYS_SAMPLING_RATE  #hard-coded ephys sampling rate!!

    ### Get spike data for just this cluster ###
    spikeFilename = os.path.join(EPHYS_PATH, oneCell.animalName,
                                 oneCell.ephysSession,
                                 'Tetrode{}.spikes'.format(oneCell.tetrode))
    spikeData = loadopenephys.DataSpikes(spikeFilename)
    spikeData.timestamps = spikeData.timestamps / EPHYS_SAMPLING_RATE
    clustersDir = os.path.join(EPHYS_PATH, oneCell.animalName,
                               oneCell.ephysSession) + '_kk'
    clusterFilename = os.path.join(clustersDir,
                                   'Tetrode{}.clu.1'.format(oneCell.tetrode))
    clusters = np.fromfile(clusterFilename, dtype='int32', sep=' ')[1:]
    spikeData.timestamps = spikeData.timestamps[clusters == oneCell.cluster]
    spikeData.samples = spikeData.samples[clusters == oneCell.cluster, :, :]
    spikeData.samples = spikeData.samples.astype(
        float) - 2**15  # FIXME: this is specific to OpenEphys
    # FIXME: This assumes the gain is the same for all channels and records
    spikeData.samples = (1000.0 / spikeData.gain[0, 0]) * spikeData.samples
    #spikeData = ephyscore.CellData(oneCell) #This defaults to settings ephys path
    spikeTimestamps = spikeData.timestamps
ISIVioBool = ISI < ISIcutoff

fractionViolation = np.mean(ISIVioBool)  # Assumes ISI in usec

print('Fraction of ISIs less than', ISIcutoff, 'is', fractionViolation)

ISIVioBoolFirst = np.append(ISIVioBool, False)
ISIVioBoolSecond = np.append(False, ISIVioBool)

dataDir = os.path.join(settings.EPHYS_PATH, mouseName, oneCell.ephysSession)
tetrodeFile = os.path.join(dataDir, 'Tetrode{0}.spikes'.format(tetrode))
clustersDir = os.path.join(settings.EPHYS_PATH, mouseName,
                           oneCell.ephysSession + '_kk')

dataTT = loadopenephys.DataSpikes(tetrodeFile)  #,readWaves=True)
dataTT.samples = dataTT.samples.astype(
    float) - 2**15  # FIXME: this is specific to OpenEphys
# FIXME: This assumes the gain is the same for all channels and records
dataTT.samples = (1000.0 / dataTT.gain[0, 0]) * dataTT.samples
dataTT.timestamps = dataTT.timestamps / dataTT.samplingRate
clustersFile = os.path.join(clustersDir, 'Tetrode%d.clu.1' % tetrode)
if os.path.isfile(clustersFile):
    dataTT.set_clusters(clustersFile)
else:
    print('Clusters file does not exist for this tetrode: {0}'.format(tetrode))

fetFilename = os.path.join(clustersDir, 'Tetrode%d.fet.1' % tetrode)
fetFile = open(fetFilename, 'r')

numFetclusters = fetFile.readline()