timeRange: (list or np.array) two-element array specifying time-range to extract around event.

        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
           the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''
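    # To make the slice convention above concrete, a minimal toy illustration
    # (the arrays below are made up for this example and are not part of the
    # original analysis):
    spikeTimesToy = np.array([-0.02, 0.011, 0.015, 0.003, 0.018])
    indexLimitsToy = np.array([[0, 2, 3],   # firstInd of each trial
                               [2, 3, 5]])  # lastInd+1 of each trial
    firstIndToy, lastIndPlusOneToy = indexLimitsToy[:, 2]        # limits of trial 2
    spikesTrial2Toy = spikeTimesToy[firstIndToy:lastIndPlusOneToy]  # -> [0.003, 0.018]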

    sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike] #For each spike, look up the raster-row (sorted) position of the trial it belongs to.


    # -- Calculate tuning --
    responseRange = [0.010,0.020] #range of time to count spikes in after event onset
    nSpikes = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,responseRange) #array of the number of spikes in range for each trial
    '''Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''
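    # For reference, a rough numpy sketch of what count_spikes_in_range
    # computes (illustration only; the real spikesanalysis implementation may
    # differ in details and edge-case handling):
    def count_spikes_in_range_sketch(spikeTimes, indexLimits, timeRange):
        nTrialsLocal = indexLimits.shape[1]
        counts = np.empty(nTrialsLocal)
        for oneTrial in range(nTrialsLocal):
            firstInd, lastIndPlusOne = indexLimits[:, oneTrial]
            spikesOneTrial = spikeTimes[firstInd:lastIndPlusOne]
            counts[oneTrial] = np.sum((spikesOneTrial > timeRange[0]) &
                                      (spikesOneTrial < timeRange[-1]))
        return counts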
    meanSpikesEachFrequency = np.empty(len(possibleFreq)) #make empty array of same size as possibleFreq

    # -- This part will be replaced by something like behavioranalysis.find_trials_each_type --
    trialsEachFreq = []
    for indf,oneFreq in enumerate(possibleFreq):
        trialsEachFreq.append(np.flatnonzero(freqEachTrial==oneFreq)) #indices of trials with this frequency, collected into a list grouped by freq
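    # Hypothetical sketch of the boolean trials-by-frequency matrix that a
    # helper like behavioranalysis.find_trials_each_type might provide (the
    # real function's signature and output are not shown in this fragment):
    trialsEachFreqMatrix = np.zeros((len(freqEachTrial), len(possibleFreq)), dtype=bool)
    for indf, oneFreq in enumerate(possibleFreq):
        trialsEachFreqMatrix[:, indf] = (freqEachTrial == oneFreq)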
Example #2
	    eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
	    timeRange: (list or np.array) two-element array specifying time-range to extract around event.

	    spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
	    trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
	       The first spike index is 0.
	    indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
	       the range is from firstSpike to lastSpike+1 (like in python slices)
	    spikeIndices
	'''

    # -- Find the number of spikes in each bin --
    spikeNumberInBinPerTrial = np.empty([numberOfBins, numberOfTrials])
    for i, binRange in enumerate(binTimeRanges):
        spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(
            spikeTimesFromEventOnset, indexLimitsEachTrial,
            binRange)  #array of the number of spikes in range for each trial
    ''' spikesanalysis.count_spikes_in_range
	    Count number of spikes on each trial in a given time range.

	       spikeTimesFromEventOnset: vector of spikes timestamps with respect
		 to the onset of the event.
	       indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
	       timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

	       returns nSpikes
	'''

    ##############################
    #THIS COULD BE A SEPARATE MODULE
    ##############################
Example #3
    # -- Load clusters --
    kkDataDir = os.path.dirname(spikesFilename)+'_kk'
    clusterFilename = 'Tetrode{0}.clu.1'.format(tetrodeID)
    fullPath = os.path.join(kkDataDir,clusterFilename)
    clusters = np.fromfile(fullPath,dtype='int32',sep=' ')[1:]

    clustersToPlot = range(2,10)
    for indc,clusterID in enumerate(clustersToPlot):
        spkTimeStamps = np.array(sp.timestamps[clusters==clusterID])/SAMPLING_RATE
        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = \
            spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)
        sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike]

        # -- Calculate tuning --
        nSpikes = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,responseRange)
        meanSpikesEachFrequency = np.empty(len(possibleFreq)) # make empty array of same size as possibleFreq

        # -- Calculate average firing for each freq --
        for indf,oneFreq in enumerate(possibleFreq):
            meanSpikesEachFrequency[indf] = np.mean(nSpikes[trialsEachFreq[indf]])

        clf()
        ax2 = plt.subplot2grid((1,4), (0, 0), colspan=3)
        plot(spikeTimesFromEventOnset, sortedIndexForEachSpike, '.', ms=1)
        axvline(x=0, ymin=0, ymax=1, color='r')

        #The cumulative sum of the number of trials for each frequency,
        #used below for plotting the separator lines across the figure.
        cumTrials = cumsum(numTrialsEachFreq)
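        #The plotting code that uses cumTrials is not included in this
        #fragment. A minimal hypothetical sketch of drawing one separator
        #line after each frequency group on the raster (pylab-style, like
        #the axvline call above):
        for trialBoundary in cumTrials[:-1]:
            axhline(y=trialBoundary-0.5, color='0.5')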
def allFreqData(subject, ephysSession, behaviorSession, tetrodeID, FreqInd, binTime, startTime, endTime, startRange, endRange):

    timeRange = [startTime, endTime]
    responseRange = [startRange, endRange]

    # -- Load event and spike data --
    ephysData = loadEphysData.loadEphys(subject, ephysSession, tetrodeID)
    eventTimes = ephysData[0]
    multipleEventOnset = ephysData[1]
    eventChannel = ephysData[2]
    spkTimeStamps = ephysData[3]

    # -- Load behavior data --
    

    # -- Number of trials in Behavior data --
    #The number of trials used is taken from the behavior data; the ephys data may contain the same number of trials or one extra.
    numberOfTrials = behaveData.numberOfTrials


    # -- Only use event onset times of one event --
    oneEvent = eventChannel==eventID #This picks out which channel you care about if there is more than one event
    eventOnset = multipleEventOnset*oneEvent #This keeps the array the same size as eventTimes while picking out only onsets on the channel you want
    #This makes sure that the behavior and ephys data have the same number of trials
    while (numberOfTrials < np.sum(eventOnset)):
        eventOnset = eventOnset[:-1]
    eventOnsetTimes = eventTimes[eventOnset == 1] #This gives only the times of the onset of the channel you want
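    # Toy illustration of the masking above (made-up values, assuming eventID
    # selects the sound channel, i.e. channel 0):
    #   eventChannel       = [0, 1, 0, 0, 1]
    #   multipleEventOnset = [1, 1, 0, 1, 0]
    #   oneEvent           = [T, F, T, T, F]   (eventChannel == 0)
    #   eventOnset         = [1, 0, 0, 1, 0]   -> eventTimes[eventOnset==1]
    #                                             keeps only sound-onset times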


    # -- Convert spike data into np.array's --
    (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)
    ''' spikesanalysis.eventlocked_spiketimes
        Create a vector with the spike timestamps w.r.t. events onset.

        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = 
            eventlocked_spiketimes(timeStamps,eventOnsetTimes,timeRange)

        timeStamps: (np.array) the time of each spike.
        eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
        timeRange: (list or np.array) two-element array specifying time-range to extract around event.

        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
           the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''


    # -- Make np.array of time ranges for bins --
    startTime = float(timeRange[0]) #makes sure that these are floats so division works as expected
    endTime = float(timeRange[1])
    fullTime = endTime - startTime
    numberOfBins = int((fullTime//binTime)+1)
    binTimeRanges = np.empty([numberOfBins,2]) #Gives np.array of time ranges for each bin
    xCoordinatesPlot = np.empty(numberOfBins)
    for indBin in range(0,numberOfBins):
        xCoordinatesPlot[indBin]=startTime+indBin*binTime
        binTimeRanges[indBin]=np.array([(startTime+indBin*binTime),(startTime+(indBin+1)*binTime)])
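    # Equivalent vectorized construction (a sketch; it produces the same
    # arrays as the loop above):
    binEdges = startTime + binTime*np.arange(numberOfBins+1)
    xCoordinatesPlot = binEdges[:-1]
    binTimeRanges = np.column_stack((binEdges[:-1], binEdges[1:]))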


    # -- Find the number of spikes in each bin --
    spikeNumberInBinPerTrial = np.empty([numberOfBins,numberOfTrials])
    for i,binRange in enumerate(binTimeRanges):
        spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,binRange) #array of the number of spikes in range for each trial
    ''' spikesanalysis.count_spikes_in_range
        Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''


    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE HISTOGRAM#####################################################################################################
    ###################################################################################################################################################################################


    # -- Pick which trials you care about in counting spikes --
    spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1
    spikeMeanInBin2 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse2
    for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
        spikeMeanInBin1[indBin] = np.mean(np.append(spikeCounts[trialsToUse1==1],0))  #The append 0 is just for the edge case that there are no trials to use so np.mean does not give a nan.
        spikeMeanInBin2[indBin] = np.mean(np.append(spikeCounts[trialsToUse2==1],0))
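    # Note: appending 0 above also lowers the mean slightly whenever trials do
    # exist (it adds one extra sample). A sketch of an alternative that only
    # falls back to 0 when a condition has no trials (stored under new names so
    # the original results are untouched):
    spikeMeanInBin1Alt = np.empty(numberOfBins)
    spikeMeanInBin2Alt = np.empty(numberOfBins)
    for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
        counts1 = spikeCounts[trialsToUse1==1]
        counts2 = spikeCounts[trialsToUse2==1]
        spikeMeanInBin1Alt[indBin] = counts1.mean() if counts1.size else 0.0
        spikeMeanInBin2Alt[indBin] = counts2.mean() if counts2.size else 0.0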



    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE RASTER PLOT###################################################################################################
    ###################################################################################################################################################################################


    sortedTrials = [] #trial indices reordered so that trials of the same frequency are grouped together
    numTrialsEachFreq = []  #Used to plot lines after each group of sorted trials
    for indf,oneFreq in enumerate(possibleFreq): #indf is index of this freq and oneFreq is the frequency
        indsThisFreq = np.flatnonzero(targetFreqs==oneFreq) #this gives indices of this frequency
        sortedTrials = np.concatenate((sortedTrials,indsThisFreq)) #adds all indices to a list called sortedTrials
        numTrialsEachFreq.append(len(indsThisFreq)) #finds number of trials each frequency has
    sortingInds = argsort(sortedTrials) #gives array of indices that would sort the sortedTrials

    sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike] #For each spike, look up the raster-row (sorted) position of the trial it belongs to.
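    # Tiny toy illustration of why argsort gives the raster row of each
    # original trial (made-up values, unrelated to the data above):
    #   sortedTrials = [2, 0, 3, 1]      -> raster rows, top to bottom
    #   np.argsort([2, 0, 3, 1]) -> [1, 3, 0, 2]
    #   so trial 0 is drawn at row 1, trial 1 at row 3, trial 2 at row 0, ...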


    nSpikes = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,responseRange) #array of the number of spikes in range for each trial
    '''Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''
    # -- Calculate average firing for each freq --
    meanSpikesEachFrequency = np.empty(len(possibleFreq))
    for indf,oneFreq in enumerate(possibleFreq):
        meanSpikesEachFrequency[indf] = np.mean(nSpikes[targetFreqs==oneFreq]) #average spike count over trials of this frequency

    return [spikeTimesFromEventOnset, sortedIndexForEachSpike, numTrialsEachFreq, possibleFreq, meanSpikesEachFrequency, xCoordinatesPlot, spikeMeanInBin1, spikeMeanInBin2]
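# Hypothetical usage of allFreqData (all values below are placeholders, not
# real session names):
#   (spikeTimes, sortedIndex, nTrialsEachFreq, possibleFreq, meanSpikesEachFreq,
#    xCoords, meanBin1, meanBin2) = allFreqData('test000', '2013-04-01_14-00-00',
#        '20130401a', tetrodeID=6, FreqInd=0, binTime=0.025, startTime=-0.3,
#        endTime=0.7, startRange=0.010, endRange=0.020)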
Example #5
def allFreqData(subject, ephysSession, behaviorSession, tetrodeID, FreqInd,
                binTime, startTime, endTime, startRange, endRange):

    timeRange = [startTime, endTime]
    responseRange = [startRange, endRange]

    # -- Load event and spike data --
    ephysData = loadEphysData.loadEphys(subject, ephysSession, tetrodeID)
    eventTimes = ephysData[0]
    multipleEventOnset = ephysData[1]
    eventChannel = ephysData[2]
    spkTimeStamps = ephysData[3]

    # -- Load behavior data --

    # -- Number of trials in Behavior data --
    #The number of trials used is taken from the behavior data; the ephys data may contain the same number of trials or one extra.
    numberOfTrials = behaveData.numberOfTrials

    # -- Only use event onset times of one event --
    oneEvent = eventChannel == eventID  #This picks out which channel you care about if there is more than one event
    eventOnset = multipleEventOnset * oneEvent  #This keeps the array the same size as eventTimes while picking out only onsets on the channel you want
    #This makes sure that the behavior and ephys data have the same number of trials
    while (numberOfTrials < np.sum(eventOnset)):
        eventOnset = eventOnset[:-1]
    eventOnsetTimes = eventTimes[
        eventOnset ==
        1]  #This gives only the times of the onset of the channel you want

    # -- Convert spike data into np.array's --
    (spikeTimesFromEventOnset, trialIndexForEachSpike,
     indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(
         spkTimeStamps, eventOnsetTimes, timeRange)
    ''' spikesanalysis.eventlocked_spiketimes
        Create a vector with the spike timestamps w.r.t. events onset.

        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = 
            eventlocked_spiketimes(timeStamps,eventOnsetTimes,timeRange)

        timeStamps: (np.array) the time of each spike.
        eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
        timeRange: (list or np.array) two-element array specifying time-range to extract around event.

        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
           the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''

    # -- Make np.array of time ranges for bins --
    startTime = float(
        timeRange[0]
    )  #makes sure that these are floats so division works as expected
    endTime = float(timeRange[1])
    fullTime = endTime - startTime
    numberOfBins = int((fullTime // binTime) + 1)
    binTimeRanges = np.empty([numberOfBins,
                              2])  #Gives np.array of time ranges for each bin
    xCoordinatesPlot = np.empty(numberOfBins)
    for indBin in range(0, numberOfBins):
        xCoordinatesPlot[indBin] = startTime + indBin * binTime
        binTimeRanges[indBin] = np.array([(startTime + indBin * binTime),
                                          (startTime + (indBin + 1) * binTime)
                                          ])

    # -- Find the number of spikes in each bin --
    spikeNumberInBinPerTrial = np.empty([numberOfBins, numberOfTrials])
    for i, binRange in enumerate(binTimeRanges):
        spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(
            spikeTimesFromEventOnset, indexLimitsEachTrial,
            binRange)  #array of the number of spikes in range for each trial
    ''' spikesanalysis.count_spikes_in_range
        Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''

    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE HISTOGRAM#####################################################################################################
    ###################################################################################################################################################################################

    # -- Pick which trials you care about in counting spikes --
    spikeMeanInBin1 = np.empty(
        numberOfBins
    )  #This will hold the mean number of spikes in each bin or time range for the trialsToUse1
    spikeMeanInBin2 = np.empty(
        numberOfBins
    )  #This will hold the mean number of spikes in each bin or time range for the trialsToUse2
    for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
        spikeMeanInBin1[indBin] = np.mean(
            np.append(spikeCounts[trialsToUse1 == 1], 0)
        )  #The append 0 is just for the edge case that there are no trials to use so np.mean does not give a nan.
        spikeMeanInBin2[indBin] = np.mean(
            np.append(spikeCounts[trialsToUse2 == 1], 0))

    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE RASTER PLOT###################################################################################################
    ###################################################################################################################################################################################

    sortedTrials = []  #trial indices reordered so that trials of the same frequency are grouped together
    numTrialsEachFreq = [
    ]  #Used to plot lines after each group of sorted trials
    for indf, oneFreq in enumerate(
            possibleFreq
    ):  #indf is index of this freq and oneFreq is the frequency
        indsThisFreq = np.flatnonzero(
            targetFreqs == oneFreq)  #this gives indices of this frequency
        sortedTrials = np.concatenate(
            (sortedTrials,
             indsThisFreq))  #adds all indices to a list called sortedTrials
        numTrialsEachFreq.append(
            len(indsThisFreq))  #finds number of trials each frequency has
    sortingInds = argsort(
        sortedTrials)  #gives array of indices that would sort the sortedTrials

    sortedIndexForEachSpike = sortingInds[
        trialIndexForEachSpike]  #For each spike, look up the raster-row (sorted) position of the trial it belongs to.

    nSpikes = spikesanalysis.count_spikes_in_range(
        spikeTimesFromEventOnset, indexLimitsEachTrial,
        responseRange)  #array of the number of spikes in range for each trial
    '''Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''
    # -- Calculate average firing for each freq --
    meanSpikesEachFrequency = np.empty(len(possibleFreq))
    for indf, oneFreq in enumerate(possibleFreq):
        meanSpikesEachFrequency[indf] = np.mean(nSpikes[targetFreqs == oneFreq])  #average spike count over trials of this frequency

    return [
        spikeTimesFromEventOnset, sortedIndexForEachSpike, numTrialsEachFreq,
        possibleFreq, meanSpikesEachFrequency, xCoordinatesPlot,
        spikeMeanInBin1, spikeMeanInBin2
    ]
Example #6
	spikeNumberInBinPerTrial = np.empty([numberOfBins,numberOfTrials])
	for i,binRange in enumerate(binTimeRanges):
	    spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,binRange) #array of the number of spikes in range for each trial
    ''' spikesanalysis.count_spikes_in_range
	    Count number of spikes on each trial in a given time range.

	       spikeTimesFromEventOnset: vector of spikes timestamps with respect
		 to the onset of the event.
	       indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
	       timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

	       returns nSpikes
	'''
    spikeNumber1 = spikesanalysis.count_spikes_in_range(
        spikeTimesFromEventOnsetTrials1, indexLimitsEachTrialTrials1,
        timeRange)  #array of the number of spikes in range for each trial
    '''
        ######################################################################################################################
        #THIS IS FOR ALL ONE FREQUENCY
        # -- Pick which trials you care about in counting spikes --
        spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1 for the frequency chosen
        spikeMeanInBin2 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse2 for the frequency chosen
        for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
            spikeMeanInBin1[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq1],0)) #The append 0 is just for the edge case that there are no trials to use with this frequency so np.mean does not give a nan.
            spikeMeanInBin2[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq2],0))
        ######################################################################################################################
        '''
    '''
	###################################################################################################################################################################################
	#####################################################THIS IS FOR THE RASTER PLOT###################################################################################################
	    eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
	    timeRange: (list or np.array) two-element array specifying time-range to extract around event.

	    spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
	    trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
	       The first spike index is 0.
	    indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
	       the range is from firstSpike to lastSpike+1 (like in python slices)
	    spikeIndices
	'''


	# -- Find the number of spikes in each bin --
	spikeNumberInBinPerTrial = np.empty([numberOfBins,numberOfTrials])
	for i,binRange in enumerate(binTimeRanges):
	    spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,binRange) #array of the number of spikes in range for each trial
	''' spikesanalysis.count_spikes_in_range
	    Count number of spikes on each trial in a given time range.

	       spikeTimesFromEventOnset: vector of spikes timestamps with respect
		 to the onset of the event.
	       indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
	       timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

	       returns nSpikes
	'''
	######################################################################################################################
	#THIS IS FOR ALL FREQUENCIES
	# -- Pick which trials you care about in counting spikes --
	spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1
    eventOnsetTimes=eventTimes[evID==1]

    eventOnsetTimes=eventOnsetTimes[:-1] #FIXME: Horrible fix

    tetrodeID = 3
    spike_filename=os.path.join(ephysRoot, ephysDir, 'Tetrode{0}.spikes'.format(tetrodeID))
    sp=loadopenephys.DataSpikes(spike_filename)
    spkTimeStamps=np.array(sp.timestamps)/SAMPLING_RATE
    (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)

    sortedIndexForEachSpike = sortingInds[trialIndexForEachSpike]


    # -- Calculate tuning --
    responseRange = [0.010,0.020]
    nSpikes = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,responseRange)
    meanSpikesEachFrequency = np.empty(len(possibleFreq))

    # -- This part will be replaced by something like behavioranalysis.find_trials_each_type --
    trialsEachFreq = []
    for indf,oneFreq in enumerate(possibleFreq):
        trialsEachFreq.append(np.flatnonzero(freqEachTrial==oneFreq))

    # -- Calculate average firing for each freq --
    for indf,oneFreq in enumerate(possibleFreq):
        try:
            meanSpikesEachFrequency[indf] = np.mean(nSpikes[trialsEachFreq[indf]])

        except IndexError:
            print 'Indexing problem on {0}, freq {1}'.format(ephysDir, oneFreq)
            pass
Example #9
        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
           the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''

    sortedIndexForEachSpike = sortingInds[
        trialIndexForEachSpike]  #For each spike, look up the raster-row (sorted) position of the trial it belongs to.

    # -- Calculate tuning --
    responseRange = [0.010, 0.020
                     ]  #range of time to count spikes in after event onset
    nSpikes = spikesanalysis.count_spikes_in_range(
        spikeTimesFromEventOnset, indexLimitsEachTrial,
        responseRange)  #array of the number of spikes in range for each trial
    '''Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''
    meanSpikesEachFrequency = np.empty(
        len(possibleFreq))  #make empty array of same size as possibleFreq

    # -- This part will be replaced by something like behavioranalysis.find_trials_each_type --
    trialsEachFreq = []
def histPlot(subject,behavSession,tetrodeID,cluster):

    allCellsName = 'allcells_'+subject
    allcells = __import__(allCellsName)
    
    cellIndex = allcells.cellDB.findcell(subject,behavSession,tetrodeID,cluster)

    ephysSession = allcells.cellDB[cellIndex].ephysSession
    ephysRoot = ephysRootDir+subject+'/'

    # -- Load Behavior Data --
    behaviorFilename = loadbehavior.path_to_behavior_data(subject,experimenter,paradigm,behavSession)
    bdata = loadbehavior.BehaviorData(behaviorFilename)

    numberOfTrials = len(bdata['choice'])
    targetFreqs = bdata['targetFrequency']

    #This gives an array of all frequencies presented
    possibleFreq = np.unique(bdata['targetFrequency'])

    #This gives an array with True at indices where the mouse made a correct decision
    correct = bdata['outcome']==bdata.labels['outcome']['correct']
    #This gives an array with True at indices where the mouse made an incorrect decision
    incorrect = bdata['outcome']==bdata.labels['outcome']['error']

    #This gives an array with true at indices of trials that are correct and went right
    rightward = bdata['choice']==bdata.labels['choice']['right']
    correctRightward = correct*rightward

    #This gives an array with true at indices of trials that are correct and went left
    leftward = bdata['choice']==bdata.labels['choice']['left']
    correctLeftward = correct*leftward

    #This gives an array with true at indices of trials that are incorrect and went right
    incorrectRightward = incorrect*rightward

    #This gives an array with true at indices of trials that are incorrect and went left
    incorrectLeftward = incorrect*leftward


    ###############################################################################################################################################################################
    trialsToUse1 = correctLeftward #This is an array of 1's and 0's to decide which trials to count spikes in and compare to the other trials
    trialsToUse2 = correctRightward #This is an array of 1's and 0's to decide which trials to count spikes in and compare to the other trials
    ##################################################################################################################################################################################



    # -- Load event data and convert event timestamps to ms --
    ephysDir = os.path.join(ephysRoot, ephysSession)
    eventFilename=os.path.join(ephysDir, 'all_channels.events')
    events = loadopenephys.Events(eventFilename) # Load events data
    eventTimes=np.array(events.timestamps)/SAMPLING_RATE #timestamps of each event, converted to seconds by dividing by the sampling rate (Hz); aligned with eventID and eventChannel
    multipleEventOnset=np.array(events.eventID)  #onset/offset flag for each event (1 if the event turned on, 0 if it turned off)
    eventChannel = np.array(events.eventChannel) #ID of the channel of each event. For example, 0 is the sound event, 1 is the trial event, ...


    # -- Load Spike Data From Certain Cluster --
    spkData = ephyscore.CellData(allcells.cellDB[cellIndex])
    spkTimeStamps = spkData.spikes.timestamps



    # -- Only use event onset times of one event --
    oneEvent = eventChannel==eventID #This picks out which channel you care about if there is more than one event
    eventOnset = multipleEventOnset*oneEvent #This keeps the array the same size as eventTimes while picking out only onsets on the channel you want

    while (numberOfTrials < np.sum(eventOnset)):
            eventOnset = eventOnset[:-1]

    eventOnsetTimes = eventTimes[eventOnset == 1] #This gives only the times of the onset of the channel you want


    # -- This is for one frequency --
    trialsOfFreq = targetFreqs==possibleFreq[Frequency] #array of booleans that is True if the chosen frequency was played in that trial
    trialsToUseWithFreq1 = trialsToUse1*trialsOfFreq  #1 if this is a trial to use and of the chosen frequency, 0 if not
    trialsToUseWithFreq2 = trialsToUse2*trialsOfFreq  #1 if this is a trial to use and of the chosen frequency, 0 if not

    eventOnsetTimesTrials1 = eventOnsetTimes[trialsToUseWithFreq1==1]
    eventOnsetTimesTrials2 = eventOnsetTimes[trialsToUseWithFreq2==1]



    # -- Convert spike data into np.array's --
    (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimes,timeRange)
    (spikeTimesFromEventOnsetTrials1,trialIndexForEachSpikeTrials1,indexLimitsEachTrialTrials1) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimesTrials1,timeRange)
    (spikeTimesFromEventOnsetTrials2,trialIndexForEachSpikeTrials2,indexLimitsEachTrialTrials2) = spikesanalysis.eventlocked_spiketimes(spkTimeStamps,eventOnsetTimesTrials2,timeRange)
    ''' spikesanalysis.eventlocked_spiketimes
        Create a vector with the spike timestamps w.r.t. events onset.

        (spikeTimesFromEventOnset,trialIndexForEachSpike,indexLimitsEachTrial) = 
            eventlocked_spiketimes(timeStamps,eventOnsetTimes,timeRange)

        timeStamps: (np.array) the time of each spike.
        eventOnsetTimes: (np.array) the time of each instance of the event to lock to.
        timeRange: (list or np.array) two-element array specifying time-range to extract around event.

        spikeTimesFromEventOnset: 1D array with time of spikes locked to event.
        trialIndexForEachSpike: 1D array with the trial corresponding to each spike.
           The first spike index is 0.
        indexLimitsEachTrial: [2,nTrials] range of spikes for each trial. Note that
           the range is from firstSpike to lastSpike+1 (like in python slices)
        spikeIndices
    '''


    # -- Find the number of spikes in each bin --
    spikeNumberInBinPerTrial = np.empty([numberOfBins,numberOfTrials])
    for i,binRange in enumerate(binTimeRanges):
        spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,binRange) #array of the number of spikes in range for each trial
    ''' spikesanalysis.count_spikes_in_range
        Count number of spikes on each trial in a given time range.

           spikeTimesFromEventOnset: vector of spikes timestamps with respect
             to the onset of the event.
           indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
           timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

           returns nSpikes
    '''
    '''
    ######################################################################################################################
    #THIS IS FOR ALL FREQUENCIES
    # -- Pick which trials you care about in counting spikes --
    spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1
    spikeMeanInBin2 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse2
    for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
        spikeMeanInBin1[indBin] = np.mean(np.append(spikeCounts[trialsToUse1==1],0))  #The append 0 is just for the edge case that there are no trials to use so np.mean does not give a nan.
        spikeMeanInBin2[indBin] = np.mean(np.append(spikeCounts[trialsToUse2==1],0))
    ######################################################################################################################
    '''



    ######################################################################################################################
    #THIS IS FOR ALL ONE FREQUENCY
    # -- Pick which trials you care about in counting spikes --
    spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1 for the frequency chosen
    spikeMeanInBin2 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse2 for the frequency chosen
    for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
        spikeMeanInBin1[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq1==1],0)) #The append 0 is just for the edge case that there are no trials to use with this frequency so np.mean does not give a nan.
        spikeMeanInBin2[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq2==1],0))
    ######################################################################################################################




    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE RASTER PLOT###################################################################################################
    ###################################################################################################################################################################################

    targetFreqsTrials1 = targetFreqs[trialsToUseWithFreq1==1]
    targetFreqsTrials2 = targetFreqs[trialsToUseWithFreq2==1]


    sortedTrials1 = [] #list of trial-index arrays, one per frequency, for the first raster below
    for indf1,oneFreq1 in enumerate(possibleFreq): #indf1 is the index of this freq and oneFreq1 is the frequency
        indsThisFreq1 = np.flatnonzero(targetFreqsTrials1==oneFreq1) #indices of trials with this frequency
        sortedTrials1.append(indsThisFreq1) #append this frequency's trial indices to sortedTrials1


    sortedTrials2 = [] #list of trial-index arrays, one per frequency, for the second raster below
    for indf2,oneFreq2 in enumerate(possibleFreq): #indf2 is the index of this freq and oneFreq2 is the frequency
        indsThisFreq2 = np.flatnonzero(targetFreqsTrials2==oneFreq2) #indices of trials with this frequency
        sortedTrials2.append(indsThisFreq2) #append this frequency's trial indices to sortedTrials2


    colorEachFreq = []
    strPossibleFreq = []
    for indFreq, Freq in enumerate(possibleFreq):
        colorEachFreq.append('b')
        colorEachFreq.append('g')
        strPossibleFreq.append(str(Freq))

    clf()
    rastorFreq1 = plt.subplot2grid((3,4), (0, 0), colspan=2, rowspan=2)
    extraplots.raster_plot(spikeTimesFromEventOnsetTrials1, indexLimitsEachTrialTrials1, timeRange, sortedTrials1, labels=strPossibleFreq,colorEachCond = colorEachFreq)

    rastorFreq2 = plt.subplot2grid((3,4), (0, 2), colspan=2, rowspan=2)
    extraplots.raster_plot(spikeTimesFromEventOnsetTrials2, indexLimitsEachTrialTrials2, timeRange, sortedTrials2, labels=strPossibleFreq,colorEachCond = colorEachFreq)



    ###################################################################################################################################################################################
    #####################################################THIS IS FOR THE HISTOGRAM#####################################################################################################
    ###################################################################################################################################################################################

    histogram3 = plt.subplot2grid((3,4), (2, 0), colspan=2)
    bar(xCoordinatesPlot,spikeMeanInBin1, width=binTime)
    ylabel('trialsToUse1, Average number of spikes in bin size {} sec'.format(binTime))
    xlabel(timeLabeling)

    histogram4 = plt.subplot2grid((3,4), (2, 2), colspan=2)
    bar(xCoordinatesPlot,spikeMeanInBin2, width=binTime)
    ylabel('trialsToUse2, Average number of spikes in bin size {} sec'.format(binTime))
    xlabel(timeLabeling)

    show()

    #clusterName = 'cluster'+str(oneCell.cluster)
    #tetrodeName = 'TT'+str(tetrodeID)
    tetrodeClusterName = 'T'+str(tetrodeID)+'c'+str(cluster)
    plt.gcf().set_size_inches((8.5,11))
    figformat = 'png' #'png' #'pdf' #'svg'
    filename = 'psy_%s_%s_%s_%s.%s'%(subject,behavSession,tetrodeClusterName,nametrialsToUse,figformat)
    fulloutputDir = outputDir+subject+'/'+'cluster_oneFreq'+'/'
    fullFileName = os.path.join(fulloutputDir,filename)
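    # The save call itself is not part of this fragment; presumably something
    # along the lines of:
    #     plt.savefig(fullFileName, format=figformat)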
	# -- Find the number of spikes in each bin --
	spikeNumberInBinPerTrial = np.empty([numberOfBins,numberOfTrials])
	for i,binRange in enumerate(binTimeRanges):
	    spikeNumberInBinPerTrial[i] = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnset,indexLimitsEachTrial,binRange) #array of the number of spikes in range for each trial
	''' spikesanalysis.count_spikes_in_range
	    Count number of spikes on each trial in a given time range.

	       spikeTimesFromEventOnset: vector of spikes timestamps with respect
		 to the onset of the event.
	       indexLimitsEachTrial: each column contains [firstInd,lastInd+1] of the spikes on a trial.
	       timeRange: time range to evaluate. Spike times exactly at the limits are not counted.

	       returns nSpikes
	'''
        spikeNumber1 = spikesanalysis.count_spikes_in_range(spikeTimesFromEventOnsetTrials1,indexLimitsEachTrialTrials1,timeRange) #array of the number of spikes in range for each trial


        '''
        ######################################################################################################################
        #THIS IS FOR ALL ONE FREQUENCY
        # -- Pick which trials you care about in counting spikes --
        spikeMeanInBin1 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse1 for the frequency chosen
        spikeMeanInBin2 = np.empty(numberOfBins) #This will hold the mean number of spikes in each bin or time range for the trialsToUse2 for the frequency chosen
        for indBin, spikeCounts in enumerate(spikeNumberInBinPerTrial):
            spikeMeanInBin1[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq1],0)) #The append 0 is just for the edge case that there are no trials to use with this frequency so np.mean does not give a nan.
            spikeMeanInBin2[indBin] = np.mean(np.append(spikeCounts[trialsToUseWithFreq2],0))
        ######################################################################################################################
        '''