def classify_epoch(epoch, rate):
    """
    This function returns a sleep stage classification (integers: 1 for
    NREM stage 1, 2 for NREM stage 2, and 3 for NREM stage 3/4) given an
    epoch of EEG and a sampling rate.
    """
    Pxx, freqs = m.psd(epoch, NFFT=256, Fs=rate)
    nPxx = Pxx / float(sum(Pxx))

    delta_f = plt.find((0 < freqs) & (freqs <= 3))
    # delta_power = sum(Pxx[delta_f])
    ndelta_power = sum(nPxx[delta_f])

    spindles_f = plt.find((11 <= freqs) & (freqs <= 15))
    # spindle_power = sum(Pxx[spindles_f])
    nspindle_power = sum(nPxx[spindles_f])
    # ratio = spindle_power/delta_power

    if ndelta_power > 0.8:        # suggests stage 3
        stage = 3
    elif nspindle_power > 0.03:   # suggests stage 2
        stage = 2
    else:
        stage = 1
    return stage
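# Note: plt.find / pylab.find was deprecated in matplotlib 2.2 and removed in
# matplotlib 3.1. A minimal drop-in replacement for the snippets below,
# assuming only NumPy, is sketched here (not part of the original code):
import numpy as np

def find(condition):
    """Indices where a boolean array is True (stand-in for pylab.find)."""
    return np.flatnonzero(np.asarray(condition))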
def compute_firing_rate_per_motion(direction_of_motions, spk_times, trials, time_bin):
    avg_firing_rate = np.zeros(len(direction_of_motions))
    for d in range(len(direction_of_motions)):
        # extract the trial times for each direction
        time_indx = plt.find(direction_of_motions[d] == trials[:, 0])
        times_motion_started = trials[time_indx, 1]
        total_spikes_per_trial = 0
        # for each time of the trial where the movement started,
        # count the number of spikes that occurred in the time window
        for t in range(len(times_motion_started)):
            spike_indx = plt.find((spk_times >= times_motion_started[t] - time_bin) &
                                  (spk_times <= times_motion_started[t] + time_bin))
            total_spikes_per_trial += len(spike_indx)
        # compute the average firing rate
        avg_rate = ((1.0 * total_spikes_per_trial) /
                    (1.0 * len(times_motion_started))) / time_bin
        avg_rate /= 2
        # store the firing rate for this direction
        avg_firing_rate[d] = avg_rate
    # pair each direction with its firing rate in a two-column array
    dir_rates = np.column_stack((direction_of_motions, avg_firing_rate))
    return dir_rates
def get_features(data_epoch, rate):
    pxxs = []
    for eeg_epoch in data_epoch:
        pxx, freqs = m.psd(eeg_epoch, Fs=rate)
        spindle_band = plt.find((10 <= freqs) & (freqs <= 15))
        pxxs.append(sum(pxx[spindle_band]))
        pxxs.append(max(pxx[spindle_band]))
        pxxs.append(np.mean(pxx[spindle_band]))
    return pxxs
def detector_tester(APTimes, actualTimes):
    """
    Returns percentTrueSpikes (% correctly detected) and falseSpikeRate
    (extra APs per second of data).

    Compares actual spike times with detected spike times.
    This only works if we give you the answers!
    """
    JITTER = 0.0025  # 2.5 ms of jitter allowed

    # first match the two sets of spike times. Anything within JITTER
    # is considered a match (but only one per time frame!)

    # order the lists
    detected = np.sort(APTimes)
    actual = np.sort(actualTimes)

    # remove spikes with the same times (these are false APs)
    temp = np.append(detected, -1)
    detected = detected[plt.find(plt.diff(temp) != 0)]

    # find matching action potentials and mark as matched (trueDetects)
    trueDetects = []
    for sp in actual:
        z = plt.find((detected >= sp - JITTER) & (detected <= sp + JITTER))
        if len(z) > 0:
            for i in z:
                zz = plt.find(trueDetects == detected[i])
                if len(zz) == 0:
                    trueDetects = np.append(trueDetects, detected[i])
                    break
    percentTrueSpikes = 100.0 * len(trueDetects) / len(actualTimes)

    # everything else is a false alarm
    totalTime = actual[len(actual) - 1] - actual[0]
    falseSpikeRate = (len(APTimes) - len(actualTimes)) / totalTime

    # added this for auto-evaluation based on criteria
    pct_spike_eval = "PASS" if percentTrueSpikes > 90.0 else "FAIL"
    false_spike_eval = "PASS" if falseSpikeRate < 2.5 else "FAIL"
    overall_result = "FAIL" if pct_spike_eval == "FAIL" or false_spike_eval == "FAIL" else "PASS"

    print('Action Potential Detector Performance:')
    print('    Correct number of action potentials = %d' % len(actualTimes))
    print('    %s: Percent True Spikes = %f' % (pct_spike_eval, percentTrueSpikes))
    print('    %s: False Spike Rate = %f spikes/s' % (false_spike_eval, falseSpikeRate))
    print('')
    print('Overall Evaluation: %s' % overall_result)
    print('')
    return {'Percent True Spikes': percentTrueSpikes,
            'False Spike Rate': falseSpikeRate}
def plot_waveforms(time, voltage, APTimes, titlestr):
    """
    plot_waveforms takes four arguments - the recording time array, the
    voltage array, the times of the detected action potentials, and the
    title of your plot. The function creates a labeled plot showing the
    waveforms for each detected action potential.
    """
    # calculate sample rate (frames per ms)
    sample_rate = float(len(time) - 1) / (1000 * (time[-1] - time[0]))
    plt.figure()

    # cycle through each detected spike
    for i in range(0, len(APTimes)):
        # Find the index of the i-th detected spike time in the time array
        # using plt.find. Note that using "==" as a condition can be
        # dangerous, since it requires APTimes[i] to be exactly the same as
        # one element of time. To avoid this, we allow 0.01 ms of jitter;
        # min() is used in case several indices are returned.
        idx_spike = min(plt.find(abs(time - APTimes[i]) < 0.00001))

        # Find the start and end index for plotting. Remember that
        # sample_rate may not be an integer, so we have to use int() to
        # convert it into an integer.
        idx_start = idx_spike - int(3 * sample_rate)
        idx_end = idx_spike + int(3 * sample_rate)

        # If the index were negative or bigger than the length of the array
        # at this point, an error would occur; make sure that does not happen.
        if (idx_start >= 0) & (idx_end < len(time) - 1):
            # plot the waveform for the i-th spike
            xx = np.linspace(-3, 3, int(sample_rate * 6))
            yy = voltage[idx_start:idx_end]
            plt.plot(xx, yy, 'b')

    # add axis labels and title
    plt.xlabel('Time (ms)')
    plt.ylabel('Voltage (uV)')
    plt.title(titlestr)
    plt.show()
def gen_plot_inds(lims, inds_cc=None, exclude_conns=True):
    """ Determine which connections should be plotted in each ASC class. """
    plots = list(lims.keys()) + ['other']
    plots.remove('covs')

    # check sparseness for image-based (seed region) analysis
    if scipy.sparse.issparse(lims[plots[0]]['pctls']):
        voxels = lims[plots[0]]['pctls'][0, 1:].toarray()
        if not inds_cc:
            inds_cc = np.arange(voxels.shape[-1]).astype(int)
        inds_plots = {}
        notin = inds_cc.copy()
        for plot in plots[:3]:
            inds_plots[plot] = np.intersect1d(
                inds_cc, pl.find(lims[plot]['pctls'][0, 1:].toarray()))
            notin = np.setdiff1d(notin, inds_plots[plot])
        inds_plots['other'] = notin
        # exclude from additive if in common or uncorrelated
        if exclude_conns:
            inds_plots['common'] = np.setdiff1d(inds_plots['common'],
                                                inds_plots['uncorrelated'])
            inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                                  inds_plots['common'])
            inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                                  inds_plots['uncorrelated'])
    else:
        # check if a correlation matrix is provided
        if not inds_cc:
            inds_cc = np.triu_indices(n_nodes, 1)
        indices = np.triu_indices(n_nodes, 1)
        notin = inds_cc
        inds_plots = {}
        for plot in plots[:3]:
            inds_plots[plot] = np.intersect1d(inds_cc,
                                              pl.find(fa(lims[plot]['pctls'])))
            notin = np.setdiff1d(notin, pl.find(fa(lims[plot]['pctls'])))
        inds_plots['other'] = notin
        # exclude from additive if in common or uncorrelated
        if exclude_conns:
            inds_plots['common'] = np.setdiff1d(inds_plots['common'],
                                                inds_plots['uncorrelated'])
            inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                                  inds_plots['common'])
            inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                                  inds_plots['uncorrelated'])

    # self.inds_plots = inds_plots
    lims['covs']['inds_plots'] = inds_plots
    return lims
def scoresStats(scoresList):
    scoresList = array(scoresList)
    nbPlayers = scoresList.shape[1]
    print("Player | wins loses (ties) meanWinMargin meanLossMargin")
    for player in range(nbPlayers):
        otherPlayer = (player + 1) % nbPlayers
        winning = find(scoresList[:, player] > scoresList[:, otherPlayer])
        losing = find(scoresList[:, player] < scoresList[:, otherPlayer])
        wins, loses = len(winning), len(losing)
        ties = scoresList.shape[0] - wins - loses
        meanWinningMargin = mean(sum(scoresList[winning], 1))
        meanLosingMargin = mean(sum(scoresList[losing], 1))
        print("   %c   | %d %d (%d) +%f -%f" %
              (chr(65 + player), wins, loses, ties,
               meanWinningMargin, meanLosingMargin))
def plot_waveforms(time, voltage, APTimes, titlestr):
    """
    plot_waveforms takes four arguments - the recording time array, the
    voltage array, the times of the detected action potentials, and the
    title of your plot. The function creates a labeled plot showing the
    waveforms for each detected action potential.
    """
    plt.figure()

    ## Your Code Here
    for n in range(0, APTimes.size):
        # grab the times within +/- 3 ms of the n-th detected spike
        ind = time[(time > (APTimes[n] - 0.003)) & (time < (APTimes[n] + 0.003))]
        sp = np.zeros(ind.size)
        for i in range(0, ind.size):
            sp[i] = plt.find(time == ind[i])
        sp = sp.astype(np.int64)
        x = np.linspace(-3.0e-3, 3.0e-3, sp.size)
        plt.plot(x, voltage[sp])

    plt.xlabel('Time (s)')
    plt.ylabel('Voltage (V)')
    plt.title(titlestr)
    plt.show()
def detector_tester(APTimes, actualTimes):
    """
    Returns percentTrueSpikes (% correctly detected) and falseSpikeRate
    (extra APs per second of data).

    Compares actual spike times with detected spike times.
    This only works if we give you the answers!
    """
    JITTER = 0.025  # 25 ms of jitter allowed

    # first match the two sets of spike times. Anything within JITTER
    # is considered a match (but only one per time frame!)

    # order the lists
    detected = np.sort(APTimes)
    actual = np.sort(actualTimes)

    # remove spikes with the same times (these are false APs)
    temp = np.append(detected, -1)
    detected = detected[plt.find(plt.diff(temp) != 0)]

    # find matching action potentials and mark as matched (trueDetects)
    trueDetects = []
    for sp in actual:
        z = plt.find((detected >= sp - JITTER) & (detected <= sp + JITTER))
        if len(z) > 0:
            for i in z:
                zz = plt.find(trueDetects == detected[i])
                if len(zz) == 0:
                    trueDetects = np.append(trueDetects, detected[i])
                    break
    percentTrueSpikes = 100.0 * len(trueDetects) / len(actualTimes)

    # everything else is a false alarm
    totalTime = actual[len(actual) - 1] - actual[0]
    falseSpikeRate = (len(APTimes) - len(actualTimes)) / totalTime

    print('Action Potential Detector Performance:')
    print('    Correct number of action potentials = ' + str(len(actualTimes)))
    print('    Percent True Spikes = ' + str(percentTrueSpikes))
    print('    False Spike Rate = ' + str(falseSpikeRate) + ' spikes/s')
    print('')
    return {'Percent True Spikes': percentTrueSpikes,
            'False Spike Rate': falseSpikeRate}
def time_spent_in_stages(stages):
    """Return the time spent in each of the 8 sleep stages, in seconds,
    assuming 30-second epochs."""
    timespent_array = []
    for i in range(8):
        indices = plt.find(stages == i)
        timespent_array.append(len(indices) * 30)  # 30 s per epoch
    return timespent_array
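# A vectorized sketch of the same computation, assuming stages is an integer
# array and each epoch lasts 30 s (the helper name is hypothetical):
import numpy as np

def time_spent_in_stages_vec(stages, n_stages=8, epoch_s=30):
    # count epochs per stage, then convert counts to seconds
    counts = np.bincount(np.asarray(stages, dtype=int), minlength=n_stages)
    return counts[:n_stages] * epoch_s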
def getLineSeed(binary, scale, bottom, top):
    """
    Find "line seeds" inside a binarized image. The line seeds are multiple
    boxes that surround text in a picture.
    --------------------------------------
    @args:
        - binary: 2D array
            The image from which line seeds will be extracted
        - scale: double
            The size of the average object
        - bottom: 2D array
            The bottom boundary of line seeds
        - top: 2D array
            The top boundary of line seeds
    --------------------------------------
    The line seeds act as a "mask" that isolates lines from each other to
    allow segmentation.
    """
    # use scale as an int
    vrange = int(scale)
    threshold = 0.1

    # find the bottom boundary
    bmarked = ndi.maximum_filter(
        bottom == ndi.maximum_filter(bottom, (vrange, 0)), (2, 2))
    bmarked = bmarked * (bottom > threshold * np.amax(bottom) * threshold)

    # find the top boundary
    tmarked = ndi.maximum_filter(
        top == ndi.maximum_filter(top, (vrange, 0)), (2, 2))
    tmarked = tmarked * (top > threshold * np.amax(top) * threshold / 2)
    tmarked = ndi.maximum_filter(tmarked, (1, 20))

    # create seeds
    seeds = np.zeros(binary.shape, 'i')
    line_spacing = vrange // 2
    for x in range(bmarked.shape[1]):
        transitions = sorted([(y, 1) for y in pylab.find(bmarked[:, x])]
                             + [(y, 0) for y in pylab.find(tmarked[:, x])])[::-1]
        transitions += [(0, 0)]
        for i in range(len(transitions) - 1):
            y0, s0 = transitions[i]
            if s0 == 0:
                continue
            seeds[y0 - line_spacing:y0, x] = 1
            y1, s1 = transitions[i + 1]
            if s1 == 0 and (y0 - y1) < 5 * scale:
                seeds[y1:y0, x] = 1
    return ndi.maximum_filter(seeds, (1, vrange))
def analys_trans(fname, maxSr=15, srHstBins=10):
    srHstBinSize = maxSr / srHstBins
    (time, sr) = np.load(fname)
    # sr = gs_filter(sr, 5)
    srHst = np.histogram(sr, bins=srHstBins, range=(0, maxSr))[0]
    srMidpntIdx = len(srHst) // 2
    firstMaxIdx = np.argmax(srHst[:srMidpntIdx])
    secondMaxIdx = srMidpntIdx + np.argmax(srHst[srMidpntIdx:])
    srMidpnt = np.argmin(srHst[firstMaxIdx:secondMaxIdx]) * srHstBinSize
    # srMidpnt = (firstMaxIdx + secondMaxIdx) / 2
    print(srMidpnt)
    srMidpnt = 50.  # override the computed midpoint

    thr = np.array(sr >= srMidpnt, dtype='int32')
    df = np.diff(thr)
    indices_up = pl.find(df == 1)
    indices_down = pl.find(df == -1)

    TimesUp = []
    ActMeanUp = []
    TimesDown = []
    ActMeanDown = []
    Periods = []

    # all time in sec
    if sr[0] > srMidpnt:
        for up, down in zip(indices_up, indices_down):
            TimesDown.append(up - down)
            ActMeanDown.append(np.mean(sr[down:up]))
        for up, down in zip(indices_up, indices_down[1:]):
            TimesUp.append(down - up)
            ActMeanUp.append(np.mean(sr[up:down]))
        Periods = np.diff(indices_down)
    elif sr[0] <= srMidpnt:
        for up, down in zip(indices_up, indices_down):
            TimesUp.append(down - up)
            ActMeanUp.append(np.mean(sr[up:down]))
        for up, down in zip(indices_up[1:], indices_down):
            TimesDown.append(up - down)
            ActMeanDown.append(np.mean(sr[down:up]))
        Periods = np.diff(indices_up)
    else:
        raise Exception("Unexpected type of sr")

    return (np.array(Periods), np.array(TimesDown), np.array(TimesUp),
            np.array(ActMeanDown), np.array(ActMeanUp))
def detector_tester(APTimes, actualTimes):
    """
    Returns percentTrueSpikes (% correctly detected) and falseSpikeRate
    (extra APs per second of data).

    Compares actual spike times with detected spike times.
    This only works if we give you the answers!
    """
    # print APTimes
    # print actualTimes

    JITTER = 0.0025  # 2.5 ms of jitter allowed

    # first match the two sets of spike times. Anything within JITTER
    # is considered a match (but only one per time frame!)

    # order the lists
    detected = np.sort(APTimes)
    actual = np.sort(actualTimes)

    # remove spikes with the same times (these are false APs)
    temp = np.append(detected, -1)
    detected = detected[plt.find(plt.diff(temp) != 0)]

    # find matching action potentials and mark as matched (trueDetects)
    trueDetects = []
    for sp in actual:
        z = plt.find((detected >= sp - JITTER) & (detected <= sp + JITTER))
        if len(z) > 0:
            for i in z:
                zz = plt.find(trueDetects == detected[i])
                if len(zz) == 0:
                    trueDetects = np.append(trueDetects, detected[i])
                    break
    percentTrueSpikes = 100.0 * len(trueDetects) / len(actualTimes)

    # everything else is a false alarm
    totalTime = actual[len(actual) - 1] - actual[0]
    falseSpikeRate = (len(APTimes) - len(actualTimes)) / totalTime

    print('Action Potential Detector Performance:')
    print('    Correct number of action potentials = ' + str(len(actualTimes)))
    print('    Percent True Spikes = ' + str(percentTrueSpikes))
    print('    False Spike Rate = ' + str(falseSpikeRate) + ' spikes/s')
    print('')
    return {'Percent True Spikes': percentTrueSpikes,
            'False Spike Rate': falseSpikeRate}
def GetSub(self, trackId, isnap=-1):
    ''' Load a subhalo with the given trackId at snapshot isnap. '''
    # subhalos = LoadSubhalos(isnap, rootdir)
    # return subhalos[subhalos['TrackId'] == trackId]
    if self.nfiles:
        subid = find(self.LoadSubhalos(isnap, 'TrackId') == trackId)[0]
    else:
        subid = trackId
    return self.LoadSubhalos(isnap, subid)
def subject34_stage4_list():
    """ List of indices for stage 4s. """
    stage4_indices = [[[], []], [[], []]]
    stages, stagelengths = pull_subject34_stages()
    for sidx in range(2):
        for bridx in range(2):
            stage4_indices[sidx][bridx] = plt.find(stages[sidx, bridx] == 4)
    gc.collect()
    return np.array(stage4_indices), stagelengths
def preferred_direction(fit_curve):
    """
    The function takes a 2-dimensional array with the x-values of the fit
    curve in the first column and the y-values of the fit curve in the
    second. It returns the preferred direction of the neuron (in degrees).
    """
    # find the motion direction associated with the highest firing rate
    preferred_value = max(fit_curve[:, 1])
    preferred_index = plt.find(fit_curve[:, 1] == preferred_value)
    return fit_curve[preferred_index[0], 0]
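# A minimal equivalent using np.argmax instead of max() plus plt.find (a
# sketch under the same assumptions about the input array; helper name is
# illustrative):
import numpy as np

def preferred_direction_argmax(fit_curve):
    # row with the highest firing rate; column 0 holds the direction
    return fit_curve[np.argmax(fit_curve[:, 1]), 0]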
def calculateHalphaActivity(xx, yy):
    aha = 1.8565921   # correction coefficient
    bha = -0.0870908  # correction coefficient
    mask = np.where(np.logical_and(xx >= 6545., xx <= 6585.))
    if len(mask[0]) == 0:
        print("Error: spectrum does not cover H-alpha range!")
        exit()
    x = xx[mask]
    y = yy[mask]
    hlc = max(pylab.find(x <= 6562.01))
    hrc = min(pylab.find(x >= 6563.61))
    hl1 = max(pylab.find(x <= 6545.495))
    hl2 = min(pylab.find(x >= 6556.245))
    hr1 = max(pylab.find(x <= 6575.934))
    hr2 = min(pylab.find(x >= 6584.684))
    ha_core = sum(y[hlc:hrc])
    href1 = sum(y[hl1:hl2])
    href2 = sum(y[hr1:hr2])
    ha_rtest = ha_core / (href1 + href2)
    ha = ha_rtest * aha + bha
    return ha
def good_AP_finder2(time, voltage):
    """
    This function takes the following input:
        time - vector where each element is a time in seconds
        voltage - vector where each element is a voltage at a different time

    We are assuming that the two vectors are in correspondence (meaning
    that at a given index, the time in one corresponds to the voltage in
    the other). The vectors must be the same size or the code won't run.

    This function returns the following output:
        APTime - all the times where a spike (action potential) was detected
    """
    # let's make sure the input looks at least reasonable
    if len(voltage) != len(time):
        print("Can't run - the vectors aren't the same length!")
        APTime = []
        return APTime

    # Pick a threshold. You can eyeball it by looking at the plot, or you
    # can write code to find it. Code would be better, but isn't 100%
    # necessary.
    thrd = -80

    # find all the indices whose corresponding voltage is lower than the threshold
    detectedAPIndex = plt.find(voltage < thrd)

    # Several neighboring indices can correspond to the same spike; we only
    # want the first index for each spike, so we throw away the frames that
    # immediately follow it.
    # calculate the difference between neighboring picked indices
    diff_detectedAPIndex = plt.diff(detectedAPIndex)
    # if diff_detectedAPIndex > 1, we know it's a new spike. Note that diff
    # omits the first element, which is a spike, so insert it back.
    detectedAPIndex_select = np.insert(diff_detectedAPIndex > 1, 0, True)
    # detectedAPIndex_select is a boolean array with the same length as
    # detectedAPIndex; select the indices that correspond to the beginning
    # frame of each spike
    detectedAPIndex = detectedAPIndex[detectedAPIndex_select]

    # find the times based on the indices
    APTime = list(time[i] for i in detectedAPIndex)
    return APTime
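# A compact sketch of the same onset-picking idea in plain NumPy (helper name
# and default threshold are illustrative, taken from the function above):
import numpy as np

def spike_onsets(time, voltage, thrd=-80):
    below = np.flatnonzero(voltage < thrd)   # samples below threshold
    if below.size == 0:
        return time[:0]                      # no spikes detected
    # keep only the first sample of each run of consecutive indices
    return time[below[np.insert(np.diff(below) > 1, 0, True)]]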
def plotIvsR(ix, iy, fx, fy, cx, cy):
    RF = sqrt((fx - cx)**2 + (fy - cy)**2)
    mD2 = []
    mI2 = []
    for i in range(len(RF)):
        r = 5
        D = sqrt((ix - fx[i])**2 + (iy - fy[i])**2)
        I = find(D < r)
        for j in I:
            mD2.append(D[j])
            mI2.append(i)
    # plot(mI, mD, 'ro', mI2, mD2, 'bo')
    plot(mI2, mD2, 'bo')
    xlabel('Index star number')
    ylabel('Match distance')
def roll_axes(direction_rates):
    """
    roll_axes takes the x-values (directions) and y-values (direction_rates)
    and returns new x and y values that have been "rolled" to put the
    maximum direction_rate in the center of the curve. The first and last
    y-value in the returned list should be set to be the same. (See problem
    set directions.)

    Hint: Use np.roll()
    """
    bin_size = direction_rates[1, 0] - direction_rates[0, 0]
    new_xs = np.append(direction_rates[:, 0], direction_rates[-1, 0])
    new_ys = np.append(direction_rates[:, 1], direction_rates[-1, 1])

    roll_degrees = plt.find(max(new_ys) == new_ys)
    roll_degrees = roll_degrees[0]
    if roll_degrees < 4:
        roll_degrees = np.abs(roll_degrees - 4)
    else:
        roll_degrees = 3 - roll_degrees

    new_xs = np.roll(new_xs, roll_degrees)
    new_ys = np.roll(new_ys, roll_degrees)
    new_ys[0] = new_ys[-1]

    zero_indx = plt.find(new_xs == 0)
    zero_indx = zero_indx[0]
    for i in range(len(new_xs)):
        if i < zero_indx:
            new_xs[i] = (zero_indx - i) * bin_size * (-1)

    # debug_plot_rolled_data(new_xs, new_ys, bin_size)
    return new_xs, new_ys, int(roll_degrees * bin_size)
def bin_spikes(trials, spk_times, time_bin=0.08):
    """
    bin_spikes takes the trials array (with directions and times) and the
    spk_times array with spike times and returns the average firing rate
    for each of the eight directions of motion, as calculated within a
    time_bin before and after the trial time (time_bin should be given in
    seconds). For example, time_bin = .1 will count the spikes from 100 ms
    before to 100 ms after the trial began.

    dir_rates should be an 8x2 array with the first column containing the
    directions (in degrees from 0-360) and the second column containing
    the average firing rate for each direction.
    """
    directions = np.unique(trials[:, 0])
    dir_rates = np.zeros((8, 2))  # initialize an 8x2 array of zeros
    for direction in directions:
        nmbr_trials = float(len(plt.find(trials[:, 0] == direction)))
        trial_times = [t[1] for t in trials if t[0] == direction]
        nmbr_spks = 0.0
        for time in trial_times:
            nmbr_spks += len(np.where(np.logical_and(
                spk_times > time - time_bin,
                spk_times < time + time_bin))[0])
        firing_rate = nmbr_spks / nmbr_trials / (2 * time_bin)
        dir_rates[plt.find(directions == direction)] = [direction, firing_rate]
    return dir_rates
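# A small usage sketch with made-up data, assuming an environment where
# plt.find is still available (or shimmed as at the top of this file):
import numpy as np

trials = np.array([[0.0, 1.0], [0.0, 2.0], [45.0, 1.5], [45.0, 2.5]])
spk_times = np.array([0.95, 1.02, 1.48, 1.52, 2.02, 2.49])
rates = bin_spikes(trials, spk_times, time_bin=0.08)
print(rates)  # each row: [direction in degrees, mean firing rate in spikes/s]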
def bin_spikes(trials, spk_times, time_bin):
    """
    bin_spikes takes the trials array (with directions and times) and the
    spk_times array with spike times and returns the average firing rate
    for each of the eight directions of motion, as calculated within a
    time_bin before and after the trial time (time_bin should be given in
    seconds). For example, time_bin = .1 will count the spikes from 100 ms
    before to 100 ms after the trial began.

    dir_rates should be an 8x2 array with the first column containing the
    directions (in degrees) and the second column containing the average
    firing rate for each direction.
    """
    # set up your bin size
    start_bin = time_bin
    stop_bin = time_bin

    # get just the trial times
    trial_times = trials[:, 1]

    # initialize your array for each trial
    spikes_per_trial = np.zeros(len(trial_times))

    # count the number of spikes in the bin around each trial
    for t in np.arange(0, len(trial_times)):
        spikes_per_trial[t] = np.count_nonzero(
            np.logical_and(spk_times > trial_times[t] - start_bin,
                           spk_times < trial_times[t] + stop_bin))

    # find the directions of motion used in the experiment
    directions = np.unique(trials[:, 0])

    # initialize the dir_rates output
    rates = np.zeros(len(directions))
    for dirs in range(len(directions)):
        d = directions[dirs]
        indices = plt.find(trials[:, 0] == d)  # this lets you group by direction
        rates[dirs] = sum(spikes_per_trial[indices]) / len(indices)  # average over the trials
    rates = rates / (start_bin + stop_bin)  # convert to firing rate

    dir_rates = np.column_stack((directions, rates))
    return dir_rates
def plotDvsR(ix, iy, fx, fy, R):
    IR = argsort(R)
    mR = []
    mD = []
    mI = []
    # look at index stars in order of R
    for ii in range(len(IR)):
        i = IR[ii]
        x = ix[i]
        y = iy[i]
        D = sqrt((fx - x)**2 + (fy - y)**2)
        # grab field stars within a matching radius
        r = 5
        I = find(D < r)
        for j in I:
            mR.append(R[i])
            mD.append(D[j])
            mI.append(ii)
    plot(mR, mD, 'ro')
    xlabel('Distance from quad center')
    ylabel('Match distance')
def plot_waveforms(time, voltage, APTimes, titlestr):
    """
    plot_waveforms takes four arguments - the recording time array, the
    voltage array, the times of the detected action potentials, and the
    title of your plot. The function creates a labeled plot showing the
    waveforms for each detected action potential.
    """
    plt.figure()

    # Note the sampling interval: time[1] - time[0] = 0.000034375014 s,
    # which serves as our x-axis increment, so the x-axis is essentially an
    # array from -0.003 to 0.003 of length 0.006 / 0.000034375014.
    sampling_rate = .006 / (time[1] - time[0])
    xaxis = np.linspace(-.003, .003, num=int(sampling_rate))
    xincrements = len(xaxis)

    for ap in APTimes:
        # the y-axis is just the corresponding 6 ms from the voltage array
        # (need to find the start and end index)
        peak_index = plt.find(time == ap)[0]
        starting_index = peak_index - xincrements // 2
        ending_index = peak_index + xincrements // 2
        yaxis = voltage[starting_index:ending_index]

        # Hack to make sure the arrays have the same dimension. A bug I ran
        # into was when the recording is at the very end, and we have less
        # than 3 ms of voltage data left to create a nice looking waveform.
        if len(xaxis) != len(yaxis):
            for x in range(0, (len(xaxis) - len(yaxis))):
                yaxis = np.insert(yaxis, -1, 0)

        plt.plot(xaxis, yaxis, 'b')

    # add labels
    plt.xlabel("Time (s)")
    plt.ylabel("Voltage (uV)")
    plt.title(titlestr)
    plt.show()
dt1i = np.array([datetime.strftime(dt1[i], '%Y%m%d%H%M')
                 for i in range(len(dt1))]).astype(int)

# data from spreadsheet 1 - dd1
#  7    8    9   10
#  hs, hmax, tp, dp

# define parameters
hs, hmax, tp, dp = np.array(dd1[:, [7, 8, 9, 10]].T).astype(float)
dp = dp + dmag[ff - 1]  # correct the magnetic declination for each buoy

ws1, wd1, ws2, wd2 = np.array(dd0[:, [5, 7, 8, 10]].T).astype(float)
wd1 = wd1 + dmag[ff - 1]  # correct the magnetic declination for each buoy
wd2 = wd2 + dmag[ff - 1]

# correct values below zero
dp[pl.find(dp < 0)] = dp[pl.find(dp < 0)] + 360
wd1[pl.find(wd1 < 0)] = wd1[pl.find(wd1 < 0)] + 360
wd2[pl.find(wd2 < 0)] = wd2[pl.find(wd2 < 0)] + 360

# and values above 360
dp[pl.find(dp > 360)] = dp[pl.find(dp > 360)] - 360
wd1[pl.find(wd1 > 360)] = wd1[pl.find(wd1 > 360)] - 360
wd2[pl.find(wd2 > 360)] = wd2[pl.find(wd2 > 360)] - 360

# create flag vectors for the processed series (depends on the number of
# variables to be quality-controlled + date)
flagp = np.zeros((len(hs), 4 + 1), dtype='|S32')
flagp[:, 0] = [datetime.strftime(dt1[i], '%Y%m%d%H%M') for i in range(len(dt1))]

# ================================================================== #
# Consistency tests for the processed data
def plot_waveforms(time, voltage, APTimes, titlestr):
    """
    plot_waveforms takes four arguments - the recording time array, the
    voltage array, the times of the detected action potentials, and the
    title of your plot. The function creates a labeled plot showing the
    waveforms for each detected action potential.
    """
    plt.figure()

    ## Your Code Here
    # --------------------------------------------------------------
    # from homework examples:
    # action potentials are caught in intervals of 3 milliseconds
    rate = 0.003

    # compute the number of measurements taken in each interval; this is
    # given by the ratio between the interval and the time step between
    # consecutive samples
    timeStep = time[1] - time[0]
    measurements_per_rate = 2 * int(rate / timeStep)

    # the x-axis varies between -3 ms and 3 ms in steps of measurements_per_rate
    time_axis = np.linspace(-rate, rate, measurements_per_rate)
    voltage_axis = np.zeros(measurements_per_rate)

    for val in range(len(APTimes)):
        # find the index of the action potential (AP) in the time array
        indx = plt.find(time == APTimes[val])

        # get data near the action potential
        if measurements_per_rate // 2 > indx:
            start_indx = 0
        else:
            start_indx = indx - measurements_per_rate // 2
        end_indx = indx + measurements_per_rate // 2

        # create a range from the starting index to the ending index
        action_potentials = range(start_indx, end_indx)

        # if there are enough points to represent the number of measurements
        if len(action_potentials) == measurements_per_rate:
            # then get the actual voltages from the action potentials identified
            voltage_axis = voltage[action_potentials]
        else:
            # otherwise, fill the array by adding points to fill up the measurement
            missing = measurements_per_rate - len(action_potentials)
            # for points very close to zero, fill them with zeros as well
            voltage_axis[0:missing] = 0
            # for points not very close to zero, get the actual measurements
            voltage_axis[val:measurements_per_rate] = voltage[action_potentials[1]]

        # plot the waveform with a blue color
        plt.plot(time_axis, voltage_axis, color='b')

    plt.xlabel('Time (s)')      # x-axis label
    plt.ylabel('Voltage (uV)')  # y-axis label
    plt.title(titlestr)         # graph title
    plt.show()                  # show plot
fy = fxy.field(1)
NF = len(fx)

# The matched quad.
mf = pyfits.open('ver/match.fits')
mf = mf[1].data
quad = mf.field('quadpix')[0]
quad = quad[0:8].reshape(4, 2)
qx = quad[:, 0]
qy = quad[:, 1]

# Quad center.
cx = mean(qx)
cy = mean(qy)

# Grab index stars that are within the field.
iok = find((ix > min(fx)) * (ix < max(fx)) *
           (iy > min(fy)) * (iy < max(fy)))
ix = [ix[i] for i in iok]
iy = [iy[i] for i in iok]

figure(1)
clf()
I = [0, 2, 1, 3, 0]
plot([cx], [cy], 'ro',
     qx[I], qy[I], 'r-',
     ix, iy,
def plot_conn_stats(AB_con, fig, flatten=True, errdist_perms=0, pctl=5,
                    min_corr_diff=0, pcorrs=False, neg_norm=True,
                    fdr_alpha=0.2, exclude_conns=True, savefig=None,
                    ccstatsmat=None, inds_cc=None, vvstatsmat=None,
                    refresh=False, nofig=False, rel=True):
    """ Generates correlation and ASC stats for a pair of states. """

    # generate basic correlation and variance stats
    if ccstatsmat is None:
        inds_cc = find(mne.stats.fdr_correction(
            fa(AB_con.get_corr_stats(pcorrs=pcorrs, rel=rel)[1]),
            alpha=fdr_alpha)[0])
        ccstatsmat = -fa(AB_con.get_corr_stats(pcorrs=pcorrs, rel=rel)[0])
        vvstatsmat = -AB_con.get_std_stats(pcorrs=pcorrs, rel=rel)

    # generate ASC limits
    lims = AB_con.get_ASC_lims(pcorrs=pcorrs, errdist_perms=errdist_perms,
                               refresh=refresh, pctl=pctl)

    # gen correlation matrices for plotting
    Acorrs_mat = np.mean(AB_con.A.get_corrs(pcorrs=pcorrs), 0, keepdims=True)
    Acorrs = fa(Acorrs_mat)  # flattening
    Bcorrs_mat = np.mean(AB_con.B.get_corrs(pcorrs=pcorrs), 0, keepdims=True)
    Bcorrs = fa(Bcorrs_mat)  # flattening

    # min_corr_diff is the minimum change in correlation considered
    # interesting (significant but tiny effects may not be interesting)
    if min_corr_diff != 0:
        inds_corr_diff = find(abs(Acorrs - Bcorrs) > min_corr_diff)
        inds_cc = np.intersect1d(inds_corr_diff, inds_cc)

    ##################################
    # set plot colours and other specs
    plot_colors = [(0.2, 0.6, 1), (0.62, 0.82, 0.98), (0.40, 0.95, 0.46),
                   (0.6, 0.95, 0.6), (0.15, 0.87, 0.87), (0.8, 0.8, 0.8)]
    cdict1 = {'red':   ((0.0, 0.0, 0.0), (0.75, 0.5, 0.5), (1.0, 1.0, 1.0)),
              'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
              'blue':  ((0.0, 0.0, 1.0), (0.25, 0.5, 0.5), (1.0, 0, 0))}
    cdict2 = {'red':   ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
              'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
              'blue':  ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
    blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
    plt.register_cmap(cmap=blue_red1)
    cmap2 = matplotlib.colors.ListedColormap(name='Test', colors=plot_colors)
    plt.register_cmap(cmap=cmap2)

    fontsize = 9
    current_palette = [(0.2980392156862745, 0.4470588235294118, 0.6901960784313725),
                       (0.3333333333333333, 0.6588235294117647, 0.40784313725490196),
                       (0.7686274509803922, 0.3058823529411765, 0.3215686274509804),
                       (0.5058823529411764, 0.4470588235294118, 0.6980392156862745),
                       (0.8, 0.7254901960784313, 0.4549019607843137),
                       (0.39215686274509803, 0.7098039215686275, 0.803921568627451)]
    sb_cols = current_palette + current_palette + current_palette

    # colours for variance stats
    vcols = []
    # scaling stats for plotting colours for variance change
    vv_norm = vvstatsmat / 6
    vv_norm[np.isnan(vv_norm)] = 0
    n_nodes = AB_con.A.get_covs().shape[1]
    for a in np.arange(n_nodes):
        vcols.append(blue_red1((vv_norm[a] + 1) / 2))
    vmax = 3
    vmin = -3

    # node info
    node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
    height = np.ones(n_nodes) * 4
    dist_mat = node_angles[None, :] - node_angles[:, None]
    dist_mat[np.diag_indices(n_nodes)] = 1e9
    node_width = np.min(np.abs(dist_mat))
    node_edgecolor = 'black'

    group_cols = []  # colours for ROIs
    ROI_info = AB_con.A.ROI_info
    for a in ROI_info['ROI_RSNs']:
        val = 1 - 0.35 * a / max(ROI_info['ROI_RSNs'])
        group_cols.append((val * 0.8, val * 0.9, val))

    plots = ['uncorrelated', 'common', 'additive', 'other']
    titles = {'uncorrelated': "Addition of uncorrelated signal",
              'common': "Addition of common signal",
              'additive': "Mixed additive signals",
              'other': "Changes not explained \n by additive signal changes"}

    #################################
    # plot data
    indices = np.triu_indices(n_nodes, 1)
    notin = inds_cc.copy()
    inds_plots = {}
    for plot in plots[:3]:
        if errdist_perms > 0:
            inds_plots[plot] = np.intersect1d(inds_cc,
                                              find(fa(lims[plot]['pctls'])))
            notin = np.setdiff1d(notin, find(fa(lims[plot]['pctls'])))
            minstr = 'min_pctls'
            maxstr = 'max_pctls'
        else:
            inds_plots[plot] = np.intersect1d(
                inds_cc, find(fa(lims[plot]['pctls_noerr'])))
            notin = np.setdiff1d(notin, find(fa(lims[plot]['pctls_noerr'])))
            minstr = 'min'
            maxstr = 'max'
    inds_plots['other'] = notin

    if exclude_conns:
        inds_plots['common'] = np.setdiff1d(inds_plots['common'],
                                            inds_plots['uncorrelated'])
        inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                              inds_plots['common'])
        inds_plots['additive'] = np.setdiff1d(inds_plots['additive'],
                                              inds_plots['uncorrelated'])

    plotccstats = ccstatsmat.astype(float)

    # flip colour of changes to negative corrs (neg_norm option)
    if neg_norm == True:
        plotccstats = plotccstats * np.sign(Acorrs)

    cnt = -1

    #################################
    # produce the four plots for the four ASC classes
    if fig != None:
        fig.clf()

    for plot in plots:
        cnt += 1

        ################################################
        # mne plot function
        # TODO: test for mne / nofig
        pp = plot_connectivity_circle(
            plotccstats.flatten()[inds_plots[plot]],
            ROI_info['ROI_names'][0:n_nodes],
            (indices[0][inds_plots[plot]], indices[1][inds_plots[plot]]),
            fig=fig, colormap='BlueRed1', vmin=vmin, vmax=vmax,
            node_colors=vcols, subplot=241 + cnt, title=titles[plot],
            interactive=True, fontsize_names=fontsize, facecolor='w',
            colorbar=False, node_edgecolor=node_edgecolor, textcolor='black',
            padding=3, node_linewidth=0.5)

        # titles
        ax = plt.gca()
        ax.set_title(titles[plot], color='black')

        # color node faces
        bars = pp[1].bar(node_angles, height * 2.2, width=node_width,
                         bottom=10.4, edgecolor='0.9', lw=2, facecolor='.9',
                         align='center', linewidth=1)
        for bar, color in zip(bars, group_cols):
            bar.set_facecolor(color)
            bar.set_edgecolor(color)

        # plot correlation info below circle plots
        if plot == 'other':
            plotrange = 'additive'
        else:
            plotrange = plot

        # sorting, plotting only those indices requested
        sort_array = np.zeros((len(inds_plots[plot]),), dtype=('f4,f4'))
        sort_array['f0'] = fa(lims[plotrange][minstr])[0, inds_plots[plot]]
        sort_array['f1'] = fa(lims[plotrange][maxstr])[0, inds_plots[plot]]
        ii = np.argsort(sort_array, order=['f0', 'f1'])

        # plot conn info
        if len(ii) > 0:
            # width of nodes
            width = np.max((20, len(ii) + 10))
            ii_ext = np.r_[ii[0], ii, ii[-1]]
            # fbwx: midpoints for under plots
            fbwx = np.arange(len(ii_ext)) + (width - len(ii_ext)) / 2.
            fbwx[0] = fbwx[0] + 0.5  # = np.r_[fbwx[0]-0.5, fbwx, fbwx[-1]+0.5]
            fbwx[-1] = fbwx[-1] - 0.5

            # axis settings
            ax = plt.subplot(245 + cnt, axisbg='white')
            ax.set_ylim([-1., 1])
            ax.set_yticks([-1, 0, 1])
            ax.set_yticks([-0.75, -.25, 0, 0.25, .5, .75, 1], minor=True)
            ax.yaxis.grid(color=[0.7, .95, .95], linestyle='-',
                          linewidth=.5, which='minor')
            ax.yaxis.grid(color=[0.65, .85, .85], linestyle='-',
                          linewidth=2, which='major')

            # first plot bands for ASC / uncorr / common (fill between)
            if len(fbwx) == 1:
                # if only one element
                plt.fill_between(
                    np.r_[fbwx - 0.5, fbwx + 0.5],
                    np.r_[fa(lims['additive'][minstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['additive'][minstr])[0, inds_plots[plot]][ii_ext]],
                    np.r_[fa(lims['additive'][maxstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['additive'][maxstr])[0, inds_plots[plot]][ii_ext]],
                    color='Grey', alpha=0.4)
                plt.fill_between(
                    np.r_[fbwx - 0.5, fbwx + 0.5],
                    np.r_[fa(lims['common'][minstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['common'][minstr])[0, inds_plots[plot]][ii_ext]],
                    np.r_[fa(lims['common'][maxstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['common'][maxstr])[0, inds_plots[plot]][ii_ext]],
                    color='Blue', alpha=0.4)
                plt.fill_between(
                    np.r_[fbwx - 0.5, fbwx + 0.5],
                    np.r_[fa(lims['uncorrelated'][minstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['uncorrelated'][minstr])[0, inds_plots[plot]][ii_ext]],
                    np.r_[fa(lims['uncorrelated'][maxstr])[0, inds_plots[plot]][ii_ext],
                          fa(lims['uncorrelated'][maxstr])[0, inds_plots[plot]][ii_ext]],
                    color='Green', alpha=0.6)
            else:
                # if multiple elements
                plt.fill_between(
                    fbwx,
                    fa(lims['additive'][minstr])[0, inds_plots[plot]][ii_ext],
                    fa(lims['additive'][maxstr])[0, inds_plots[plot]][ii_ext],
                    color=[0.67, 0.76, 0.85])
                plt.fill_between(
                    fbwx,
                    fa(lims['common'][minstr])[0, inds_plots[plot]][ii_ext],
                    fa(lims['common'][maxstr])[0, inds_plots[plot]][ii_ext],
                    color='Blue', alpha=0.4)
                plt.fill_between(
                    fbwx,
                    fa(lims['uncorrelated'][minstr])[0, inds_plots[plot]][ii_ext],
                    fa(lims['uncorrelated'][maxstr])[0, inds_plots[plot]][ii_ext],
                    color='Green', alpha=0.6)

            if neg_norm == True:
                iipospos = np.in1d(ii, find(abs(Acorrs[0, inds_plots[plot]]) >
                                            abs(Bcorrs[0, inds_plots[plot]])))
                iinegpos = np.in1d(ii, find(abs(Acorrs[0, inds_plots[plot]]) <
                                            abs(Bcorrs[0, inds_plots[plot]])))
            else:
                iipospos = np.in1d(ii, find(Acorrs[0, inds_plots[plot]] >
                                            Bcorrs[0, inds_plots[plot]]))
                iinegpos = np.in1d(ii, find(Acorrs[0, inds_plots[plot]] <
                                            Bcorrs[0, inds_plots[plot]]))
            iipos = ii[iipospos]
            iineg = ii[iinegpos]
            xes = np.arange(len(ii)) + (width - len(ii)) / 2.

            # now plot correlation in A and B conditions
            plt.plot(np.array([xes, xes])[:, find(iipospos)],
                     [Acorrs[0, inds_plots[plot][iipos]],
                      Bcorrs[0, inds_plots[plot][iipos]]],
                     color=[0, 0, 1], alpha=1, linewidth=1.5, zorder=1)
            plt.plot(np.array([xes, xes])[:, find(iinegpos)],
                     [Acorrs[0, inds_plots[plot][iineg]],
                      Bcorrs[0, inds_plots[plot][iineg]]],
                     color=[1, 0, 0], alpha=1, linewidth=1.5, zorder=1)
            plt.fill_between(
                fbwx,
                fa(lims['uncorrelated'][minstr])[0, inds_plots[plot]][ii_ext],
                fa(lims['uncorrelated'][maxstr])[0, inds_plots[plot]][ii_ext],
                color='Green', alpha=0.6)

            line3 = plt.Rectangle((0, 0), 0, 0, color=current_palette[0])
            ax.add_patch(line3)
            ax.set_xticks([])

            # plot points
            line2 = plt.scatter(xes[find(iipospos)],
                                Bcorrs[0, inds_plots[plot][iipos]].T,
                                color='blue', zorder=2)
            line2 = plt.scatter(xes[find(iinegpos)],
                                Bcorrs[0, inds_plots[plot][iineg]].T,
                                color='red', zorder=2)
            line2 = plt.scatter(xes[find(iipospos)],
                                Acorrs[0, inds_plots[plot][iipos]].T,
                                color='white', zorder=2)
            line2 = plt.scatter(xes[find(iinegpos)],
                                Acorrs[0, inds_plots[plot][iineg]].T,
                                color='white', zorder=2)

            # plot line between, colouring line two according to pos or neg change
            cmap = ListedColormap([(0.2980392156862745, 0.4470588235294118, 0.6901960784313725),
                                   (0.3333333333333333, 0.6588235294117647, 0.40784313725490196),
                                   (0.7686274509803922, 0.3058823529411765, 0.3215686274509804)])
            norm = BoundaryNorm([-2, 0, 1, 2], cmap.N)
            z = np.zeros(xes.shape[0] + 1,)

            # plot network membership above
            colorline(fbwx[:-1], z + 1.05,
                      ROI_info['ROI_RSNs'][indices[0][np.r_[
                          inds_plots[plot], inds_plots[plot][-1]]]] - 1.5,
                      cmap=cmap, norm=norm, linewidth=5)
            colorline(fbwx[:-1], z + 1.1,
                      ROI_info['ROI_RSNs'][indices[1][np.r_[
                          inds_plots[plot], inds_plots[plot][-1]]]] - 1.5,
                      cmap=cmap, norm=norm, linewidth=5)

    plt.show()

    if savefig != None:
        if nofig == 23:
            pp = PdfPages(fname)
            fig.tight_layout(h_pad=1, pad=4)
            pp.savefig(fig=fig)
            pp.close()
        else:
            fig.savefig(savefig)

    # add stats to AB_con struct
    AB_con.lims['covs']['inds_plots'] = inds_plots
    AB_con.lims['covs']['cc_stats'] = ccstatsmat
    AB_con.lims['covs']['vv_stats'] = vvstatsmat
    return (AB_con, inds_plots)
for File in Files:

    # klayer = 0 is surface, tstep=-99 reads in all time steps
    # Do u, v, eta
    sun = Spatial(File, klayer=2, tstep=-99, variable="eta")  # , clim=clim)
    # sun = Spatial(File, klayer=0, tstep=-99, variable='eta')  # , clim=clim)
    datesbay = sun.time  # get dates available in file
    eta = sun.loadData()

    # Loop through times in bay file
    for i, date in enumerate(datesbay):

        # Find the same time from the shelf model output.
        # Assumes that there is a time in the shelf model that perfectly
        # aligns with the times in the bay model at some point.
        tindshelf = find(datesshelf == date)
        print(date)
        # pdb.set_trace()
        if not find(datesshelf == date):
            # if a shelf time doesn't align, don't use this bay model output
            continue

        # have to do u and v by time step for some reason
        u = Spatial(File, klayer=["surface"], tstep=i, variable="uc").loadData()
        v = Spatial(File, klayer=["surface"], tstep=i, variable="vc").loadData()

        # Interpolate model output from SUNTANS grid onto blended grid
        etabayblend = sun.interpolate(eta[i, :], gridblend.x_rho, gridblend.y_rho)
        ubayblend = sun.interpolate(u[:], gridblend.x_u, gridblend.y_u)
        vbayblend = sun.interpolate(v[:], gridblend.x_v, gridblend.y_v)

        # Rotate SUNTANS u,v velocities to be along/across
def addspikes(Y, lw=0.2, color='k'):
    ''' Add vertical lines where Y > 0 '''
    for t in find(Y > 0):
        axvline(t, lw=lw, color=color)
from matplotlib.pylab import find

NumChannels = 6
MaxCyclesOfSimulation = 1e5
SkipCycles = 300
CyclesResolution = 20
MinStimSyncPeriod = 60

if len(sys.argv) != 1 + NumChannels:
    sys.stderr.write('usage: %s ch0_period ch1_period ... ch%d_period\n'
                     % (sys.argv[0], NumChannels - 1))
    sys.exit(1)

periods = list(map(float, sys.argv[1:]))
freqs = [1. / period for period in periods]

delta_t = np.array([int(random.expovariate(freq)) for freq in freqs],
                   dtype=np.uint32)
t = SkipCycles
while True:
    min_t = delta_t.min()
    t += min_t
    if t > MaxCyclesOfSimulation:
        break
    delta_t -= min_t
    indices = find(delta_t == 0)
    flag = 0
    for ch in indices:
        flag |= (1 << ch)
        if ch == 0:
            delta_t[ch] = max(MinStimSyncPeriod,
                              random.normalvariate(periods[ch], 0.1 * periods[ch]))
        else:
            delta_t[ch] = max(CyclesResolution,
                              int(random.expovariate(freqs[ch])))
    # sys.stderr.write("flag=%s timestamp=%u\n" % (bin(flag)[2:].rjust(8, '0'), t))
    sys.stdout.write(struct.pack('>BL', flag, t))
isi = np.array(np.diff(get_timestamp(filename, channel)), dtype=np.float64)
print(repr(('mean', isi.mean(), 'std', isi.std())))

plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})

if plottype == 'time':
    for i in range(0, len(isi), chunksize):
        print('start @chunk %d' % i)
        window = isi[i:i + chunksize]
        plt.plot(1.e-6 * window.cumsum(), window, 'k.')
    plt.ylabel(r'$\Delta t$ ($\mu$s)')
    plt.xlabel(r'$t$ (s)')
    dim = plt.axis()
    plt.axis(dim[:2] + (dim[2] - 2, dim[3] + 2))
    ax = plt.gca()
    y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    ax.yaxis.set_major_formatter(y_formatter)
    plt.tight_layout()
    plt.show()
elif plottype == 'hist':
    y, x, _ = plt.hist(isi, color='#888888', log=True)
    plt.ylabel(r'Number of occurrences')
    plt.xlabel(r'$\Delta t$ ($\mu$s)')
    ax = plt.gca()
    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    ax.xaxis.set_major_formatter(x_formatter)
    plt.xticks(np.round(x[pylab.find(y > 0)]))
    plt.tight_layout()
    plt.show()
def detection_tester(peakTimes, refFilepath=None, delimiter=None, tolerance=0.35):
    """
    Compares spike times from the spike_detector function to the actual
    spike times. The actual (validated) times should be listed in a
    reference text file located at refFilepath; set delimiter via the
    corresponding parameter, e.g. ',' for CSV. The function outputs and
    returns the percentage of true (verified) spikes that were detected,
    along with the false spike rate (extra spikes per second of data).
    """
    if refFilepath == None:
        print("True spike times were not provided, so the spike detection "
              "performance cannot be evaluated")
        percentTrueSpikes, falseSpikeRate = "N/A", "N/A"
        return {'percent_true_spikes': percentTrueSpikes,
                'false_spike_rate': falseSpikeRate}
    else:
        # Create numpy array of the values in the reference csv file
        trueTimes = np.genfromtxt(refFilepath, delimiter=delimiter)

        # First match the two arrays of spike times. Anything within the
        # given tolerance is a match.

        # Ensure times are in sequential order
        peakTimes = np.sort(peakTimes)
        trueTimes = np.sort(trueTimes)

        # Remove spikes with the same times (false spikes)
        detected = np.append(peakTimes, -1)
        uniqueDetected = peakTimes[plt.find(plt.diff(detected) != 0)]

        # Find matching spikes and mark as true detections
        trueDetected = []
        # Find indices of detected spikes that are within the margin of
        # tolerance around each true spike
        for spike in trueTimes:
            detectedWithinTol = plt.find((uniqueDetected >= spike - tolerance) &
                                         (uniqueDetected <= spike + tolerance))
            # If detected spikes found...
            if len(detectedWithinTol) > 0:
                # ...for each one, check if already present in our list of
                # true detections, ...
                for i in detectedWithinTol:
                    alreadyMarked = plt.find(trueDetected == uniqueDetected[i])
                    # ...and if not, append it to that list
                    if len(alreadyMarked) == 0:
                        trueDetected = np.append(trueDetected, uniqueDetected[i])
        percentTrueSpikes = 100.0 * len(trueDetected) / len(trueTimes)

        # Everything else is a false spike
        totalTime = trueTimes[len(trueTimes) - 1] - trueTimes[0]
        falseSpikeRate = (len(peakTimes) - len(trueTimes)) / totalTime

        print("\nSpike detector performance:")
        print("\n    Number of spikes detected in test analysis =", len(peakTimes))
        print("    Number of true spikes =", len(trueTimes))
        print("    Percentage of true spikes detected =", percentTrueSpikes)
        print("    False spike rate = ", falseSpikeRate, "spikes/s")
        return {'percent_true_spikes': percentTrueSpikes,
                'false_spike_rate': falseSpikeRate}
def triu_all(x):
    """ Upper triangular of the first two dims, excluding the diagonal. """
    return x.flatten()[pl.find(np.triu(np.ones(x.shape), 1))]
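# For 2-D input this is equivalent to indexing with np.triu_indices; a sketch
# assuming x is square (as np.triu(np.ones(x.shape), 1) implies; helper name
# is illustrative):
import numpy as np

def triu_all_2d(x):
    # strictly upper-triangular elements in row-major order
    return x[np.triu_indices(x.shape[0], k=1)]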
def __init__(self, img):
    """ Get the grid from an OpenCV2 image object. """
    self.img = img

    sbd_params = cv2.SimpleBlobDetector_Params()
    for (k, v) in SBD_HOLES.iteritems():
        setattr(sbd_params, k, v)
    sbd = cv2.SimpleBlobDetector(sbd_params)
    keypoints = sbd.detect(img)

    blobs = img.copy()
    for kp in keypoints:
        cv2.circle(blobs, tuple([int(coord) for coord in kp.pt]), 4, 255, 4)

    ###### Identifying the grid spacing #####
    # Find the distances between all the detected holes in the image. The
    # actual spacing between pins should be the minimum distance observed
    # between holes.
    dists = np.zeros((len(keypoints), len(keypoints)))
    kps = [np.array(p.pt) for p in keypoints]
    for j, pj in enumerate(kps):
        for k, pk in enumerate(kps):
            dists[j, k] = np.linalg.norm(pj - pk)
    upper = np.triu(dists)
    flat = np.array([x for x in upper.reshape((-1, 1)) if x > 1])
    n_dist, bins = np.histogram(flat, range=(0, 100), bins=400)
    min_spacing = bins[lab.find(np.diff(gaussian_filter(n_dist, 2)) < 0)[0] + 1]

    cond = np.where((upper >= min_spacing - 1) * (upper <= min_spacing + 1), 1, 0)
    neighbors = []
    for j, row in enumerate(cond):
        for k, el in enumerate(row):
            if el:
                # print j, k, keypoints[j].pt, keypoints[k].pt
                neighbors.append([j, k, keypoints[j].pt, keypoints[k].pt])

    # Draw lines between grid neighbors
    lines = blobs.copy()
    for n in neighbors:
        cv2.line(lines, tuple(map(int, n[2])), tuple(map(int, n[3])), 255)

    # Identify the angle of the grid by looking at the orientation of the
    # lines between adjacent holes in the grid
    angles = np.array([np.arctan2(n[3][1] - n[2][1], n[3][0] - n[2][0])
                       for n in neighbors])
    angle_step = 0.5
    n_ang, bins = np.histogram((angles % np.pi) * 180 / np.pi,
                               range=(0, 180), bins=int(180. / angle_step))
    kernel = np.tile(np.hstack([
        lab.normpdf(np.array(range(int(45. / angle_step))), 0, 10. / angle_step),
        lab.normpdf(np.array(range(int(45. / angle_step))), 45. / angle_step, 10. / angle_step)]), 2)
    angle_fits = np.zeros(int(45. / angle_step))
    for t in range(len(angle_fits)):
        angle_fits[t] = np.correlate(np.roll(kernel, t), n_ang)
    grid_angle = (np.argmax(angle_fits) * angle_step + angle_step / 2) * np.pi / 180
    print(grid_angle, grid_angle * 180 / np.pi)

    # Translation of the grid
    # Now we have the spacing and orientation of the grid, so find its
    # translation as well. First, undo the rotation:
    R = rotmat(-grid_angle)
    kps_rot = [np.array(R * np.reshape(k, (2, -1))) for k in kps]

    # Next, find the phase of the x and y position of the grid using mod
    x_mod = [k[0][0] % min_spacing for k in kps_rot]
    y_mod = [k[1][0] % min_spacing for k in kps_rot]
    n_x, bins = np.histogram(x_mod, range=(0, min_spacing), bins=int(min_spacing))
    n_y, bins = np.histogram(y_mod, range=(0, min_spacing), bins=int(min_spacing))
    grid_orig = np.reshape([np.argmax(gaussian_filter(n_x, 2)) + 0.5,
                            np.argmax(gaussian_filter(n_y, 2)) + 0.5], (2, -1))

    self.grid_orig = grid_orig
    self.grid_angle = grid_angle
    self.grid_spacing = min_spacing
pathname = os.environ['HOME'] + '/Dropbox/tese/rot/out/'

#   0    1  2  3  4  5  6  7  8  9  10
# date, ws,wg,wd,at,rh,pr,wt,hs,tp,dp

re = np.loadtxt(pathname + 'argos_opendap_cq_recife.out', delimiter=',')
# ac = np.loadtxt(pathname + 'siodoc_janis_arraial_cabo.out', delimiter=',')
sa = np.loadtxt(pathname + 'argos_opendap_cq_santos.out', delimiter=',')
fl = np.loadtxt(pathname + 'argos_opendap_cq_florianopolis.out', delimiter=',')
rg = np.loadtxt(pathname + 'argos_opendap_cq_rio_grande.out', delimiter=',')

# manual quality control
re[1995:6995, 1:] = np.nan; re[11411:15932, 1:] = np.nan
fl[2352:4118, 1:] = np.nan  # florianopolis

# plot only the quality-controlled data
re = re[pl.find(np.isnan(re[:, 1]) == False), :]
sa = sa[pl.find(np.isnan(sa[:, 1]) == False), :]
fl = fl[pl.find(np.isnan(fl[:, 1]) == False), :]
rg = rg[pl.find(np.isnan(rg[:, 1]) == False), :]

# triaxys
rew = np.loadtxt(pathname + 'triaxys_8_recife.out', delimiter=',')
saw = np.loadtxt(pathname + 'triaxys_8_santos.out', delimiter=',')
flw = np.loadtxt(pathname + 'triaxys_8_florianopolis.out', delimiter=',')
rgw = np.loadtxt(pathname + 'triaxys_8_rio_grande.out', delimiter=',')

# plot only the quality-controlled data
rew = rew[pl.find(np.isnan(rew[:, 1]) == False), :]
saw = saw[pl.find(np.isnan(saw[:, 1]) == False), :]
flw = flw[pl.find(np.isnan(flw[:, 1]) == False), :]
][2:-2]
#hora = ['1400','1430','1500','1530','1600','1630','1700','1730','1800','1830','1900','1930','2000','2030','2100','2130','2200','2230','2300','2330','0000','0030','0100','0130','0200','0230','0300','0330','0400','0430','0500','0530','0600','0630','0700','0730','0800','0830','0900','0930','1000'][:-9]  # 41
#hora = ['0800','0845','0930','1015','1100','1145','1230','1315','1400','1445','1530','1615','1700','1745','1830','1915','2000','2045'][2:-3]
#hora = ['0730','0800','0830','0900','0930','1000','1030','1100','1130','1200','1230','1300','1330','1400','1430','1500','1530','1600','1630','1700','1730','1800','1830','1900','1930','2000','2030','2100','2130'][1:-4]
#hora = ['0730','0745','0800','0815','0830','0845','0900','0915','0930','0945','1000','1015','1030','1045','1100','1115','1130','1145','1200','1215','1230','1245','1300','1315','1330','1345','1400','1415','1430','1445','1500','1515','1530','1545','1600','1615','1630','1645','1700','1715','1730','1745','1800','1815'][4:-6]
#hora = (len(dd))

dd1 = {}
epr = {}
evx = {}
evy = {}
evz = {}

# split the data and compute the spectra
for i in range(len(a)):
    dd1[hora[i]] = dd.ix[pl.find(dd.ens == a[i]), :]
    epr[hora[i]] = espec.espec1(dd1[hora[i]].pr, nfft, fs)
    evx[hora[i]] = espec.espec1(dd1[hora[i]].vx, nfft, fs)
    evy[hora[i]] = espec.espec1(dd1[hora[i]].vy, nfft, fs)
    evz[hora[i]] = espec.espec1(dd1[hora[i]].vz, nfft, fs)

pl.figure()
cont = 0
for i in range(0, len(a), 1):
    cont += 1
    pl.subplot(1, 4, 1)
    pl.plot(epr[hora[i]][:, 0], cont + epr[hora[i]][:, 1])
    pl.xlim(0, 0.2), pl.ylim(0, 15)
    pl.xlabel('Frequency (Hz)')
    pl.ylabel('Spectral Density (m2/Hz)')
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, Npts=1024, fsize=(6, 4)):
    """
    A method for displaying digital filter frequency response magnitude,
    phase, and group delay. A plot is produced using matplotlib.

    freqz_resp(b, a=[1], mode='dB', Npts=1024, fsize=(6, 4))

        b = ndarray of numerator coefficients
        a = ndarray of denominator coefficients
     mode = display mode: 'dB' magnitude, 'phase' in radians, or
            'groupdelay_s' in samples and 'groupdelay_t' in sec,
            all versus frequency in Hz
     Npts = number of points to plot; default is 1024
    fsize = figure size; default is (6, 4) inches

    Mark Wickert, January 2015
    """
    if type(b) == list:
        # We have a list of filters
        N_filt = len(b)
        f = np.arange(0, Npts) / (2.0 * Npts)
        for n in range(N_filt):
            w, H = signal.freqz(b[n], a[n], 2 * np.pi * f)
            if n == 0:
                plt.figure(figsize=fsize)
            if mode.lower() == 'db':
                plt.plot(f * fs, 20 * np.log10(np.abs(H)))
                if n == N_filt - 1:
                    plt.xlabel('Frequency (Hz)')
                    plt.ylabel('Gain (dB)')
                    plt.title('Frequency Response - Magnitude')
            elif mode.lower() == 'phase':
                plt.plot(f * fs, np.angle(H))
                if n == N_filt - 1:
                    plt.xlabel('Frequency (Hz)')
                    plt.ylabel('Phase (rad)')
                    plt.title('Frequency Response - Phase')
            elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
                """
                Notes
                -----
                Since this calculation involves finding the derivative of the
                phase response, care must be taken at phase wrapping points
                and when the phase jumps by +/-pi, which occurs when the
                amplitude response changes sign. Since the amplitude response
                is zero when the sign changes, the jumps do not alter the
                group delay results.
                """
                theta = np.unwrap(np.angle(H))
                # Since theta for an FIR filter is likely to have many pi
                # phase jumps too, we unwrap a second time 2*theta and
                # divide by 2
                theta2 = np.unwrap(2 * theta) / 2.
                theta_dif = np.diff(theta2)
                f_diff = np.diff(f)
                Tg = -np.diff(theta2) / np.diff(w)
                # For gain almost zero set group delay = 0
                idx = pylab.find(20 * np.log10(H[:-1]) < -400)
                Tg[idx] = np.zeros(len(idx))
                max_Tg = np.max(Tg)
                # print(max_Tg)
                if mode.lower() == 'groupdelay_t':
                    max_Tg /= fs
                    plt.plot(f[:-1] * fs, Tg / fs)
                    plt.ylim([0, 1.2 * max_Tg])
                else:
                    plt.plot(f[:-1] * fs, Tg)
                    plt.ylim([0, 1.2 * max_Tg])
                if n == N_filt - 1:
                    plt.xlabel('Frequency (Hz)')
                    if mode.lower() == 'groupdelay_t':
                        plt.ylabel('Group Delay (s)')
                    else:
                        plt.ylabel('Group Delay (samples)')
                    plt.title('Frequency Response - Group Delay')
            else:
                s1 = 'Error, mode must be "dB", "phase", '
                s2 = '"groupdelay_s", or "groupdelay_t"'
                print(s1 + s2)
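# A minimal usage sketch with a made-up filter (10-tap moving average),
# assuming scipy.signal, pylab, and matplotlib.pyplot are imported as the
# function above expects:
import numpy as np

b_list = [np.ones(10) / 10]   # one FIR filter
a_list = [np.array([1.0])]
freqz_resp_list(b_list, a_list, mode='dB', fs=8000)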
def measureEquivalentWidths(xx, yy, inputlinelist='lines.rdb', output='ew_out.npy'):
    """
    Measure pseudo equivalent widths in spectrum

    Parameters
    ----------
    xx : wavelength data array (angstrom)
    yy : flux data array (normalized)
    inputlinelist : input file containing list of line ranges [x0,xf]
    output : output file name (np.save writes a .npy file)

    Returns
    -------
    eqwidths: array of measured equivalent widths
    """
    xinif, xendf = np.loadtxt(inputlinelist, unpack=True, delimiter='\t',
                              usecols=(0, 1), dtype='float', skiprows=2)
    eqwidths = []
    for i in range(len(xinif)):
        mask = np.where(np.logical_and(xx >= xinif[i], xx <= xendf[i]))
        maxlocf = mask[0][0]
        endmaxf = mask[0][-1] + 1
        if (maxlocf - 2 < 0) or (endmaxf + 3 > len(xx)):
            print "WARNING 1: index out of range! Setting EQ=NaN"
            eqwidths.append(float('nan'))
            continue
        maxloc_adjust = yy[maxlocf - 2:maxlocf + 3]
        endmax_adjust = yy[endmaxf - 2:endmaxf + 3]
        xmaxloc = xx[maxlocf - 2:maxlocf + 3]
        xendmax = xx[endmaxf - 2:endmaxf + 3]
        """
        if 3760 < i < 3790:
            for j in range(len(xmaxloc)):
                print i, j, xmaxloc[j], maxloc_adjust[j]
            for j in range(len(xendmax)):
                print i, j, xendmax[j], endmax_adjust[j]
        """
        locmaxmask = np.where(maxloc_adjust == max(maxloc_adjust))
        ind_temp_max = locmaxmask[0][0]
        maxlocfs = int(maxlocf + ind_temp_max - 2)
        locendmask = np.where(endmax_adjust == max(endmax_adjust))
        ind_temp_end = locendmask[0][0]
        endmaxfs = int(endmaxf + ind_temp_end - 2)
        if (maxlocfs < 0) or (maxlocfs > len(xx)) or (endmaxfs < 0) or (
                endmaxfs > len(xx)):
            print "WARNING 2: index out of range! Setting EQ=NaN"
            eqwidths.append(float('nan'))
            continue
        yinit = yy[maxlocfs]
        yendt = yy[endmaxfs]
        yit = yy[maxlocfs:endmaxfs + 1]   # not normalized
        xint = xx[maxlocfs:endmaxfs + 1]
        tam = len(yit)
        if tam < 5:
            print "WARNING 3: index out of range! Setting EQ=NaN"
            eqwidths.append(float('nan'))
            continue
        if any(yinit < yit[0:5]):
            minit = max(yit[0:5])
        else:
            minit = yinit
        if any(yendt < yit[-5:-1]):
            mendt = max(yit[-5:-1])
        else:
            mendt = yendt
        yintt = yy[maxlocfs:endmaxfs + 1] - max(minit, mendt)   # normalized
        yinttnew = yy[maxlocfs:endmaxfs + 1]
        indini = max(pylab.find(minit == yit))
        indend = min(pylab.find(mendt == yit))
        if (xint[indend] - xint[indini]) != 0.0:
            mt = (mendt - minit) / (xint[indend] - xint[indini])
            # boundary condition for the normalized integral
            yeqt = minit + mt * (xint - xint[indini]) - max(minit, mendt)
            yeqplott = minit + mt * (xint - xint[indini])
            # (intended guard: if yeqt < yintt)
            eqw = 1000 * np.trapz(1 - yinttnew / yeqplott, dx=0.01)
            eqwidths.append(eqw)
        else:
            eqwidths.append(float('nan'))
        #print i, "/", len(xinif), eqw

    eqwidths = np.array(eqwidths)
    np.save(output, eqwidths)
    return eqwidths
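# Sanity check for the pseudo-EW integration above (a sketch with
# illustrative numbers): for a Gaussian absorption line of depth d and
# width sigma on a flat continuum, EW = d * sigma * sqrt(2*pi), so the
# trapezoidal integral of 1 - flux should reproduce that closed form.
import numpy as np

x = np.arange(5000.0, 5010.0, 0.01)   # wavelength grid (angstrom)
d, mu, sigma = 0.4, 5005.0, 0.1
flux = 1.0 - d * np.exp(-0.5 * ((x - mu) / sigma) ** 2)
print(1000.0 * np.trapz(1.0 - flux, x))            # ~100.27 milli-angstrom
print(1000.0 * d * sigma * np.sqrt(2.0 * np.pi))   # analytic: ~100.27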
serie = []
for j in range(img.shape[1]):
    # use the first column as the reference point
    if j == 0:
        # derivative of each column after removing the global mean
        a = np.diff(img[:, j] - np.nanmean(img))
        # normalize vector
        #a = a / a.sum()
        # wave profile (pwav) and bottom (pbot)
        pwav.append(int(pl.find(a == np.nanmax(a))[0]))
        pbot.append(int(pl.find(a == np.nanmin(a))[0]))
    else:
        # derivative of each column after removing the global mean
        a = np.diff(img[:, j] - np.nanmean(img))
        b = np.flipud(a)   # bottom
        # normalize vectors
        #a = a / a.sum()
        #b = b / b.sum()
        # wave profile (pwav) and bottom (pbot)
        pbot.append(len(b) - pl.find(b < -blim)[0])   # count from the reversed array
        pwav.append(pl.find(a > wlim)[0])
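# Minimal check of the derivative-threshold idea used above (a sketch with
# illustrative values; wlim plays the role of the threshold above): in a
# column with a dark-to-bright step, np.diff peaks at the transition row.
import numpy as np

col = np.r_[np.zeros(40), np.ones(60)]   # synthetic image column, step at row 40
a = np.diff(col - np.nanmean(col))       # mean removal shifts values, not the diff
wlim = 0.5
first_cross = np.nonzero(a > wlim)[0][0]
print(first_cross)                       # 39 -> step between rows 39 and 40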
fs = 2   # sampling frequency (Hz)

# ensemble identifiers (the last two are discarded)
a = np.unique(dd.ens)[:-2]

dd1 = {}
epr = {}
evx = {}
evy = {}
evz = {}

# split the data and compute the spectra
for v in range(len(a)):
    # loop over the hours of each day
    dd1[str(v)] = dd.ix[pl.find(dd.ens == a[v]), :]
    nfft = int(dd1[str(v)].shape[0] / 2)
    epr[str(v)] = espec.espec1(dd1[str(v)].pr, nfft, fs)
    evx[str(v)] = espec.espec1(dd1[str(v)].vx, nfft, fs)
    evy[str(v)] = espec.espec1(dd1[str(v)].vy, nfft, fs)
    evz[str(v)] = espec.espec1(dd1[str(v)].vz, nfft, fs)

pl.figure(figsize=(16, 10))
cont = 0
for i in range(0, len(a), 1):
    print(i)
    cont += 2
    pl.subplot(1, 4, 1)
    pl.semilogy(epr[str(i)][:, 0], cont + epr[str(i)][:, 1])
    pl.xlim(0, 0.2)
    pl.ylim(1e-2, 45)   # lower limit must be positive on a log axis
    pl.xlabel('Frequency (Hz)')
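# espec.espec1 is an external, project-specific routine. Judging only by
# how its result is indexed above (column 0 = frequency, column 1 =
# spectral density), a rough stand-in based on Welch's method could look
# like this sketch -- an assumption, since the real espec module is not
# shown here.
import numpy as np
from scipy import signal

def espec1_sketch(x, nfft, fs):
    """Return a two-column array [frequency, spectral density]."""
    f, pxx = signal.welch(np.asarray(x, dtype=float), fs=fs, nperseg=nfft)
    return np.column_stack((f, pxx))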
## Convert voltage to temperature
## Expect (20 mV/C) 400 mV = 0 C
#raw[:,1] -= 0.4
#raw[:,1] /= 0.02
#raw[:,1] = C2F(raw[:,1])
raw[:, 0] -= raw[0, 0]   # time relative to start
raw[:, 0] /= 60.0        # min.

T, V = raw[:, 0], raw[:, 2]

print 'Fit rise'
tstart = 0.2
t0 = pl.find(T > tstart)[0]
t1 = V.argmax()
print 'fitting between', T[t0], 'and', T[t1]

def fn1eval(x, p):
    # [ amplitude, 0 offset, time const ]
    return p[0] * (1 - np.exp(-x / p[2])) + p[1]

def fn1err(p, x, y):
    return y - fn1eval(x, p)

p0 = [V[t1] - V[t0], V[t0], 1]
print 'initial', p0
pr, _ = leastsq(fn1err, p0, args=(T[t0:t1], V[t0:t1]))
print 'opt', pr
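# The rise fit above minimizes residuals of V(t) = A*(1 - exp(-t/tau)) + V0
# with leastsq; the higher-level scipy.optimize.curve_fit does the same job
# with less boilerplate. A sketch on synthetic data (values illustrative):
import numpy as np
from scipy.optimize import curve_fit

def rise(t, A, V0, tau):
    return A * (1.0 - np.exp(-t / tau)) + V0

t = np.linspace(0.0, 10.0, 200)
v = rise(t, 2.0, 0.4, 1.5) + 0.01 * np.random.randn(t.size)
popt, _ = curve_fit(rise, t, v, p0=[v[-1] - v[0], v[0], 1.0])
print(popt)   # ~[2.0, 0.4, 1.5]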
def good_AP_finder(time, voltage):
    """
    This function takes the following input:
        time - vector where each element is a time in seconds
        voltage - vector where each element is a voltage at a different time

        We are assuming that the two vectors are in correspondance (meaning
        that at a given index, the time in one corresponds to the voltage in
        the other). The vectors must be the same size or the code won't run

    This function returns the following output:
        APTimes - all the times where a spike (action potential) was detected
    """
    # Constants
    peak_voltage = max(voltage) if abs(max(voltage)) > abs(
        min(voltage)) else min(voltage)
    THRESHOLD = abs(peak_voltage) / 2.0
    SAMPLE_PERIOD = time[1] - time[0]       # seconds per sample
    AP_SLOPE = np.std(voltage) * 2
    SPREAD = int(.0008 / SAMPLE_PERIOD)     # number of samples in 0.8 ms

    # Data stores
    APTimes = []
    AP_set = set()

    # for seeing steep changes in voltage
    voltage_delta = np.diff(voltage)

    # Let's make sure the input looks at least reasonable
    if (len(voltage) != len(time)):
        print "Can't run - the vectors aren't the same length!"
        return APTimes

    # Use steep slopes to find local peaks and valleys
    # (stop two samples early because we look ahead at i+1 and i+2)
    for i, v in enumerate(voltage_delta[:-2]):
        # Check for a string of slopes above threshold
        if abs(v) > AP_SLOPE and abs(voltage_delta[i + 1]) > AP_SLOPE and abs(
                voltage_delta[i + 2]) > AP_SLOPE:
            # find local max near the second one
            # b/c it's more likely to be closer to the peak
            i2 = i + 2
            sample = voltage[max(i2 - SPREAD, 0):i2 + SPREAD + 1]
            local_peak = max(sample) if abs(max(sample)) > abs(
                min(sample)) else min(sample)
            # note: sorting idea came from http://stackoverflow.com/a/12141207
            local_peak = min(plt.find(voltage == local_peak),
                             key=lambda x: abs(x - i2))
            if abs(voltage[local_peak]) > THRESHOLD:
                # set prevents duplicates
                AP_set.add(time[local_peak])

    APTimes = list(AP_set)
    return APTimes
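# Quick synthetic check for good_AP_finder (a sketch; amplitudes and rates
# are illustrative, and it assumes plt.find is available -- an old
# matplotlib or the find shim given earlier): inject a few large spikes
# into low-amplitude noise and confirm the detector reports nearby times.
import numpy as np

rate = 10000.0                        # 10 kHz sampling, assumed
t = np.arange(0, 1.0, 1.0 / rate)
v = 0.02 * np.random.randn(t.size)
for spike_t in (0.2, 0.5, 0.8):
    i = int(spike_t * rate)
    v[i:i + 5] += np.array([0.2, 0.6, 1.0, 0.5, 0.1])   # crude AP waveform
print(sorted(good_AP_finder(t, v)))   # expect times near 0.2, 0.5, 0.8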
#b = np.flipud(a)   # bottom

# normalize vectors
#a = a / a.sum()
#b = b / b.sum()

# pl.figure()
# pl.plot(a)
# pl.savefig('out/der' + str(j) + '.png')
# pl.close('all')

# wave profile (pwav) and bottom (pbot)

# find the bottom
# find values below the threshold (bottom)
auxwav = pl.find(w > wlim)[1]
pwav.append(auxwav)

auxbot = pl.find(b < blim)[-1]
pbot.append(auxbot)

#pbot.append( len(b) - pl.find(b < -blim)[0] )   # count from the reversed array
# pwav.append(pl.find(a > wlim)[0])
# pbot.append(pl.find(a == a.min())[-1])

#pwav = pd.rolling_mean(np.array(pwav), wind)
#pbot = pd.rolling_mean(np.array(pbot), wind)

fig = plt.figure(figsize=(10, 8))
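# pd.rolling_mean (commented out above) was removed in pandas 0.18+; the
# modern equivalent of those two smoothing lines is Series.rolling().mean().
# A sketch with illustrative values standing in for pwav and wind:
import numpy as np
import pandas as pd

wind = 5                                   # window length, illustrative
pwav = np.arange(20.0) + np.random.randn(20)
smoothed = pd.Series(pwav).rolling(wind).mean().values   # == old pd.rolling_mean(pwav, wind)
print(smoothed[:wind])                     # first wind-1 entries are NaN, as before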
pathnamewind = os.environ['HOME'] + '/Dropbox/pnboia/cfsr/'

time = np.loadtxt(pathnamewind + 'time_RioGrande_200905.txt')
u = np.loadtxt(pathnamewind + 'uCFSR_RioGrande_200905.txt')
v = np.loadtxt(pathnamewind + 'vCFSR_RioGrande_200905.txt')

# remove repeated days
time, ia = np.unique(time, return_index=True)
time = time[:-1]
u = u[ia[:-1]]
v = v[ia[:-1]]

ws = np.sqrt(u**2 + v**2)
wd = np.arctan2(v, u) * 180 / np.pi

# wind direction: where the wind comes from
wd = 270 - wd   # convert from 'blowing toward' to 'coming from'
wd[pl.find(wd < 0)] = wd[pl.find(wd < 0)] + 360
wd[pl.find(wd > 360)] = wd[pl.find(wd > 360)] - 360

# magnetic declination
dmag = -23

# create 'lista' with the HNE file names
lista = np.array(lista_hne(pathname))

# to process all the files, comment out p0 and p1 below
# p0 = 2500
# p1 = 2600

# choose the start and end dates to process (optional, via 'p0' and 'p1')
z0 = '200905010000.HNE'
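# Check of the meteorological conversion above: atan2(v, u) gives the
# direction the wind blows TOWARD (math convention, degrees CCW from east);
# 270 minus that angle gives the direction it comes FROM, measured
# clockwise from north. A sketch:
import numpy as np

u, v = -1.0, 0.0                      # blowing toward the west...
wd = 270.0 - np.degrees(np.arctan2(v, u))
wd = wd % 360.0                       # same wrap as the two pl.find lines above
print(wd)                             # 90.0 -> coming from the east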
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
    """
    A method for displaying digital filter frequency response magnitude,
    phase, and group delay. A plot is produced using matplotlib.

    freqz_resp_list(b, a=[1], mode='dB', n_pts=1024, fsize=(6,4))

    Parameters
    ----------
    b : list of ndarrays of numerator coefficients
    a : list of ndarrays of denominator coefficients
    mode : display mode: 'dB' magnitude, 'phase' in radians, or
        'groupdelay_s' in samples and 'groupdelay_t' in sec,
        all versus frequency in Hz
    n_pts : number of points to plot; default is 1024
    fsize : figure size; default is (6,4) inches

    Mark Wickert, January 2015
    """
    if type(b) == list:
        # We have a list of filters
        N_filt = len(b)
    else:
        return None
    f = np.arange(0, n_pts) / (2.0 * n_pts)
    for n in range(N_filt):
        w, H = signal.freqz(b[n], a[n], 2 * np.pi * f)
        if n == 0:
            plt.figure(figsize=fsize)
        if mode.lower() == 'db':
            plt.plot(f * fs, 20 * np.log10(np.abs(H)))
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Gain (dB)')
                plt.title('Frequency Response - Magnitude')
        elif mode.lower() == 'phase':
            plt.plot(f * fs, np.angle(H))
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                plt.ylabel('Phase (rad)')
                plt.title('Frequency Response - Phase')
        elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
            """
            Notes
            -----
            Since this calculation involves finding the derivative of the
            phase response, care must be taken at phase wrapping points and
            when the phase jumps by +/-pi, which occurs when the amplitude
            response changes sign. Since the amplitude response is zero when
            the sign changes, the jumps do not alter the group delay results.
            """
            theta = np.unwrap(np.angle(H))
            # Since theta for an FIR filter is likely to have many pi phase
            # jumps too, we unwrap a second time 2*theta and divide by 2
            theta2 = np.unwrap(2 * theta) / 2.
            Tg = -np.diff(theta2) / np.diff(w)
            # For gain almost zero set group delay = 0
            idx = pylab.find(20 * np.log10(np.abs(H[:-1])) < -400)
            Tg[idx] = np.zeros(len(idx))
            max_Tg = np.max(Tg)
            if mode.lower() == 'groupdelay_t':
                max_Tg /= fs
                plt.plot(f[:-1] * fs, Tg / fs)
                plt.ylim([0, 1.2 * max_Tg])
            else:
                plt.plot(f[:-1] * fs, Tg)
                plt.ylim([0, 1.2 * max_Tg])
            if n == N_filt - 1:
                plt.xlabel('Frequency (Hz)')
                if mode.lower() == 'groupdelay_t':
                    plt.ylabel('Group Delay (s)')
                else:
                    plt.ylabel('Group Delay (samples)')
                plt.title('Frequency Response - Group Delay')
        else:
            s1 = 'Error, mode must be "dB", "phase", '
            s2 = '"groupdelay_s", or "groupdelay_t"'
            log.error(s1 + s2)
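# Minimal usage sketch for freqz_resp_list: overlay the magnitude response
# of two FIR low-pass designs. The filter parameters below are illustrative.
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt

fs = 8000.0
b1 = signal.firwin(31, 1000.0, fs=fs)   # 31-tap low-pass, 1 kHz cutoff
b2 = signal.firwin(63, 1000.0, fs=fs)   # sharper 63-tap design
freqz_resp_list([b1, b2], [np.array([1.0]), np.array([1.0])],
                mode='dB', fs=fs, n_pts=1024)
plt.show()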