def detect_jumps(self):
    wle = int(self.regressor_t / self.pitch_t_hop)
    isel = self.mag > np.max(self.mag) * self.mag_threshold
    tsel = self.t[isel]
    fsel = self.f0[isel]
    self.isel = isel
    # local linear-regression error flags abrupt changes in f0
    le = linreg2_err(tsel, fsel, wleft=wle, wright=wle, use_l=True)
    # upward jumps: local maxima of the regression error above threshold
    imax = argrelmax(le)[0]
    lemax = le[imax]
    ijup = imax[lemax > self.t_threshold]
    # downward jumps: local minima of the regression error below -threshold
    imin = argrelmax(-le)[0]
    lemin = le[imin]
    ijdn = imin[lemin < -self.t_threshold]
    self.down_jump_indices = np.asarray(ijdn)
    self.up_jump_indices = np.asarray(ijup)
    self.down_jump_times = tsel[ijdn]
    self.up_jump_times = tsel[ijup]
def timebetweenpeaks(dataMatrix, Sensor, Min=False, order=10):
    """Return the mean sample distance between successive extrema for each
    sensor column; uses minima instead of maxima when Min is True."""
    timedifference = []
    finder = argrelmin if Min else argrelmax
    for Sensors in Sensor:
        peaks = finder(dataMatrix[:, Sensors], order=order)
        distance = []
        for p in range(len(peaks[0]) - 1):
            distance.append(peaks[0][p + 1] - peaks[0][p])
        timedifference.append(sum(distance) / len(distance))
    return timedifference
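# A minimal usage sketch for timebetweenpeaks above, assuming numpy and
# scipy.signal's argrelmax/argrelmin are in scope. Two synthetic sine
# channels with periods of 100 and 40 samples should give mean peak
# spacings close to those values.
t_demo = np.arange(1000)
demo_matrix = np.column_stack([np.sin(2 * np.pi * t_demo / 100.0),
                               np.sin(2 * np.pi * t_demo / 40.0)])
print(timebetweenpeaks(demo_matrix, [0, 1]))  # approx [100.0, 40.0]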
def on_change2(pt):
    fig = plt.figure(figsize=(20, 10))
    gs = gridspec.GridSpec(2, 2)
    ax1 = plt.subplot(gs[:, 0])
    ax2 = plt.subplot(gs[0, 1])
    ax3 = plt.subplot(gs[1, 1])
    ax1.plot(f, normList)
    ax1.plot(f[pt], normList[pt], 'ko')
    string = 'f={:.3f} Hz\nMode={:.0f}'.format(f[pt], pt)
    ax1.text(0.05, 0.95, string, transform=ax1.transAxes,
             fontsize=14, verticalalignment='top')
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    idxMode = myDMD_Uy.getIdxforFrq(f[pt])
    mode = myDMD_Uy.getMode(idxMode)
    ax2.imshow(np.real(mode), vmin=vmin, vmax=vmax, interpolation='nearest')
    uy = np.array(np.real(mode)[iRow, :])
    uy_imag = np.array(np.imag(mode)[iRow, :])
    ax3.plot(uy)
    ax3.plot(uy_imag, 'r')
    # locate extrema of the real and imaginary mode profiles
    maxi = sp.argrelmax(uy)[0]
    mini = sp.argrelmin(uy)[0]
    exti = np.sort(np.r_[maxi, mini])
    maxi_imag = sp.argrelmax(uy_imag)[0]
    mini_imag = sp.argrelmin(uy_imag)[0]
    exti_imag = np.sort(np.r_[maxi_imag, mini_imag])
    print(np.diff(exti))
    ax3.scatter(maxi, uy[maxi], marker=2)
    ax3.scatter(mini, uy[mini], marker=3)
    ax3.scatter(maxi_imag, uy_imag[maxi_imag], marker=2)
    ax3.scatter(mini_imag, uy_imag[mini_imag], marker=3)
    ax3.set_xlim([0, np.real(mode).shape[1]])
    # estimate mode numbers from the extrema spacings
    gamma = 0
    print('n=', L / (np.diff(maxi) * dx) + gamma)
    print('n=', L / (np.diff(mini) * dx) + gamma)
    print('n=', L / (np.diff(exti) * dx * 2.0) + gamma)
    print('n=', L / (np.diff(maxi_imag) * dx) + gamma)
    print('n=', L / (np.diff(mini_imag) * dx) + gamma)
    print('n=', L / (np.diff(exti_imag) * dx * 2.0) + gamma)
def getWinSize(im, padFactor=4):
    h, w = im.shape
    im = np.pad(im, ((0, h * (padFactor - 1)), (0, w * (padFactor - 1))),
                mode='constant')
    spec = abs(np.fft.fft2(im))
    horizIs = sig.argrelmax(spec[0, :])[0]
    vertIs = sig.argrelmax(spec[:, 0])[0]
    maxHorizI = max(horizIs, key=lambda i: spec[0, i])
    maxVertI = max(vertIs, key=lambda i: spec[i, 0])
    return (round(float(im.shape[1]) / maxHorizI),
            round(float(im.shape[0]) / maxVertI))
def locxymax(nda, order=1, mode='clip'):
    """For a 2-d or 3-d numpy array, finds the mask of local maxima in the x
    and y axes (diagonals are ignored) using scipy.signal.argrelmax and
    returns their product.

    @param nda   - input ndarray
    @param order - range to search for local maxima along each dimension
    @param mode  - parameter of scipy.signal.argrelmax for treating the border
    """
    shape = nda.shape
    ndim = len(shape)
    if ndim < 2 or ndim > 3:
        msg = 'ERROR: locxymax nda shape %s should be 2-d or 3-d' % (shape,)
        sys.exit(msg)
    ext_cols = argrelmax(nda, -1, order, mode)
    ext_rows = argrelmax(nda, -2, order, mode)
    indc = np.array(ext_cols, dtype=np.uint16)
    indr = np.array(ext_rows, dtype=np.uint16)
    msk_ext_cols = np.zeros(shape, dtype=np.uint16)
    msk_ext_rows = np.zeros(shape, dtype=np.uint16)
    if ndim == 2:
        icr, icc = indc[0, :], indc[1, :]
        irr, irc = indr[0, :], indr[1, :]
        msk_ext_cols[icr, icc] = 1
        msk_ext_rows[irr, irc] = 1
    elif ndim == 3:
        ics, icr, icc = indc[0, :], indc[1, :], indc[2, :]
        irs, irr, irc = indr[0, :], indr[1, :], indr[2, :]
        msk_ext_cols[ics, icr, icc] = 1
        msk_ext_rows[irs, irr, irc] = 1
    return msk_ext_rows * msk_ext_cols
def find_current_peak_position(self, data):
    """
    Finds the current average peak position.

    Parameters
    ----------
    data : numpy.ndarray
        A phase-corrected datacube.

    Returns
    -------
    peak_position : int
        The argument of the highest local maximum.
    """
    self.info('Finding current peak position.')
    data = data.sum(axis=2)
    data = data.sum(axis=1)
    data = np.where(data < 0.75 * data.max(), 0, data)
    peaks = signal.argrelmax(data, axis=0, order=5)[0]
    self.info('Encountered {:d} peaks.'.format(len(peaks)))
    peaks_values = data[peaks]
    max_peaks_arg = np.argmax(peaks_values)
    peak = peaks[max_peaks_arg]
    return peak
def apocenter(self, type=np.mean):
    """
    Estimate the apocenter(s) of the orbit.

    By default, this returns the mean apocenter. To get, e.g., the minimum
    apocenter, pass in ``type=np.min``. To get all apocenters, pass in
    ``type=None``.

    Parameters
    ----------
    type : func (optional)
        By default, this returns the mean apocenter. To return all
        apocenters, pass in ``None``. To get, e.g., the minimum or maximum
        apocenter, pass in ``np.min`` or ``np.max``.

    Returns
    -------
    apo : float, :class:`~numpy.ndarray`
        Either a single number or an array of apocenters.
    """
    r = self.r
    max_ix = argrelmax(r, mode='wrap')[0]
    # drop spurious boundary extrema introduced by the 'wrap' mode
    max_ix = max_ix[(max_ix != 0) & (max_ix != (len(r) - 1))]
    if type is not None:
        return type(r[max_ix])
    else:
        return r[max_ix]
def par_find_peaks_by_chan(info):
    """
    Parameters
    ----------
    info : tuple of
        p_spect_array (numpy.ndarray): an array with dimensions
            frequencies x channels
        frequencies (numpy.ndarray): an array of the frequencies used
        std_thresh (float): threshold in number of standard deviations above
            the corrected power spectra to be counted as a peak

    Returns
    -------
    peaks_all_chans: numpy.ndarray with type bool
        An array of booleans the same shape as p_spect_array, specifying
        if there is a peak at a given frequency and electrode
    """
    p_spect_array = info[0]
    frequencies = info[1]
    std_thresh = info[2]
    peaks_all_chans = np.zeros(p_spect_array.shape).astype(bool)
    for i, chan_data in enumerate(p_spect_array.T):
        # robust linear fit of power vs. log-frequency; peaks are local
        # maxima of the residuals that also exceed the threshold
        x = sm.tools.tools.add_constant(np.log10(frequencies))
        model_res = sm.RLM(chan_data, x).fit()
        peak_inds = argrelmax(model_res.resid)
        peaks = np.zeros(x.shape[0], dtype=bool)
        peaks[peak_inds] = True
        above_thresh = model_res.resid > (np.std(model_res.resid) * std_thresh)
        peaks_all_chans[:, i] = peaks & above_thresh
    return peaks_all_chans
def find_peaks(x, threshold=None, order=1):
    """Finds local maxima of a function.

    Args:
        x: A data vector.
        threshold: Local maxima under this value will be discarded.
            If threshold is None, the function will return only the global
            maximum. Default value is None.
        order: Number of samples before and after a data point that have to
            be smaller than the data point for it to be considered a local
            maximum. If 'threshold' is None, this argument has no effect.
            Default: 1.

    Returns:
        out: A list of local maxima, numpy array type.
    """
    if threshold is not None:
        event_peaks = signal.argrelmax(x, order=int(order))[0]
        if event_peaks.size > 0:
            return event_peaks[x[event_peaks] > threshold]
        return event_peaks
    else:
        if x.size > 0:
            return np.array([np.argmax(x)])
        return np.array([])
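# Usage sketch for find_peaks above (assumes numpy as np and scipy.signal
# as signal are imported, as the function itself requires).
x_demo = np.array([0., 1., 0., 3., 0., 2., 0.])
print(find_peaks(x_demo))                 # global maximum only: [3]
print(find_peaks(x_demo, threshold=1.5))  # local maxima above 1.5: [3 5]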
def elliptical_orbit_to_events(t, w):
    """
    Convert an orbit to MIDI events using Cartesian coordinates and rules.

    Parameters
    ----------
    t : array_like
    w : array_like
    """
    loop = gd.classify_orbit(w)

    # apocenters
    x, y, z = w.T[:3]
    r = np.sqrt(x**2 + y**2 + z**2)
    apo = np.array([argrelmax(rr)[0] for rr in r])

    # get periods
    periods = []
    for i in range(w.shape[1]):
        if np.any(loop[i] == 1):
            # loop orbit: use cylindrical coordinates
            w2 = gd.align_circulation_with_z(w[:, i], loop[i])
            R = np.sqrt(w2[:, 0]**2 + w2[:, 1]**2)
            phi = np.arctan2(w2[:, 1], w2[:, 0]) % (2 * np.pi)
            z = w2[:, 2]
            T1 = gd.peak_to_peak_period(t, R)
            T2 = gd.peak_to_peak_period(t, phi)
            T3 = gd.peak_to_peak_period(t, z)
        else:
            # box orbit: use Cartesian coordinates
            T1 = gd.peak_to_peak_period(t, w[:, i, 0])
            T2 = gd.peak_to_peak_period(t, w[:, i, 1])
            T3 = gd.peak_to_peak_period(t, w[:, i, 2])
        periods.append([T1, T2, T3])

    freqs = (2 * np.pi / np.array(periods)) * 10000.

    delays = []
    notes = []
    for j in range(w.shape[0]):
        _no = []
        for i in range(w.shape[1]):
            if j in apo[i]:
                _no.append(freqs[i].tolist())
        if len(_no) > 0:
            delays.append(t[j])
            notes.append(np.unique(_no).tolist())

    delays = np.array(delays)
    notes = np.array(notes)
    return delays, notes
def unwrap_fsr(peaks, fsr_channel, running_for=None):
    """
    Use clusters of data to identify regions that are wrapped and unwrap them
    using the FSR in number of channels.

    Parameters
    ----------
    peaks (numpy.ndarray) : 1D array containing the peaks with more than a FSR.
    fsr_channel (int) : the FSR in number of channels.

    Returns
    -------
    peaks (numpy.ndarray) : 1D array containing the peaks unwrapped.

    To Do: Test with more than 1 FSR.
    """
    indexes = np.argsort(peaks)
    sorted_peaks = np.sort(peaks)
    diff_sorted_peaks = np.diff(sorted_peaks)
    temp = np.abs(diff_sorted_peaks)
    # zero out typical gaps so only the large inter-cluster jumps remain
    where = np.abs(temp - np.median(temp)) < np.std(temp)
    temp[where] = 0
    split_indexes = signal.argrelmax(temp)[0]
    split_y_indexes = np.split(indexes, split_indexes + 1)
    for (i, idx) in enumerate(split_y_indexes):
        peaks[idx] -= fsr_channel * i
    return peaks
def get_peaks(freq, spectrum, threshold, outdir, metadata):
    """
    This gets the frequency of the peaks of a signal.
    """
    # Smooth the data by using repeated mean smoothing.
    radius = 2
    for i in range(3):
        smooth_spec, _ = mean_smooth(spectrum, radius)
        freq = freq[radius:-radius + 1]
        spectrum = spectrum[radius:-radius + 1]

    # Get the peaks from the smoothed spectrum.
    prad = 4
    peak_index = sig.argrelmax(smooth_spec, order=prad)[0]

    # Remove "peaks" that are only noise fluctuations.
    peaks = []
    for i in peak_index:
        lower = max(i - prad, 0)
        upper = min(i + prad + 1, len(smooth_spec))
        segment = smooth_spec[lower:upper] - smooth_spec[i]
        if abs(np.min(segment)) > threshold:
            peaks.append(i)

    # Frequencies and the spectra.
    freq_peaks = np.array([freq[i] for i in peaks])
    spec_peaks = np.array([spectrum[i] for i in peaks])

    # Create a plot.
    plt.plot(freq, spectrum, 'k')
    plt.plot(freq, smooth_spec, 'r')
    plt.plot(freq_peaks, spec_peaks, 'bo')
    plot_tools(metadata, outdir, 'peak_find')

    return freq_peaks
def spline_max_growth_rate(self, s, droplow=False):
    # N.B.: the -2.3 cutoff drops low OD values (i.e., OD 0.1) from analysis
    if droplow:
        data = np.where(self.log_data < -2.3, np.nan, self.log_data)
    else:
        data = self.log_data
    interpolator = interpolate.UnivariateSpline(self.elapsed_time, data,
                                                k=4, s=s)  # k can be 3-5
    der = interpolator.derivative()
    # Get the approximation of the derivative at all points
    der_approx = der(self.elapsed_time)
    # Get the maximum
    self.maximum_index = np.argmax(der_approx)
    self.growth_rate = der_approx[self.maximum_index]
    self.doubling_time = np.log(2) / self.growth_rate
    self.time_of_max_rate = self.elapsed_time[self.maximum_index]
    # Get estimates of lag time and saturation time from the 2nd derivative
    der2 = der.derivative()
    der2_approx = der2(self.elapsed_time)
    try:
        self.lag_index = signal.argrelmax(der2_approx)[0][0]  # first max
    except IndexError:
        self.lag_index = 0
    if self.lag_index > self.maximum_index:
        self.lag_index = 0
    self.lag_time = self.elapsed_time[self.lag_index]
    self.lag_OD = self.raw_data[self.lag_index]
    # find the first minimum after maximum_index
    minima = signal.argrelmin(der2_approx)[0]
    which_min = bisect.bisect(minima, self.maximum_index)
    try:
        self.sat_index = minima[which_min]
    except IndexError:
        self.sat_index = len(self.elapsed_time) - 1
    self.sat_time = self.elapsed_time[self.sat_index]
    self.sat_OD = self.raw_data[self.sat_index]
    self.spline = interpolator(self.elapsed_time)
    self.intercept = (self.log_data[self.maximum_index]
                      - (self.growth_rate * self.time_of_max_rate))  # b = y - ax
    self.fit_y_values = [((self.growth_rate * x) + self.intercept)
                         for x in self.elapsed_time]  # for plotting
def relativeExtremaSegments(self, rawData, maxMin="max", minSegSize=50):
    from scipy.signal import argrelmax, argrelmin
    PCs = pca(rawData, n_components=1)[0]
    if maxMin == 'max':
        return argrelmax(PCs[:, 0], order=minSegSize)[0]
    if maxMin == 'min':
        return argrelmin(PCs[:, 0], order=minSegSize)[0]
def get_envelops(x, t=None):
    """Find the upper and lower envelopes of the array `x`."""
    if t is None:
        t = np.arange(x.shape[0])
    maxima = argrelmax(x)[0]
    minima = argrelmin(x)[0]

    # consider the start and end to be extrema
    ext_maxima = np.zeros((maxima.shape[0] + 2,), dtype=int)
    ext_maxima[1:-1] = maxima
    ext_maxima[0] = 0
    ext_maxima[-1] = t.shape[0] - 1

    ext_minima = np.zeros((minima.shape[0] + 2,), dtype=int)
    ext_minima[1:-1] = minima
    ext_minima[0] = 0
    ext_minima[-1] = t.shape[0] - 1

    # fit a cubic spline through each set of extrema
    tck = interpolate.splrep(t[ext_maxima], x[ext_maxima])
    upper = interpolate.splev(t, tck)
    tck = interpolate.splrep(t[ext_minima], x[ext_minima])
    lower = interpolate.splev(t, tck)
    return upper, lower
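# Usage sketch for get_envelops above (assumes numpy as np plus scipy's
# argrelmax/argrelmin and scipy.interpolate in scope, which the function
# relies on): the envelopes of a decaying oscillation bracket the signal.
s_demo = np.linspace(0, 10 * np.pi, 500)
x_demo = np.sin(s_demo) * np.exp(-0.05 * s_demo)
upper_demo, lower_demo = get_envelops(x_demo)
assert upper_demo.shape == x_demo.shape and lower_demo.shape == x_demo.shape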
def extr(x):
    """Extract the indices of the extrema and zero crossings.

    :param x: input signal
    :type x: array-like
    :return: indices of minima, maxima and zero crossings.
    :rtype: tuple
    """
    m = x.shape[0]
    x1 = x[:m - 1]
    x2 = x[1:m]
    indzer = find(x1 * x2 < 0)
    if np.any(x == 0):
        iz = find(x == 0)
        indz = []
        if np.any(np.diff(iz) == 1):
            # runs of zeros: take the midpoint of each run
            zer = x == 0
            dz = np.diff(np.r_[0, zer, 0])
            debz = find(dz == 1)
            finz = find(dz == -1) - 1
            indz = np.round((debz + finz) / 2)
        else:
            indz = iz
        indzer = np.sort(np.hstack([indzer, indz]))
    indmax = argrelmax(x)[0]
    indmin = argrelmin(x)[0]
    return indmin, indmax, indzer
def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind + 1, 0] - 3
    d_bottom = q[ind - 1, 1] - 3
    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    # estimate the module size from the autocorrelation peak spacing
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left) / s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top) / s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s)) + 1
    region = region.resize((s * m, s * n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))
    return hamming.bin_to_str(str2)
def test_amgauss(self):
    """Test if the gaussian amplitude modulator works correctly."""
    time_center = 63
    n_points = 128
    spread = 10
    signal = am.amgauss(n_points, time_center, spread)
    # parameters of the underlying gaussian function of the form
    # f(x) = a * exp( (-(x - b) ** 2) / (2 * (c ** 2)))
    a, b, c = 1, time_center, spread / np.sqrt(2 * pi)
    # Integral of a Gaussian is a * c * sqrt(2 * pi)
    integral = a * c * np.sqrt(2 * pi)
    self.assertAlmostEqual(integral, signal.sum())
    # Other miscellaneous properties of a Gaussian
    maximum = argrelmax(signal)
    self.assertEqual(len(maximum), 1)
    self.assertEqual(maximum[0][0], time_center - 1)
    self.assertAlmostEqual(signal[time_center - 1], 1.0)
    self.assert_is_monotonic_increasing(signal[:(time_center - 1)])
    self.assert_is_monotonic_decreasing(signal[(time_center - 1):])
    infpl1 = np.floor(b - c).astype(int) - 1
    infpl2 = np.floor(b + c).astype(int)
    self.assert_is_convex(signal[:infpl1])
    self.assert_is_concave(signal[infpl1:infpl2])
    self.assert_is_convex(signal[infpl2:])
def _get_psp_list(bins, neuron_model, di_param, timestep, simtime):
    '''
    Return the list of effective weights from a list of NEST connection
    weights.
    '''
    nest.ResetKernel()
    nest.SetKernelStatus({"resolution": timestep})
    # create neuron and recorder
    neuron = nest.Create(neuron_model, params=di_param)
    vm = nest.Create("voltmeter", params={"interval": timestep})
    nest.Connect(vm, neuron)
    # send the spikes
    times = [timestep + n * simtime for n in range(len(bins))]
    sg = nest.Create("spike_generator",
                     params={'spike_times': times, 'spike_weights': bins})
    nest.Connect(sg, neuron)
    nest.Simulate((len(bins) + 1) * simtime)
    # get the max and its time
    dvm = nest.GetStatus(vm)[0]
    da_voltage = dvm["events"]["V_m"]
    da_times = dvm["events"]["times"]
    da_max_psp = da_voltage[argrelmax(da_voltage)]
    da_min_psp = da_voltage[argrelmin(da_voltage)]
    da_max_psp -= da_min_psp
    if len(bins) != len(da_max_psp):
        raise InvalidArgument("simtime too short: all PSP maxima are not in "
                              "range")
    else:
        plt.plot(da_times, da_voltage)
        plt.show()
        return da_max_psp
def peakFind(data, l, pos, order=1, _even=False):
    sets = re.sub('x', str(pos), 'Report: xm (m)')
    print(sets)
    iterr = 1
    while not _even:
        iterr += 1
        mean = int(l[2] * iterr)
        maxlen = []
        phases = {}
        framepeak = {}
        for frame in data.items():
            name = frame[0]
            frame = frame[1][sets]
            # rolling cubed sum emphasises the peaks before detection
            frameMean = (frame**3).rolling(mean, center=True).sum().values
            maxpeak = sig.argrelmax(frameMean, order=order)
            maxlen.append(len(maxpeak[0]))
            framepeak[name] = frame.index[maxpeak]
            phase = np.zeros([framepeak[name].shape[0] - 1])
            for i in range(phase.shape[0]):
                phase[i] = framepeak[name][i + 1] - framepeak[name][i]
            phases[name] = phase
        try:
            phases = pd.DataFrame(phases)
            framePeaks = pd.DataFrame(framepeak)
            _even = True
        except ValueError:
            print(maxlen)
            print('----- All of the peaks have not been found ------')
        if iterr == 200:
            _even = True
            print(' ------ max iterations reached. Check data length ------')
            sets, phases, framePeaks = [], [], []
    return [sets, phases, framePeaks]
def extractResponse(self):
    # integer division so the downsampled indices remain valid slice bounds
    stimStarts = self.stimStartInds // self.downSamplingFactor
    stimEnds = self.stimEndInds // self.downSamplingFactor
    samplingRateDown = self.vibrationSignalDown.sampling_rate
    self.stimAmps = []
    self.stimFreqs = []
    self.responseVTraces = []
    self.stimTraces = []
    for (stD, endD, st, end) in zip(stimStarts, stimEnds,
                                    self.stimStartInds, self.stimEndInds):
        stimDown = self.vibrationSignalDown[stD:endD + 1]
        # dominant stimulus frequency from the FFT of the downsampled trace
        stimDownFFT = np.fft.rfft(stimDown, n=2048)
        self.stimFreqs.append(np.argmax(np.abs(stimDownFFT))
                              * samplingRateDown / 2 / len(stimDownFFT))
        stimAS = self.vibrationSignal[st:end + 1]
        stim = stimAS.magnitude
        # stimulus amplitude from the mean magnitude of all extrema
        allAmps = stim[np.concatenate((argrelmin(stim)[0],
                                       argrelmax(stim)[0]))]
        self.stimAmps.append(np.abs(allAmps).mean()
                             * self.vibrationSignal.units)
        self.responseVTraces.append(self.voltageSignal[st:end + 1])
        self.stimTraces.append((stimAS - np.mean(stimAS)))
def manual_detect(x, times, ker_len, clip, rate):
    # smooth the signal with a moving-average kernel and take local maxima
    kernel = np.ones((int(ker_len))) / ker_len
    x_smoothed = convolve(x, kernel)
    boundaries = argrelmax(x_smoothed)[0]
    boundaries = np.append(boundaries, len(x) - 1)
    boundaries = np.insert(boundaries, 0, 0)
    boundaries = times[boundaries]
    # Optionally merge all boundaries that are too close together
    if clip > 0:
        y = [boundaries[0]]
        i = 0
        for j in range(1, len(boundaries)):
            if boundaries[j] - boundaries[i] >= clip:
                boundaries[i:j] = np.mean(boundaries[i:j])
                i = j
        for bound in boundaries:
            if bound != y[-1]:
                y.append(bound)
        boundaries = np.array(y)
    return boundaries
def guess_syllables(wave, window_size=10, len_threshold=0.01):
    amp, _ = segmentation(wave)
    windowed_amp = feature.normalize(moving_window(amp, window_size))
    maxima = signal.argrelmax(windowed_amp, order=7)
    return len(maxima[0])
def initSMParamsFourier(Q, x, y, sn, samplingFreq, nPeaks, relMaxOrder=2):
    """
    Initialize hyperparameters for the spectral-mixture kernel. Weights are
    all set to be uniformly distributed, means are given as the peaks in the
    frequency spectrum, and variances are given by a random sample from a
    uniform distribution with a max equal to the max distance.
    """
    x = np.atleast_2d(x)
    y = np.atleast_2d(y)
    n, D = x.shape
    w = np.zeros(Q)
    m = np.zeros((D, Q))
    s = np.zeros((D, Q))
    w[:] = np.std(y) / Q
    hypinit = {
        'cov': np.zeros(Q + 2 * D * Q),
        'lik': np.atleast_1d(np.log(sn)),
        'mean': np.array([])
    }

    # Assign hyperparam weights
    hypinit['cov'][0:Q] = np.log(w)

    # Assign hyperparam frequencies (mu's)
    signal = np.array(y.ravel()).ravel()  # make into 1D array
    n = x.shape[0]
    k = np.arange(n)
    ts = n / samplingFreq
    frqx = k / float(ts)
    frqx = frqx[:n // 2]
    frqy = np.fft.fft(signal) / n
    frqy = abs(frqy[:n // 2])

    # Find the peaks in the frequency spectrum
    peakIdx = np.array([])
    while not peakIdx.any() and relMaxOrder > 0:
        peakIdx = spsig.argrelmax(np.log(frqy**2), order=relMaxOrder)[0]
        relMaxOrder -= 1
    if not peakIdx.any():
        raise ValueError("Data doesn't have any detectable peaks in Fourier "
                         "space. Switching to a different kernel besides the "
                         "spectral mixture is recommended.")

    # Find the specified number (nPeaks) of largest peaks
    sortedIdx = frqy[peakIdx].argsort()[::-1][:nPeaks]
    sortedPeakIdx = peakIdx[sortedIdx]
    hypinit['cov'][Q + np.arange(0, Q * D)] = np.log(frqx[sortedPeakIdx])

    # Assign hyperparam length scales (sigma's)
    for i in range(0, D):
        xslice = np.atleast_2d(x[:, i])
        d2 = spat.distance.cdist(xslice, xslice, 'sqeuclidean')
        if n > 1:
            d2[d2 == 0] = d2[0, 1]
        else:
            d2[d2 == 0] = 1
        maxshift = np.max(np.max(np.sqrt(d2)))
        s[i, :] = 1. / np.abs(maxshift * np.random.ranf((1, Q)))
    hypinit['cov'][Q + Q * D + np.arange(0, Q * D)] = np.log(s[:]).T
    return hypinit
def top_peak_pixels(pixels, spectra):
    # pixel positions of the two strongest local maxima, in ascending order
    max_locs = argrelmax(spectra)[0]
    max_vals = spectra[max_locs]
    sorted_max_val_inds = np.argsort(max_vals).astype(int)
    top_max_val_inds = sorted_max_val_inds[-2:]
    top_max_locs = max_locs[top_max_val_inds]
    max_flux_pixels = pixels[top_max_locs]
    return np.sort(max_flux_pixels)
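# Usage sketch for top_peak_pixels above (assumes numpy as np and
# scipy.signal.argrelmax imported as argrelmax): the two strongest local
# maxima sit at indices 3 and 5.
pixels_demo = np.arange(9)
spectra_demo = np.array([0., 2., 0., 5., 0., 3., 0., 1., 0.])
print(top_peak_pixels(pixels_demo, spectra_demo))  # [3 5]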
def test_ampd_peaks():
    x = np.sin(np.arange(0, 20 * np.pi, np.pi / 20))
    noise = np.random.sample(size=x.shape) / 100
    # test with just a little noise
    actual_peaks = signal.argrelmax(x)[0]
    detected_peaks = discrete.ampd_peaks(x + noise)
    assert set(detected_peaks) - set(actual_peaks) == set()
    # Grant forgiveness for first and last peaks.
    assert (set(actual_peaks) - set(detected_peaks)
            - {actual_peaks[0], actual_peaks[-1]} == set())
def convolution_metric(convolved, response, sample_size):
    # smooth, then average the response values at the strongest peaks,
    # weighted by the smoothed peak heights
    convolved = np.convolve(convolved, np.ones(10) / 10, mode='same')
    peaks = argrelmax(convolved)[0]
    peak_values = convolved[peaks]
    samples = np.argsort(peak_values)[::-1][:sample_size]
    response = response[peaks[samples]]
    weights = convolved[peaks[samples]]
    weights = weights / np.sum(weights)
    return np.sum(response * weights), np.std(response)
def getSpectralPeaks(aSpectrum, method="MAD", halfWindowSize=20, SNR=2):
    if method == "MAD":
        idx = argrelmax(np.array(aSpectrum.df['intensity']),
                        order=halfWindowSize)[0]
        noise = estimateNoise(aSpectrum.df['intensity'])
        dfPeaksPython = aSpectrum.df.iloc[idx]
        # keep only peaks above the signal-to-noise cutoff
        dfPeaksPythonCleaned = dfPeaksPython[
            dfPeaksPython['intensity'] > (SNR * noise)]
        print(dfPeaksPythonCleaned.columns)
        pks = massPeaks(mass=dfPeaksPythonCleaned['mass'],
                        intensity=dfPeaksPythonCleaned['intensity'])
        return pks
def peaky(f, o=3):
    fpeak = signal.argrelmax(f, order=o)[0]
    peak_mean = f[fpeak].mean()
    # consider that no train is less than half of the average train
    limit = peak_mean / 2.0
    # collect volume around the peak
    ff = np.array([f[i - o:i + o + 1].sum()
                   if (i in fpeak) and (x > limit) else 0
                   for i, x in enumerate(f)])
    scale = f.sum() / ff.sum()
    return ff * scale
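# Usage sketch for peaky above (assumes numpy as np and scipy.signal as
# signal): mass is gathered around the two prominent peaks while the total
# sum is preserved by the rescaling.
f_demo = np.array([0, 0, 0, 1, 5, 1, 0, 0, 1, 4, 1, 0, 0, 0], dtype=float)
print(f_demo.sum(), peaky(f_demo, o=3).sum())  # both 13.0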
def test_residue(self):
    """Test the residue of the emd output."""
    signal = np.sum([self.trend, self.mode1, self.mode2], axis=0)
    decomposer = EMD(signal, t=self.ts)
    imfs = decomposer.decompose()
    n_imfs = imfs.shape[0]
    # the residue (last IMF) should be near-monotonic: at most two extrema
    n_maxima = argrelmax(imfs[n_imfs - 1, :])[0].shape[0]
    n_minima = argrelmin(imfs[n_imfs - 1, :])[0].shape[0]
    self.assertTrue(max(n_maxima, n_minima) <= 2)
def extremes(values: [T]) -> [T]:
    max_indices = argrelmax(numpy.array(values))[0]
    min_indices = argrelmin(numpy.array(values))[0]
    max_i = 0
    min_i = 0
    result = []
    # Merge the two arrays of minima and maxima into one
    while max_i < len(max_indices) and min_i < len(min_indices):
        if max_indices[max_i] < min_indices[min_i] and \
                values[max_indices[max_i]] > values[min_indices[min_i]]:
            result.append(values[max_indices[max_i]])
            max_i += 1
        else:
            result.append(values[min_indices[min_i]])
            min_i += 1
    result.extend(map(lambda i: values[i], max_indices[max_i:]))
    result.extend(map(lambda i: values[i], min_indices[min_i:]))
    return result
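# Usage sketch for extremes above (assumes numpy and scipy.signal's
# argrelmax/argrelmin imported as in the function): interior maxima and
# minima are interleaved into one list.
print(extremes([0, 3, 1, 4, 0, 2, 1]))  # [3, 1, 4, 0, 2]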
def get_first_mode(data, rare_prop=99, remove_tail=True):
    """
    gets the first (reliable) peak in the histogram

    Args:
        data (np.ndarray): image data
        rare_prop (float): if remove_tail, use the proportion of hist above
        remove_tail (bool): remove rare portions of histogram
            (included to replicate the default behavior in the R version)

    Returns:
        first_peak (int): index of the first peak
    """
    if remove_tail:
        rare_thresh = np.percentile(data, rare_prop)
        which_rare = data >= rare_thresh
        data = data[which_rare != 1]
    grid, pdf = smooth_hist(data)
    # argrelmax returns a tuple, so [0] extracts the index array
    maxima = argrelmax(pdf)[0]
    first_peak = grid[maxima[0]]
    return first_peak
def extr(x):
    """Extract the indices of the extrema and zero crossings.

    :param x: input signal
    :type x: array-like
    :return: indices of minima, maxima and zero crossings.
    :rtype: tuple
    :Example:

    >>> from __future__ import print_function
    >>> import numpy as np
    >>> x = np.array([0, -2, 0, 1, 3, 0.5, 0, -1, -1])
    >>> indmin, indmax, indzer = extr(x)
    >>> print(indmin)
    [1]
    >>> print(indmax)
    [4]
    >>> print(indzer)
    [0 2 6]
    """
    m = x.shape[0]
    x1 = x[:m - 1]
    x2 = x[1:m]
    indzer = np.where(x1 * x2 < 0)[0]
    if np.any(x == 0):
        iz = np.where(x == 0)[0]
        indz = []
        if np.any(np.diff(iz) == 1):
            # runs of zeros: take the midpoint of each run
            zer = x == 0
            dz = np.diff(np.r_[0, zer, 0])
            debz = np.where(dz == 1)[0]
            finz = np.where(dz == -1)[0] - 1
            indz = np.round((debz + finz) / 2)
        else:
            indz = iz
        indzer = np.sort(np.hstack([indzer, indz]))
    indmax = argrelmax(x)[0]
    indmin = argrelmin(x)[0]
    return indmin, indmax, indzer
def find_max_peak(vector):
    """
    Find the maximum peak of a 1D vector, using scipy's argrelmax function.

    Parameters
    ----------
    vector : np.array
        1D array

    Returns
    -------
    peak, index : float, np.array
        The maximum peak and its corresponding index in the input vector.

    Examples
    --------
    # no peaks returns nan
    >>> a = np.concatenate((np.arange(5), np.arange(5)[::-1]))
    >>> find_max_peak(a)
    (nan, nan)
    >>> a = np.concatenate((np.arange(5), np.arange(6)[::-1]))
    >>> print(a)
    [0 1 2 3 4 5 4 3 2 1 0]
    >>> find_max_peak(a)
    (5, array([5]))
    >>> a = np.concatenate((np.arange(5), np.arange(6)[::-1], [5, 0]))
    >>> print(a)
    [0 1 2 3 4 5 4 3 2 1 0 5 0]
    >>> find_max_peak(a)
    (5, array([ 5, 11]))
    """
    from scipy import signal
    assert np.ndim(vector) == 1
    pksind, = signal.argrelmax(vector)
    if len(pksind) == 0:
        return np.nan, np.nan
    pk = vector[pksind].max()
    inds, = np.where(vector == pk)
    return pk, inds
def get_maxima(series, N, start_date=None, stop_date=None, _sorted=True):
    """
    Summary:
        Function that determines the first N maxima of a pandas time series
    Arguments:
        series - pandas series
        N - number of maxima
        start_date - start date as DateTime - limits the search for maxima
        stop_date - stop date as DateTime - limits the search for maxima
        _sorted - sort with respect to the highest values
    Returns:
        maxima - the maxima of the time series
    """
    if start_date is None:
        start_date = series.index[0]
    if stop_date is None:
        stop_date = series.index[-1]
    ind_in_date_range = np.argwhere((series.index >= start_date)
                                    & (series.index <= stop_date)).flatten()
    # First find the local maxima
    all_maxima = signal.argrelmax(series.values[ind_in_date_range])[0]
    if _sorted:
        # Then sort the maxima
        ind = np.argsort(series.values[ind_in_date_range[all_maxima]])
        nmaxima = min([N, len(ind)])
        ind = all_maxima[ind[-nmaxima:]]
        maxima = series.iloc[ind_in_date_range[np.sort(ind)]].copy()
    else:
        nmaxima = min([N, len(all_maxima)])
        maxima = series.iloc[ind_in_date_range[all_maxima[:nmaxima]]]
    return maxima
def get_rise_times(color_plot, x_color, y_color, collection_rate=1):
    """
    This function reads in a tdms file and returns a list of rise times for
    the peaks, in the same order as the x values from preprocessing.read_tdms.
    """
    x_int = [int(i) for i in x_color]
    y_int = [int(i) for i in y_color]
    # Take derivatives of IvsTs
    peak_and_times = []
    for idx, i in enumerate(y_int):
        ivst = color_plot[i]
        dy_1 = diff(ivst) / collection_rate
        dy_2 = diff(dy_1) / collection_rate
        peak = (x_int[idx], argrelmax(dy_2)[0])
        peak_and_times.append(peak)
    # For each peak, find the last 2nd-derivative maximum at or before it
    peak_with_time = []
    for i in peak_and_times:
        greater = []
        smaller = []
        equal = []
        for idx, time in enumerate(i[1]):
            if time < i[0]:
                smaller.append(time)
            if time == i[0]:
                equal.append(time)
            if time > i[0]:
                greater.append(time)
        if len(smaller) != 0:
            start_time = smaller[-1]
        if len(smaller) == 0:
            start_time = equal[0]
        peak_with_time.append((i[0], start_time))
    rise_times = [i[0] - i[1] for i in peak_with_time]
    rise_times = [i / 10 for i in rise_times]
    return np.absolute(rise_times)
def getTresholdByReExtrem(gray, myorder=10):
    """
    get thr by relative extreme value

    :param gray: a gray scale img data
    :param myorder: the order means step of thr
    :return: int, a better thr
    """
    hist = np.bincount(gray.ravel(), minlength=256)  # performance: 0.003163 s
    # Returns the indices of the maximum values along an axis.
    # max_exts perf: 9.6e-05 s, min_exts perf: 1.3e-04 s, max_idx: 1.1e-05 s
    max_idx = np.argmax(hist, axis=0)
    # calculate relative extreme values
    min_exts = argrelmin(hist, order=myorder)[0]
    max_exts = argrelmax(hist, order=myorder)[0]
    if myorder < 2:
        return np.mean(gray)
    elif min_exts.size < 1 or max_exts.size < 2:
        myorder -= 1
        other_thr = getTresholdByReExtrem(gray, myorder=myorder)
        return other_thr
    # for black background
    if max_idx < 128 and max_idx == max_exts[0]:
        min_ext_1 = min_exts[0]  # minimum belonging to the background
        max_ext_0 = max_exts[0]
        max_ext_1 = max_exts[1]
        if max_ext_1 - max_ext_0 > 50:
            myorder -= 1
            other_thr = getTresholdByReExtrem(gray, myorder=myorder)
            return other_thr
        elif max_exts[0] < min_ext_1 < max_exts[1]:
            return min_ext_1
        else:
            myorder -= 1
            other_thr = getTresholdByReExtrem(gray, myorder=myorder)
            return other_thr
    elif max_idx < 128 and max_idx != max_exts[0]:
        myorder -= 1
        other_thr = getTresholdByReExtrem(gray, myorder=myorder)
        return other_thr
    else:
        return np.mean(gray)
def lifeSED(self, k, na, iqp, pya):
    q = self.calculateSED(k, na)
    x = self.freq
    nbranch = 3 * na
    w = [pya.frequency(iqp, ibr) for ibr in range(nbranch)]
    w1 = []
    df = 1.0 / (2.0 * self.timestep) / (self.totalStep / 2)
    span = int(1.0 / df)
    for i in range(nbranch):
        node = '/%s/%s' % (iqp, i)
        print(node)
        # restrict the fit to the 1 Hz neighborhood of the original w,
        # because dw cannot be too large
        ori = int(w[i] / df)
        low = max(ori - 1 * span, 0)
        hi = min(ori + 1 * span, len(q) - 1)
        filter = range(low, hi)
        qu = q[filter]
        xu = x[filter]
        # qs = self.lowess(xu, qu)
        if False:
            from scipy import signal
            peaks, = signal.argrelmax(qs, order=span // 3)
            center = peaks[np.abs(xu[peaks] - w[i]).argmin()]
        p = self.fitpart(xu, qu)
        w1.append(p)
    w1 = np.array(w1)
    v = ['\t'.join(map(str, list(k) + [w[i], p[0], p[1], 1.0 / p[1]]))
         for i, p in enumerate(w1)]
    c = '\n'.join(v)
    print("[sed]", c)
    return c
def pick_onsets_dynT(F, threshold=0.15, N=10, w=3.5):
    """
    Peak picking with a dynamic threshold

    :param F: ODF (onset detection function)
    :param threshold: float, constant modifier for the threshold
    :param N: int, number of frames the mean and median are calculated from
    :param w: float, minimum allowed distance between onsets, in frames
    :return: numpy array, peak indices
    """
    # Indices of local maxima in F
    localMaximaInd = argrelmax(F, order=1)
    # Values of local maxima in F
    localMaxima = F[localMaximaInd[0]]
    # Dynamic threshold from the running median and mean
    threshold_dyn = np.full((F.shape), threshold)
    for i in range(N, F.shape[0]):
        threshold_dyn[i] = (threshold - 0.2 + .25 * np.median(F[i - N:i])
                            + .25 * np.mean(F[i - N:i]))
    # Pick local maxima greater than the dynamic threshold
    onsets = localMaxima >= threshold_dyn[localMaximaInd[0]]
    rets = localMaximaInd[0][onsets]
    # remove a peak if the detection function has not been under the threshold
    i = 0
    while i in range(len(rets) - 1):
        # Check that the ODF goes under the threshold between onsets
        if F[rets[i]:rets[i + 1]].min() >= threshold:
            rets = np.delete(rets, i + 1)
        # Check that two onsets are not too close to each other
        elif rets[i] - rets[i + 1] > -w:
            rets = np.delete(rets, i + 1)
        else:
            i += 1
    # Return onset indices
    return rets
def GetAveMaxima(self, img):
    '''
    Compute the average of the local maxima of the input image.

    args : img -> uint, 1ch
    dst  : AveMaxima -> average of the local maxima
    '''
    maxima = np.array(signal.argrelmax(img))
    TotalMaxima = 0
    for i in range(len(maxima[1])):
        u = maxima[0, i]
        v = maxima[1, i]
        TotalMaxima += img[u, v]
    if len(maxima[1]) == 0:
        AveMaxima = 0
    else:
        AveMaxima = TotalMaxima / len(maxima[1])
    return AveMaxima
def sifting2_argrel(t, x):
    # spline envelopes through the maxima and minima
    t_up = argrelmax(x)[0]
    x_up = x[t_up]
    t_down = argrelmin(x)[0]
    x_down = x[t_down]
    extrema_x = len(x_up) + len(x_down)
    tck = interpolate.splrep(t_up, x_up)
    x_up = interpolate.splev(t, tck)
    tck = interpolate.splrep(t_down, x_down)
    x_down = interpolate.splev(t, tck)
    # subtract the mean envelope (one sifting step of EMD)
    x_mean = (x_up + x_down) / 2
    h = x - x_mean
    return h, extrema_x
def baseline_like_detect(x, times, threshold=1, min_threshold=1):
    potential_boundaries = argrelmax(x)[0]
    boundaries = []
    mean = np.mean(x[potential_boundaries])
    for i, pb in enumerate(potential_boundaries):
        # always keep boundaries at the edges of the signal
        if pb == 0 or pb == len(x):
            boundaries.append(pb)
            continue
        # discard weak maxima
        if x[pb] < min_threshold * mean:
            continue
        # discard maxima without sufficiently deep neighbouring valleys
        if not check_valleys(x, pb, threshold):
            continue
        boundaries.append(pb)
    return times[boundaries]
def local_extrema(vec: np.array) -> Tuple[List[int], List[int]]:
    """
    Takes a 1-d np array and returns the indices of the local maxima and
    minima, including boundary points
    """
    # TODO: Find a better way to do this, because argrelmax/argrelmin only
    # look for internal extrema
    maxInd = signal.argrelmax(vec)[0]
    minInd = signal.argrelmin(vec)[0]
    if vec[0] > vec[1]:
        maxInd = np.append(maxInd, 0)
    else:
        minInd = np.append(minInd, 0)
    if vec[-1] > vec[-2]:
        maxInd = np.append(maxInd, len(vec) - 1)
    else:
        minInd = np.append(minInd, len(vec) - 1)
    return maxInd, minInd
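# Usage sketch for local_extrema above (assumes numpy as np, scipy.signal
# as signal, and typing's Tuple/List): boundary samples are classified
# against their immediate neighbors.
vec_demo = np.array([5., 1., 3., 2., 4.])
max_ind_demo, min_ind_demo = local_extrema(vec_demo)
print(max_ind_demo, min_ind_demo)  # [2 0 4] [1 3]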
def create_w_coef_mask(w_coefs, order, epsilon=0.1, remove_inf=False):
    """
    Create a new matrix, the same shape as the wavelet coefficient one, but
    with zeros everywhere except for local maxima. Epsilon here is used for
    ranking the strength of the local maxima. Assumes that the coefficient
    matrix coming in is already in absolute terms.

    :param w_coefs: wavelet coefficient matrix
    :param order: how many neighbors on a given row to look at to determine
        a maximum
    :param epsilon: divided against the maxima, used for transparent ranking
    :param remove_inf: replace infinite coefficients with zero first
    :return: same shape array, see above
    """
    if remove_inf:
        w_coefs[w_coefs == np.inf] = 0.0
    mask = np.zeros_like(w_coefs, dtype=int)
    for n, row in enumerate(w_coefs):
        maxs = signal.argrelmax(row, order=order)[0]
        mask[n, maxs] = row[maxs] / epsilon
    return mask
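# Usage sketch for create_w_coef_mask above (assumes numpy as np and
# scipy.signal as signal): each row keeps only its local maxima, scaled by
# 1 / epsilon and cast to int.
w_demo = np.array([[0., 1., 0., 3., 0.],
                   [0., 2., 0., 1., 0.]])
print(create_w_coef_mask(w_demo, order=1, epsilon=0.1))
# [[ 0 10  0 30  0]
#  [ 0 20  0 10  0]]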
def noise_autoxcorr(fragment):
    xcoefs = []
    if np.mean(fragment) > np.median(fragment):
        # Positive peaks
        tp = signal.argrelmax(fragment, order=50)[0]
        pc = np.percentile(fragment, range(50, 100))
        pkthres = pc[np.argmax(np.diff(pc)) + 1]
        tp = tp[fragment[tp] > pkthres]
    else:
        # Negative peaks
        tp = signal.argrelmin(fragment, order=50)[0]
        pc = np.percentile(fragment, range(1, 51))
        pkthres = pc[np.argmax(np.diff(pc)) + 1]
        tp = tp[fragment[tp] < pkthres]
    if len(tp) < 2:
        return 0.0
    # normalized cross-correlation at each peak-to-peak delay
    for i in range(1, len(tp)):
        delay = tp[i] - tp[0]
        tr1, tr2 = fragment[:-delay], fragment[delay:]
        xcoefs.append((np.correlate(tr1, tr2)
                       / np.sqrt(np.dot(tr1, tr1) * np.dot(tr2, tr2)))[0])
    return np.median(xcoefs)
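# Usage sketch for noise_autoxcorr above (assumes numpy as np and
# scipy.signal as signal): a periodic fragment correlates strongly with
# itself at its peak-to-peak delays, while white noise does not.
t_demo = np.arange(4000)
print(noise_autoxcorr(np.sin(2 * np.pi * t_demo / 200.0)))    # close to 1.0
print(noise_autoxcorr(np.random.RandomState(0).randn(4000)))  # near 0.0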
def get_damp_from_decay(decay):
    """Calculates the damping of a decaying signal from its logarithmic
    decrement, obtained by least squares."""
    peak_ind = np.array([m for m in signal.argrelmax(abs(decay),
                                                     order=1)]).flatten()
    log_peak_ratios = np.zeros(len(peak_ind) - 1)
    for i in range(len(peak_ind) - 1):
        log_peak_ratios[i] = 2 * np.log(
            abs(decay[peak_ind[0]] / decay[peak_ind[i + 1]]))
    peak_nums = np.linspace(1, len(log_peak_ratios), len(log_peak_ratios))
    # least-squares fit of the log peak ratios against peak number
    A = np.vstack([peak_nums, np.ones(len(peak_nums))]).T
    b = log_peak_ratios
    m, c = np.linalg.lstsq(A, b, rcond=None)[0]
    resid = np.linalg.lstsq(A, b, rcond=None)[1][0]
    R2 = 1 - resid / (b.size * b.var())
    damp = m / np.sqrt(m**2 + 4 * np.pi**2)
    return damp, R2, A, b, c, m
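# Usage sketch for get_damp_from_decay above (assumes numpy as np and
# scipy.signal as signal): for an exponentially decaying sinusoid the
# recovered damping ratio should be close to the one used to build it.
zeta_demo, wn_demo = 0.05, 2 * np.pi
t_demo = np.linspace(0, 10, 5000)
decay_demo = (np.exp(-zeta_demo * wn_demo * t_demo)
              * np.sin(wn_demo * np.sqrt(1 - zeta_demo**2) * t_demo))
print(get_damp_from_decay(decay_demo)[0])  # approx 0.05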
def call_peaks(sigvals, min_signal=0, sep=120, boundary=None, order=1):
    """Greedy algorithm for peak calling-- first call all local maxima,
    then call the greatest maximum as a peak, then the next greatest that
    isn't within 'sep' distance of that peak, and so on"""
    if sum(np.isnan(sigvals)) > 0:
        if sum(np.isnan(sigvals)) == len(sigvals):
            return np.array([])
        else:
            replace = min(sigvals[~np.isnan(sigvals)])
            sigvals[np.isnan(sigvals)] = replace
    if boundary is None:
        boundary = sep / 2
    random = np.random.RandomState(seed=25)
    l = len(sigvals)
    # tiny random jitter breaks ties between equal neighboring values
    peaks = signal.argrelmax(sigvals * (1 + random.uniform(0, 10**-12, l)),
                             order=order)[0]
    peaks = peaks[sigvals[peaks] >= min_signal]
    peaks = peaks[peaks >= boundary]
    peaks = peaks[peaks < (l - boundary)]
    sig = sigvals[peaks]
    return reduce_peaks(peaks, sig, sep)
def fft_peak(data, s=0, e=24 * 3, dt=60, pdf_plot=False):
    """Period estimation by FFT. The range includes both endpoints.

    Args:
        data: numpy array
        s: start [time (h)] (default: {0})
        e: end [time (h)] (default: {24 * 3})
        dt: sampling interval [min] (default: {60})
        pdf_plot: (default: {False})

    Returns:
        DataFrame of spectral peaks, the time axis, and the analysed data
    """
    dt_h = dt / 60
    data = data[int(s / dt_h):int(e / dt_h) + 1]  # keep only the FFT window
    n = data.shape[0]
    time = np.arange(s, e + dt_h, dt_h)
    time = time - s
    f = np.linspace(0, 1.0 / dt_h, n)  # frequency per hour
    # one-dimensional n-point discrete Fourier transform (FFT)
    fft_data = np.fft.fft(data, n=None, axis=0)
    P2 = np.abs(fft_data) / n  # scale the amplitude
    P1 = P2[0:int(n / 2)]  # only half the sampling rate is meaningful
    P1[1:-1] = 2 * P1[1:-1]  # double the AC components
    P1[0] = 0  # https://jp.mathworks.com/help/matlab/ref/fft.html
    fft_point = signal.argrelmax(P1, order=1, axis=0)  # peak locations
    fft_df = pd.DataFrame(index=[], columns=["sample", "amp", "f", "pha"])
    fft_df["sample"] = fft_point[1]
    fft_df["amp"] = P1[fft_point]
    fft_df["f"] = f[fft_point[0]]
    fft_df["pha"] = np.angle(fft_data)[fft_point]  # phase of the complex FFT
    fft_df = fft_df.sort_values(by=["sample", "amp"], ascending=[True, False])
    return fft_df, time, data
def pitch(data, Fs, method=None, outImage=False):
    if data.ndim != 1:
        raise TypeError('data must be 1d numpy array')
    # int array -> float array
    data = data.astype('float32')
    if method == 'fft':
        return pitch_f(data, Fs)
    N = data.shape[0]
    # Normalized autocorrelation (v)
    r0 = np.dot(data, data)
    shift = np.copy(data)
    v = np.zeros(N, dtype='float32')
    v[0] = 1.0
    for i in range(1, N):
        shift = np.roll(shift, N - 1)
        shift[N - 1] = 0.0
        v[i] = np.dot(data, shift) / r0
    # output graph of v
    if outImage:
        kaxis = np.arange(0.0, 1.0 * N / Fs, 1.0 / Fs)
        plt.title('autocorrelation')
        plt.xlabel('time delay[sec]')
        plt.plot(kaxis, v)
        plt.savefig('../data/img/autocorrelation.png')
    # detect the strongest autocorrelation peak and its delay
    peaks = sig.argrelmax(v)
    maxpeak = 0.0
    delay = 0.0
    for peak in peaks[0]:
        if maxpeak < v[peak]:
            maxpeak = v[peak]
            delay = float(peak) / Fs
    if delay == 0:
        delay = -1
    F0 = 1.0 / delay
    return F0
def doFFT(np_vector, num, category, fps, frame_num, savePath):
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy import signal
    # prepare the required variables
    dt = 1.0 / fps
    N = len(np_vector)
    t = np.arange(0, N * dt, dt)
    fq = np.linspace(0, 1.0 / dt, N)
    F = np.fft.fft(np_vector)
    F_abs = np.abs(F)
    F_abs = F_abs / (N / 2)
    F_abs[0] = 0
    plt.figure(figsize=(20, 6))
    # keep only spectral maxima above the cutoff, within the Nyquist half
    maximal_idx = signal.argrelmax(F_abs, order=1)[0]
    peak_cut = 0.011
    maximal_idx = maximal_idx[(F_abs[maximal_idx] > peak_cut)
                              & (maximal_idx <= N / 2)]
    plt.subplot(122)
    plt.xlabel('Frequency(Hz)')
    plt.ylabel('Amplitude')
    plt.axis([0, 1.0 / dt / 2, 0, max(F_abs) * 1.5])
    plt.plot(fq, F_abs)
    plt.subplot(121)
    plt.plot([i for i in range(len(np_vector))], np_vector)
    plt.title('cat' + str(category + 1))
    plt.savefig(savePath + '/cat' + str(category + 1) + '/toFirstFFT/'
                + str(num))
    plt.close()
def _inflectionpoints(self, xdata, verb=False):
    """find inflection points in lineprofile"""
    x_smooth = _smooth(xdata, window_len=self._smoothing)
    dx = np.gradient(x_smooth)
    dx_smooth = _smooth(dx, window_len=self._smoothing)
    wall1 = self._wall1 + self._wallpos
    wall2 = self._wall2 + self._wallpos
    infmax = np.argmax(dx_smooth[wall1:wall2]) + wall1 - 1
    (localmaxs,) = argrelmax(dx_smooth[wall1:wall2], order=self._searchrange)
    if verb:
        print('... smoothing: %i, order: %i'
              % (self._smoothing, self._searchrange))
        print('... find %i local maximums' % len(localmaxs))
    # check wall
    if np.abs(infmax - wall2) < 0.1 * self._arrayWidth:  # 10% of array width
        infmax = _find_before(localmaxs + wall1, infmax)
    return (localmaxs + wall1, infmax)
def extract_lines_wlen(self, wlen, flux, boxwid=3, shallow=0.01,
                       border=20, pctile=False):
    xpix = 1 + np.arange(wlen.size)
    smooth_flux = self._make_smooth_flux(xpix, flux,
                                         boxwid=boxwid, pctile=pctile)
    maxima_idx = ssig.argrelmax(smooth_flux, order=3)[0]
    peak_fluxes = np.array([smooth_flux[x] for x in maxima_idx])
    highest_flx = np.max(peak_fluxes)
    # discard peaks shallower than the given fraction of the highest peak
    flx_cut = shallow * highest_flx
    keepers = (peak_fluxes >= flx_cut).nonzero()[0]
    peak_center_idx = maxima_idx[keepers]
    # discard peaks too close to the spectrum edges
    x_lo, x_hi = border, xpix.max() - border
    safe_edge = (x_lo < peak_center_idx) & (peak_center_idx < x_hi)
    keep_center_idx = peak_center_idx[safe_edge]
    ctr_wlen = self._calc_centroids(wlen, flux, keep_center_idx, boxwid)
    ctr_flux = flux[keep_center_idx]
    return (ctr_wlen, ctr_flux)
def imf_create(t, data):
    mins = signal.argrelmin(data)[0]
    mins_ = [float(i * 60 / 8064) for i in mins]
    maxs = signal.argrelmax(data)[0]
    maxs_ = [float(i * 60 / 8064) for i in maxs]
    # cubic-spline envelopes through the minima and maxima
    spl_min = interpolate.CubicSpline(mins_, data[mins])
    spl_max = interpolate.CubicSpline(maxs_, data[maxs])
    # subtract the mean envelope (one sifting step of EMD)
    mid = (spl_max(t) + spl_min(t)) / 2
    return data - mid
def find_peaks(chi, sides=6, intensity_threshold=0):
    # Find all potential peaks
    preliminary_peaks = argrelmax(chi, order=20)[0]

    # peaks must have at least `sides` pixels of data to work with
    preliminary_peaks2 = preliminary_peaks[np.where(
        preliminary_peaks < len(chi) - sides)]

    # make certain that a peak has a drop off which causes the peak height
    # to be more than twice the height at `sides` pixels away
    criteria = chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 + sides]
    criteria *= chi[preliminary_peaks2] >= 2 * chi[preliminary_peaks2 - sides]
    criteria *= chi[preliminary_peaks2] >= intensity_threshold

    peaks = preliminary_peaks2[np.where(criteria)]

    left_idxs = peaks - sides
    right_idxs = peaks + sides
    peak_centers = peaks
    left_idxs[left_idxs < 0] = 0
    right_idxs[right_idxs > len(chi)] = len(chi)
    return left_idxs, right_idxs, peak_centers
def find_extrema(X, ret_min=False):
    """
    Identify extrema within a time-course and reject extrema whose magnitude
    is below a set threshold.

    Parameters
    ----------
    X : ndarray
        Input signal
    ret_min : bool
        Flag to indicate whether maxima (False) or minima (True) should be
        identified (Default value = False)

    Returns
    -------
    locs : ndarray
        Location of extrema in samples
    extrema : ndarray
        Value of each extrema
    """
    if ret_min:
        ind = signal.argrelmin(X, order=1)[0]
    else:
        ind = signal.argrelmax(X, order=1)[0]

    # Only keep peaks with magnitude above machine precision
    if len(ind) / X.shape[0] > 1e-3:
        good_inds = ~(np.isclose(X[ind], X[ind - 1])
                      * np.isclose(X[ind], X[ind + 1]))
        ind = ind[good_inds]

    return ind, X[ind]
def getWave(data, waveLengthList, stftX, order=11, rate=0.5):
    # Fourier transform
    ft = stft(data, stftX)
    ftAbsolute = np.abs(ft)
    # extract the prominent peak
    mean = max(ftAbsolute) * rate
    ids = signal.argrelmax(ftAbsolute, order=order)  # local maxima
    ids = ids[0]
    waveLength = -1
    for id in ids:
        if ftAbsolute[id] > mean:
            waveLength = id
    # error handling
    if waveLength == -1:
        return (0, 0)
    # phase calculation
    phase = (np.angle(ft[waveLength]) / (np.pi * 2) + 1) % 1 - 0.5
    # result
    start = int(0 - phase * waveLengthList[waveLength])
    return (start, waveLengthList[waveLength])
def filter_noise(df, samples_per_sec):
    """
    :param df: Full Dataframe
    :param samples_per_sec: Samples per second in dataframe
    :return: Gets only the relevant 10 seconds
    """
    peaks = argrelmax(np.array(df.y))[0]
    df['is_peak'] = 0
    for i in range(0, len(peaks)):
        if df.y[peaks[i]] > df.y[peaks].mean():
            df.loc[peaks[i], 'is_peak'] = 1
    index = return_end_of_walking_index(df, samples_per_sec * 10,
                                        130, 0, 0, 501)
    df_filtered = df[int(index - (samples_per_sec * NUM_OF_SEC_BEFORE)):
                     int(index + samples_per_sec * NUM_OF_SEC_AFTER)]
    return df_filtered
def profile(p, thresh=-0.7, upper=1.5, smooth=20, peaksize=5):
    dam = 0
    if p.size < smooth:
        smooth = p.size
    smooth_only = smoothg(p, smooth)  # smooth
    p = smooth_only
    # if whole luminance is high (e.g., on a vessel)
    if p.min() >= upper:
        dam += 1
    # count high peaks only once
    t = p > upper
    dam += np.diff(t).nonzero()[0][::2].size
    p[p > upper] = upper
    p[p < thresh] = thresh  # eliminate small peaks
    # find number of peaks (i.e., local maxima)
    dam += argrelmax(p, order=peaksize)[0].size
    return dam, p, smooth_only
def getSpecStrength2(self, refHR, tolHR=6, fmt=None):
    '''
    Compute the spectral amplitude of the signal around each reference
    point. If there is no local peak in the area of interest, return
    invalid.

    :param refHR: array [nx1], reference points (bpm).
    :param tolHR: tolerance around each reference point (bpm).
    :param fmt: format for screen print out
    :return:
        + pk: nPk x 4 array; each row contains [ref point, minDis, pk, pkVal]
            o ref point (bpm)
            o minDis (bpm): distance from the spectral peak to the ref point
            o pk (bpm): location of the spectral peak
            o pkVal (dB): spectral amplitude of the spectral peak
        + strOut
    '''
    pk = []
    strOut = ''
    i = -1
    for iref in refHR:
        i += 1
        bpmRange, bpmBin = self.freq2bpmRange(iref - tolHR, iref + tolHR)
        Px = self.X[bpmBin]
        mxIdx = signal.argrelmax(Px)[0]  # locations of all local peaks
        if len(mxIdx) > 0:  # if there are local peaks
            mnDis = tolHR
            mnDisIdx = 100
            for idxTmp in mxIdx:  # get the local peak closest to trueHR
                dis = abs(bpmRange[idxTmp] - iref)
                if dis < mnDis:
                    mnDis = dis
                    mnDisIdx = idxTmp
            pk.append([iref, mnDis, bpmRange[mnDisIdx],
                       20 * np.log10(Px[mnDisIdx])])
            # todo: handle edges -> assuming argrelmax does not consider
            # an edge as a peak
        else:  # no local peaks
            pk.append([iref, 999, 0, 999])  # refHR, dis, HR, Px
        strOut += fmt % (pk[i][0], pk[i][1], pk[i][2], pk[i][3]) + '\n'
    return pk, strOut
def get_hist_peaks(data, smooth=True, sigma=1, order=5, debug=False):
    hist, bins = skiexp.histogram(data)
    hist[0] = hist[1:].max()
    if debug:
        hist_o = hist.copy()
    if smooth:
        hist = scindi.filters.gaussian_filter1d(hist, sigma=sigma)
    peaks_idx = scisig.argrelmax(hist, order=order)[0]
    peaks = bins[peaks_idx]
    if debug:
        plt.figure()
        plt.bar(bins, hist_o)
        plt.plot(bins, hist, 'g')
        for p in peaks:
            plt.plot(p, hist[p], 'ro')
    return peaks