from numpy import zeros, greater, arange, array, floor, newaxis
import scipy.signal as ss

def makescore(temp, dims):
    score = zeros(dims) + 1
    lonax = greater(dims[1], dims[0])
    londim = dims[lonax]
    sdim = dims[1 - lonax]
    ldiag = arange(0, londim, 1, int)
    sdiag = array(floor(ldiag * (float(sdim) / londim)), int)
    dvals = [ldiag, sdiag]
    if lonax:
        dvals = dvals[::-1]
    score[dvals] = 20. * temp ** .5
    if lonax:
        score = score.T
    #for i in range(len(score)):
    #    score[i][i] = 20.* temp **.5
    if temp > .1:
        # window lengths must be integers for scipy.signal.gaussian
        g = ss.gaussian(int((londim / 2) * temp ** 2), (londim / 2) * temp ** 2)[:, newaxis] * \
            ss.gaussian(int((sdim / 2) * temp ** 2), (sdim / 2) * temp ** 2)[newaxis, :]
        g /= g.sum()
        score = ss.convolve2d(score, g, 'same', 'wrap')
    for i in range(1, len(score)):
        score[i, arange(sdiag[i])] *= .25
    if lonax:
        score = score.T
    return score
def vsumsmooth_offset(x, w, center, csum, delta, offset, spacing=0.01):
    exact = vsumexact_offset(x, w, center, csum, offset)
    if np.shape(x) == ():
        dx = spacing
    else:
        dx = x[1] - x[0]  # grid spacing
    gaussian = signal.gaussian(int(10 * delta / dx), delta / dx) / (delta / dx * np.sqrt(2 * np.pi))
    return signal.fftconvolve(exact, gaussian, mode='same')
def vdifsmooth(x, w, center, cdif, delta, spacing=0.01):
    exact = vdifexact(x, w, center, cdif)
    if np.shape(x) == ():
        dx = spacing
    else:
        dx = x[1] - x[0]  # grid spacing
    gaussian = signal.gaussian(int(10 * delta / dx), delta / dx) / (delta / dx * np.sqrt(2 * np.pi))
    return signal.fftconvolve(exact, gaussian, mode='same')
def fftgauss(img, sigma):
    """https://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.fftconvolve.html"""
    kernel = np.outer(signal.gaussian(img.shape[0], sigma),
                      signal.gaussian(img.shape[1], sigma))
    return signal.fftconvolve(img, kernel, mode='same')
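# Usage sketch for fftgauss above (the random image is made up purely for
# illustration; imports mirror what the snippet already assumes).
import numpy as np
from scipy import signal

img = np.random.rand(64, 64)
blurred = fftgauss(img, sigma=3)
assert blurred.shape == img.shape  # mode='same' preserves the image shape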
def addGaussians(query_fn, cand_list, tree, K):
    songs_list = getNeighbors(query_fn, cand_list, tree, K)
    M = 23000
    query_ann = getAnnotationList(gt_path, [query_fn])
    query_labels = [elem[-1] for elem in query_ann[0]]
    query_ann = np.floor((np.array(getAnnotation(query_ann)) * 1000)).astype(int)
    length = query_ann[-1]
    total = np.zeros(int(np.ceil(length)))
    neighbors_annotations_rescaled = []
    neighbors_annotations = getAnnotationList(gt_path, songs_list)
    for i, song in enumerate(songs_list):
        gt_list = getAnnotationList(gt_path, [song])
        # convert to milliseconds to maintain resolution
        ann = np.floor((np.array(getAnnotation(gt_list)) * 1000)).astype(int)
        neighbor_dur = ann[-1]
        ann_with_sides = ann
        ann = ann[1:-1]
        a = np.zeros(int(np.ceil(length)))
        r = float(length) / float(neighbor_dur)  # rescale according to query duration
        ann = np.floor(ann * r)
        ann_with_sides = np.floor(ann_with_sides * r)
        labels = [x[-1] for x in gt_list[0]]  # get the labels
        annotation_rescaled = []
        for elem in neighbors_annotations[i]:
            label = elem[-1]  # save the label so it doesn't get affected by rescaling
            elem[0] = int(np.floor(float(elem[0]) * 1000 * r))  # rescale the rest
            elem[1] = int(np.floor(float(elem[1]) * 1000 * r))
            annotation_rescaled.append([elem[0], elem[1], label])
        neighbors_annotations_rescaled.append(annotation_rescaled)
        for i, loc in enumerate(ann, 1):
            section_length = ann_with_sides[i] - ann_with_sides[i - 1]
            sigma = 0.1 * section_length
            # M = int(np.floor(0.6*section_length))
            g1 = signal.gaussian(M, std=sigma)
            half1 = int(np.floor(len(g1) / 2))
            section_length = ann_with_sides[i + 1] - ann_with_sides[i]
            sigma = 0.1 * section_length
            g2 = signal.gaussian(M, std=sigma)
            half2 = int(np.floor(len(g2) / 2))
            g = np.concatenate((g1[:half1], g2[half2:]))
            if loc < np.floor(M / 2):
                a += np.array(np.concatenate((g[int(np.floor(M / 2) - loc):],
                                              np.zeros(int(length - loc - np.floor(M / 2))))))
            elif loc + np.floor(M / 2) > length:
                a += np.array(np.concatenate((np.zeros(int(loc - np.floor(M / 2))),
                                              g[:int(length + np.floor(M / 2) - loc)])))
            else:
                a += np.array(np.concatenate((np.zeros(int(loc - np.floor(M / 2))),
                                              g,
                                              np.zeros(int(length - loc - np.floor(M / 2))))))
        total += a
    total = total / float(max(total))
    peaks = getPeaks(total, neighbors_annotations)
    all_songs_segmented = [segmentLabel(elem) for elem in neighbors_annotations_rescaled]
    res_boundaries = sorted(peaks)
    res_boundaries.insert(0, 0)
    res_boundaries.append(length)
    res_labels = mergeLabels(res_boundaries, all_songs_segmented)
    res_annotations = formatAnnotation(res_boundaries, res_labels)
    return res_annotations
def _gaussian_window(self, width, sigma):
    """
    Generates a gaussian window

    sigma is based on the data being in a range 0 to 1
    """
    return (ssignal.gaussian(width[0], sigma * width[0]).reshape((-1, 1, 1)) *
            ssignal.gaussian(width[1], sigma * width[1]).reshape((-1, 1)) *
            ssignal.gaussian(width[2], sigma * width[2]))
def heat_labels_gauss(click, img_size=IMG_SIZE, k_size=KERNEL_SIZE, label_size=LABEL_SIZE):
    # take list of pixel coordinates and return 70x70 heatmap
    img = np.zeros((img_size, img_size))
    for j in range(click.shape[0]):
        x = img_size - 1 - click[j, 1]
        y = click[j, 0]
        img[x, y] = 1
    kernel = np.outer(signal.gaussian(img_size + 1, k_size),
                      signal.gaussian(img_size + 1, k_size))
    img = signal.convolve2d(img, kernel, mode='same')
    # integer division so the slice indices stay ints
    offset = (img_size - img_size // label_size * (label_size - 1)) // 2
    step = img_size // label_size
    return img[offset:(img_size - offset + step):step,
               offset:(img_size - offset + step):step]
def chutes_iniciais(n=2, size=1024, mu=None):
    """Return the first n Legendre polynomials modulated by a gaussian.

    Params
    ------
    n : int
        the number of vectors
    size : int
        the size of the vectors
    mu : float
        center of the gaussian, between 0 and 1

    Returns
    -------
    An array with n arrays containing the modulated polynomials
    """
    sg = np.linspace(-1, 1, size)  # short grid
    g = gaussian(size, std=int(size / 100))  # gaussian
    if mu:
        sigma = np.ptp(sg) / 100
        g = (1.0 / np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(-(sg - mu) ** 2 / (2 * sigma ** 2))
    vls = [g * legendre(i)(sg) for i in range(n)]
    return np.array(vls, dtype=np.complex_)
def testGauss(x, y, s, npts):
    #b = gaussian(39, 10)
    b = gaussian(75, 15)
    ga = filters.convolve1d(y, b / b.sum())
    plt.plot(x, ga)
    print("gaerr", ssqe(ga, s, npts))
    return ga
def __init__(self, audiofile=None, fs=22050, bandwidth=300, freqRange=5000,
             dynamicRange=48, noiseFloor=-72, parent=None):
    super(Spec, self).__init__()
    backend = 'pyqt4'
    app = vv.use(backend)
    Figure = app.GetFigureClass()
    self.fig = Figure(self)
    self.fig.enableUserInteraction = True
    self.fig._widget.setMinimumSize(700, 350)
    self.axes = vv.gca()
    self.audiofilename = audiofile
    self.freqRange = freqRange
    self.fs = fs
    self.NFFT = int(1.2982804 / bandwidth * self.fs)
    self.overlap = int(self.NFFT / 2)
    self.noiseFloor = noiseFloor
    self.dynamicRange = dynamicRange
    self.timeLength = 60
    self.resize(700, 250)
    layout = QtGui.QVBoxLayout()
    layout.addWidget(self.fig._widget)
    self.setLayout(layout)
    self.win = gaussian(self.NFFT, self.NFFT / 6)
    self.show()
def gaussFil(signal, sr=1893.9393939393942, freq=50):
    """
    Implements a lowpass Gaussian filter over the signal with cutoff
    frequency freq: creates a gaussian window the size of the cutoff period
    and convolves the signal with it.

    Parameters
    ----------
    signal : 1d array
        Signal to filter
    sr : float
        Sampling rate of the signal
    freq : float
        Frequency cutoff for the filter

    Returns
    -------
    res : 1d array
        Filtered signal
    """
    M = int(sr / freq)  # window length must be an integer number of samples
    std = M / 2
    ker = gaussian(M, std)
    res = np.convolve(signal, ker, mode='same')
    return res
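# Minimal check of gaussFil above (assumes scipy.signal.gaussian is imported
# as `gaussian` in this module): lowpass a noisy low-frequency sine at the
# default sampling rate.
import numpy as np

sr = 1893.9393939393942
t = np.arange(0, 1, 1 / sr)
noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
clean = gaussFil(noisy)  # default sr, freq=50 -> a ~37-sample window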
def moving_average(series, sigma=3, window_time=39):
    # Moving weighted gaussian average with window = 39
    b = gaussian(window_time, sigma)
    average = filters.convolve1d(series, b / b.sum())
    var = filters.convolve1d(np.power(series - average, 2), b / b.sum())
    return average, var
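# Quick sketch of moving_average above on a noisy ramp (assumes
# scipy.ndimage.filters as `filters` and scipy.signal.gaussian as `gaussian`
# are already imported by the surrounding module; values are illustrative).
import numpy as np

series = np.linspace(0, 1, 500) + 0.1 * np.random.randn(500)
avg, var = moving_average(series, sigma=3)
# avg tracks the ramp; var estimates the local gaussian-weighted variance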
def lpc_formants(signal, sr, num_formants, max_freq, time_step,
                 win_len, window_shape='gaussian'):
    output = {}
    new_sr = 2 * max_freq
    alpha = np.exp(-2 * np.pi * 50 * (1 / new_sr))
    proc = lfilter([1., -alpha], 1, signal)
    if sr > new_sr:
        proc = librosa.resample(proc, sr, new_sr)
    nperseg = int(win_len * new_sr)
    nperstep = int(time_step * new_sr)
    if window_shape == 'gaussian':
        window = gaussian(nperseg + 2, 0.45 * (nperseg - 1) / 2)[1:nperseg + 1]
    else:
        window = np.hanning(nperseg + 2)[1:nperseg + 1]
    indices = np.arange(int(nperseg / 2), proc.shape[0] - int(nperseg / 2) + 1, nperstep)
    num_frames = len(indices)
    for i in range(num_frames):
        if nperseg % 2 != 0:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2) + 1]
        else:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2)]
        frqs, bw = process_frame(X, window, num_formants, new_sr)
        formants = []
        for j, f in enumerate(frqs):
            if f < 50:
                continue
            if f > max_freq - 50:
                continue
            formants.append((np.asscalar(f), np.asscalar(bw[j])))
        missing = num_formants - len(formants)
        if missing:
            formants += [(None, None)] * missing
        output[indices[i] / new_sr] = formants
    return output
def smooth_pdf(a, sd=None):
    """Get a smoothed pdf of an array of data for visualization

    Keyword arguments:
    sd -- S.D. of the gaussian kernel used to perform the smoothing
          (default is 1/20 of the data range)

    Return 2-row (x, pdf(x)) smoothed probability density estimate.
    """
    from scipy.signal import gaussian, convolve
    from numpy import array, arange, cumsum, trapz, histogram, diff, r_, c_
    if sd is None:
        sd = 0.05 * a.ptp()
    data = a.copy().flatten()  # get 1D copy of array data
    nbins = len(data) > 1000 and len(data) or 1000  # num bins >~ O(len(data))
    f, l = histogram(data, bins=nbins, density=True)  # fine pdf
    sd_bins = sd * (float(nbins) / a.ptp())  # convert sd to bin units
    kern_size = int(10 * sd_bins)  # sufficient convolution kernel size
    g = gaussian(kern_size, sd_bins)  # generate smoothing kernel
    c = cumsum(f, dtype='d')  # raw fine-grained cdf
    cext = r_[array((0,) * (2 * kern_size), 'd'), c,
              array((c[-1],) * (2 * kern_size), 'd')]  # wrap data to kill boundary effect
    cs = convolve(cext, g, mode='same')  # smooth the extended cdf
    ps = diff(cs)  # differentiate smooth cdf to get smooth pdf
    dl = l[1] - l[0]  # get bin delta
    l = r_[arange(l[0] - kern_size * dl, l[0], dl), l,
           arange(l[-1] + dl, l[-1] + kern_size * dl, dl)]  # pad index to match bounds
    ps = ps[kern_size:kern_size + len(l)]  # crop pdf to same length as index
    ps /= trapz(ps, x=l)  # normalize pdf integral to unity
    return c_[l, ps].T  # return 2-row concatenation of x and pdf(x)
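# Example use of smooth_pdf above: estimate a smoothed density from gaussian
# samples and plot it (the matplotlib usage is illustrative).
import numpy as np
import matplotlib.pyplot as plt

samples = np.random.randn(5000)
x, pdf = smooth_pdf(samples)  # 2-row return unpacks into x and pdf(x)
plt.plot(x, pdf)
plt.show()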
def gbm(R, sigma):
    # gaussian_blur_matrix
    # R is the blur radius
    # sigma is the standard deviation
    temp1 = signal.gaussian(2 * R + 1, sigma)
    temp2 = np.sum(temp1)  # for normalization, so the (2R+1)-point window sums to 1
    return temp1 / temp2
def make_gaussian(k, std):
    '''Create a gaussian kernel.

    Input:
    k - the radius of the kernel.
    std - the standard deviation of the kernel.

    Output:
    output - a numpy array of shape (2k+1, 2k+1) and dtype float.

    If gaussian_1d is a gaussian filter of length 2k+1 in one dimension,
    kernel[i,j] should be filled with the product of gaussian_1d[i] and
    gaussian_1d[j].

    Once all the points are filled, the kernel should be scaled so that the
    sum of all cells is equal to one.'''
    kernel = None
    # Insert your code here.----------------------------------------------------
    size = 2 * k + 1
    gaussian1d = signal.gaussian(size, std)
    gaussian2d = np.ndarray((size, 1), buffer=gaussian1d) * np.ndarray((1, size), buffer=gaussian1d)
    kernel = gaussian2d / np.sum(gaussian2d)
    #---------------------------------------------------------------------------
    return kernel
def make_gaussian(k, std):
    '''Create a gaussian kernel.

    Input:
    k - the radius of the kernel.
    std - the standard deviation of the kernel.

    Output:
    output - a numpy array of shape (2k+1, 2k+1) and dtype float.

    If gaussian_1d is a gaussian filter of length 2k+1 in one dimension,
    kernel[i,j] should be filled with the product of gaussian_1d[i] and
    gaussian_1d[j].

    Once all the points are filled, the kernel should be scaled so that the
    sum of all cells is equal to one.'''
    kernel = np.zeros((2 * k + 1, 2 * k + 1), dtype=float)
    gaussian1d = signal.gaussian(2 * k + 1, std)
    for i in range(0, kernel.shape[0]):
        for j in range(0, kernel.shape[1]):
            kernel[i, j] = gaussian1d[i] * gaussian1d[j]
    kernel = kernel / kernel.sum()
    # Insert your code here.----------------------------------------------------
    #---------------------------------------------------------------------------
    return kernel
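# The two make_gaussian variants above build the same separable kernel; a
# sketch of the equivalent vectorized form using np.outer, checked against
# the loop-based version (assumes that definition is in scope):
import numpy as np
from scipy import signal

def make_gaussian_outer(k, std):
    g = signal.gaussian(2 * k + 1, std)
    kernel = np.outer(g, g)  # kernel[i, j] = g[i] * g[j]
    return kernel / kernel.sum()

assert np.allclose(make_gaussian_outer(2, 1.0), make_gaussian(2, 1.0))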
def smooth_color_prior(size=64, sigma=5, do_plot=False):
    prior_prob = np.load(os.path.join(data_dir, "CelebA_%s_prior_prob.npy" % size))
    # add an epsilon to prior prob to avoid 0 values and possible NaN
    prior_prob += 1E-3 * np.min(prior_prob)
    # renormalize
    prior_prob = prior_prob / (1.0 * np.sum(prior_prob))
    # Smooth with gaussian
    f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)
    xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)
    yy = f(xx)
    window = gaussian(2000, sigma)  # 2000 pts in the window, sigma=5
    smoothed = convolve(yy, window / window.sum(), mode='same')
    fout = interp1d(xx, smoothed)
    prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])
    prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)
    # Save
    file_name = os.path.join(data_dir, "CelebA_%s_prior_prob_smoothed.npy" % size)
    np.save(file_name, prior_prob_smoothed)
    if do_plot:
        plt.plot(prior_prob)
        plt.plot(prior_prob_smoothed, "g--")
        plt.plot(xx, smoothed, "r-")
        plt.yscale("log")
        plt.show()
def simulSpectra(self, fileSpectra, nBin, continuumLevel, sigma,
                 linePositionChan, lineWidth, lineIntensity,
                 startFreq=0., resFreq=1.):
    """Save in fileSpectra a spectrum of nBin channels with the given
    linePositionChan, lineWidth and lineIntensity lists.
    startFreq and resFreq are optional."""
    freq = np.arange(nBin)
    spectra = np.random.normal(continuumLevel, sigma, nBin)
    index = 0
    for pos in linePositionChan:
        nChan = 4 * int(lineWidth[index] / resFreq)
        spec = lineIntensity[index] * signal.gaussian(nChan, lineWidth[index])
        startPos = pos - nChan // 4
        spectra[pos:pos + nChan] = spectra[pos:pos + nChan] + spec
        index += 1
    f = open(fileSpectra, "w")
    index = 0
    for frequency in freq:
        strOut = "%f %f \n" % (frequency, spectra[index])
        f.write(strOut)
        index += 1
    f.close()
def process_dir(sDir, iResample=None, iSmooth=50, iSigmaSecs=0.01):
    """
    take input dir and output smoothed, correlated array

    iSigmaSecs: standard deviation of gaussian in seconds
    iSmooth: smoothing window size for linear smoother
    """
    from scipy import signal
    import numpy as np
    iSampleRate, aTime, aOrigAudio = audio2array(sDir, iResample)
    # only positive
    aAudio = [abs(i) for i in aOrigAudio]
    # audio files must be right format
    aOrigAudio = np.asarray(aOrigAudio, dtype=np.int16)
    if iSmooth is not None:
        # smooth
        aAudio = smooth(aAudio, iSmooth)
    # standard deviation for gaussian function
    iSigma = float(iSigmaSecs * iSampleRate)
    aGaussian = signal.gaussian(int(10 * iSigma), iSigma)  # length must be an int
    # gaussian correlated with audio signal
    aCorr = np.correlate(aAudio, aGaussian, 'same')
    return iSampleRate, aTime, aAudio, aCorr, aOrigAudio
def instr_model(teff, logg, z, lam_0, lam_1, res, observed_spectrum):
    kernel = gaussian(int(5 * res), res)
    # Apply wavelength correction just to red wavelengths:
    corrected_wavelengths = observed_spectrum.wavelength.copy()
    corrected_wavelengths[corrected_wavelengths > 5000 * u.Angstrom] -= lam_0 * u.Angstrom
    corrected_wavelengths[corrected_wavelengths <= 5000 * u.Angstrom] -= lam_1 * u.Angstrom
    combined_spectrum = model_grid.spectrum(teff, logg, z,
                                            wavelengths=corrected_wavelengths.value)
    combined_spectrum.convolve(kernel=kernel)
    A = np.vstack([combined_spectrum.flux, corrected_wavelengths.value]).T
    combined_scaled = combined_spectrum.flux.copy()
    residuals = 0
    for i_min, i_max in observed_spectrum.wavelength_splits:
        c, residuals_i = np.linalg.lstsq(A[i_min:i_max, :],
                                         observed_spectrum.flux[i_min:i_max, np.newaxis])[0:2]
        residuals += residuals_i
        combined_scaled[i_min:i_max] = (c[0] * combined_spectrum.flux[i_min:i_max] +
                                        c[1] * corrected_wavelengths[i_min:i_max].value)
    return combined_scaled, residuals
def smooth(self, stddev=100, tol=0.01):
    """
    Smooths each spectrum by applying a Gaussian filter. Note that stddev
    is given in terms of buckets, even though the mz_axis has a log scale.
    Smoothed values above tol are kept.
    """
    from scipy import signal, stats
    mz_len = len(self.mz_axis)
    # apply monkeypatch to speed up fftconvolve()
    import pyfftw
    signal.signaltools.fftn = pyfftw.interfaces.scipy_fftpack.fftn
    signal.signaltools.ifftn = pyfftw.interfaces.scipy_fftpack.ifftn
    # truncate kernel at 4 sigma
    kernel = signal.gaussian(8 * stddev, stddev)

    def smooth_one(spectrum):
        x, y, t, ions = spectrum
        values = np.zeros((mz_len,))
        for bucket, mz, intensity in ions:
            values[bucket] = intensity
        values = signal.fftconvolve(values, kernel, mode='same')
        assert (values >= -1e-4).all()

        def make_ions():
            for bucket in np.flatnonzero(values >= tol):
                yield (bucket, self.mz_axis[bucket], values[bucket])

        return (x, y, t, list(make_ions()))

    smoothed_spectra = self.spectra.map(smooth_one)
    return MSIDataset(self.mz_range, smoothed_spectra, self._shape, self.mask)
def __init__(self, sigma, texture, resolution=None):
    sigma = float(sigma)
    if resolution is None:
        resolution = sigma / 10.0
    max_length = 100
    texture = instantiate_spec(texture)
    num_cells = int(max_length / resolution)  # np.linspace needs an int count
    cell_coord = np.linspace(0, max_length, num_cells)
    unsmoothed = texture(cell_coord)
    kernel_size = int(2 * (sigma * 3) / resolution)
    #print('resol: %s' % resolution)
    #print('sigma: %s' % sigma)
    #print('kernel_size: %s' % kernel_size)
    #print kernel
    kernel = gaussian(kernel_size, sigma / resolution)
    kernel = kernel / kernel.sum()
    assert kernel.size == kernel_size
    assert_allclose(kernel.sum(), 1)
    smoothed = convolve(unsmoothed, kernel, 'same')
    assert smoothed.size == cell_coord.size
    SampledTexture.__init__(self, smoothed, resolution)
def gaussSmooth(vals, num):
    b = gaussian(num, 1)
    smoothVals = filters.convolve1d(vals, b / b.sum())
    return smoothVals

# if __name__ == '__main__':
#     singleRunPlot("writefile.txt", 70, 80, 75)
def testGauss(x, y, s, npts):
    b = gaussian(39, 10)
    #ga = filtfilt(b/b.sum(), [1.0], y)
    ga = filters.convolve1d(y, b / b.sum())
    plt.plot(x, ga)
    print("gaerr", ssqe(ga, s, npts))
    return ga
def spectrogram_scores(pattern_chunk, chan_sound, candidates):
    s_f = chan_sound.s_f
    n_window = 256
    n_overlap = 192
    sigma = 1. / 1000. * s_f
    spectrogram_kwargs = {'nperseg': n_window,
                          'noverlap': n_overlap,
                          'window': sg.gaussian(n_window, sigma),
                          'scaling': 'spectrum'}
    pattern_spectrogram = spectrogram(pattern_chunk.data[:, 0], s_f, **spectrogram_kwargs)
    logger.info('Getting spectrogram difference score for {} candidates'.format(len(candidates.index)))
    for (i, start) in enumerate(candidates['start'][:]):
        logger.debug('Start {0}: {1}'.format(i, start))
        motif_start = start
        series = chan_sound.get_chunk(motif_start, motif_start + pattern_chunk.samples)
        # f, t, sxx = spectrogram(bandpass_filter(series[:, 0], s_f), s_f, **spectrogram_kwargs)
        candidates.set_value(i, 'spectral_diff',
                             spectrogram_diff(series[:, 0],
                                              pattern_spectrogram[2],
                                              s_f,
                                              spectrogram_kwargs))
def smooth(signal, std=2):
    "Smooths a 1D signal"
    from scipy.signal import gaussian
    smoothingKernel = gaussian(std * 5, std)
    smoothingKernel /= np.sum(smoothingKernel)
    signal = np.convolve(signal, smoothingKernel, 'same')
    return signal
def make_gaussian(k, std):
    '''Create a gaussian kernel.

    Input:
    k - the radius of the kernel.
    std - the standard deviation of the kernel.

    Output:
    output - a numpy array of shape (2k+1, 2k+1) and dtype float.

    If gaussian_1d is a gaussian filter of length 2k+1 in one dimension,
    kernel[i,j] should be filled with the product of gaussian_1d[i] and
    gaussian_1d[j].

    Once all the points are filled, the kernel should be scaled so that the
    sum of all cells is equal to one.'''
    kernel = None
    # Insert your code here.----------------------------------------------------
    l = 2 * k + 1
    kernel = np.zeros(l * l)
    kernel.shape = (l, l)
    from scipy import signal
    gaussian1d = signal.gaussian(l, std)
    for i in range(l):
        for j in range(l):
            kernel[i, j] = gaussian1d[i] * gaussian1d[j]
    s = np.sum(kernel)
    kernel = np.divide(kernel, s)
    #---------------------------------------------------------------------------
    return kernel
def compute_gaussian_krnl(M):
    """Creates a gaussian kernel following Foote's paper."""
    g = signal.gaussian(M, M / 3., sym=True)
    G = np.dot(g.reshape(-1, 1), g.reshape(1, -1))
    # integer division so the quadrant indices stay ints
    G[M // 2:, :M // 2] = -G[M // 2:, :M // 2]
    G[:M // 2, M // 2:] = -G[:M // 2, M // 2:]
    return G
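# Sketch of how a Foote checkerboard kernel is typically used: slide it along
# the diagonal of a self-similarity matrix to get a novelty curve. The random
# symmetric SSM here is purely illustrative.
import numpy as np
from scipy import signal

M = 16
G = compute_gaussian_krnl(M)
ssm = np.random.rand(100, 100)
ssm = (ssm + ssm.T) / 2  # make it symmetric like a real SSM
novelty = np.array([np.sum(ssm[i:i + M, i:i + M] * G)
                    for i in range(ssm.shape[0] - M)])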
def __init__(self, brain):
    """A convergence-divergence zone.

    1. correlates temporally near packets and store the correlations
       allowing for quick and efficient lookups.
    2. Allows for generating of 2nd modality output given first modality.
    """
    self.brain = brain
    self.LEARNING_RATE = config.CE_LEARNING_RATE

    # The maximum number of timesteps we look back.
    # IE. The point where we trim the tails of the Gaussian
    # This is for computational efficiency
    gaussian = signal.gaussian(config.CE_CORRELATION_WINDOW_MAX * 2,
                               std=config.CE_CORRELATION_WINDOW_STD, sym=True)
    self.GAUSSIAN = np.split(gaussian, 2)[0][::-1]  # split the array into two and then reverse it
    assert len(self.GAUSSIAN) == config.CE_CORRELATION_WINDOW_MAX
    if config.CE_IGNORE_GAUSSIAN:
        self.GAUSSIAN *= 0
        self.GAUSSIAN[0] = 1

    # The number of packets to keep in the queue
    # The number of packets used for learning from the packet_queue depends on the
    self.PACKET_QUEUE_LENGTH = len(self.GAUSSIAN) + 1
    # ================== END CONFIG ==================================

    # A queue to store the most recent packets; old packets are automatically pushed out of the queue.
    self.packet_queue = deque(maxlen=self.PACKET_QUEUE_LENGTH)
    # A dict to store the connections/correlations between the different modalities
    self.correlations = {}
def neural_response(to_samples, response_std_ms: float = 0.33,
                    response_duration_ms: float = 1.):
    response = ss.gaussian(int(response_duration_ms * to_samples),
                           response_std_ms * to_samples)
    return response
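# Example call for neural_response above, assuming a 30 kHz recording
# (to_samples converts milliseconds to samples; the rate is an assumption
# of this sketch).
to_samples = 30000 / 1000.  # samples per millisecond at 30 kHz
kernel = neural_response(to_samples)  # 30-sample window, std = 9.9 samples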
def sim_osc_cycle(n_seconds, fs, cycle_type, **cycle_params):
    """Make one cycle of an oscillation.

    Parameters
    ----------
    n_seconds : float
        Length of cycle window in seconds.
        Note that this is NOT the period of the cycle, but the length of the
        returned array that contains the cycle, which can be (and usually is)
        much shorter.
    fs : float
        Sampling frequency of the cycle simulation.
    cycle_type : {'sine', 'asine', 'sawtooth', 'gaussian', 'exp', '2exp'}
        What type of cycle to simulate. Options:

        * sine: a sine wave cycle
        * asine: an asymmetric sine wave
        * sawtooth: a sawtooth wave
        * gaussian: a gaussian cycle
        * exp: a cycle with exponential decay
        * 2exp: a cycle with exponential rise and decay

    **cycle_params
        Keyword arguments for parameters of the oscillation cycle, all as float:

        * sine: None
        * asine: 'rdsym', rise-decay symmetry, from 0-1
        * sawtooth: 'width', width of the rising ramp as a proportion of the total cycle
        * gaussian: 'std', standard deviation of the gaussian kernel, in seconds
        * exp: 'tau_d', decay time, in seconds
        * 2exp: 'tau_r' & 'tau_d' rise time, and decay time, in seconds

    Returns
    -------
    cycle : 1d array
        Simulated oscillation cycle.
    """
    if cycle_type not in ['sine', 'asine', 'sawtooth', 'gaussian', 'exp', '2exp']:
        raise ValueError('Did not recognize cycle type.')

    if cycle_type == 'sine':
        cycle = np.sin(create_cycle_time(n_seconds, fs))
    elif cycle_type == 'asine':
        cycle = sim_asine_cycle(n_seconds, fs, cycle_params['rdsym'])
    elif cycle_type == 'sawtooth':
        cycle = sawtooth(create_cycle_time(n_seconds, fs), cycle_params['width'])
    elif cycle_type == 'gaussian':
        # window length must be an integer number of samples
        cycle = gaussian(int(n_seconds * fs), cycle_params['std'] * fs)
    elif cycle_type == 'exp':
        cycle = sim_synaptic_kernel(n_seconds, fs, 0, cycle_params['tau_d'])
    elif cycle_type == '2exp':
        cycle = sim_synaptic_kernel(n_seconds, fs, cycle_params['tau_r'],
                                    cycle_params['tau_d'])

    return cycle
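# Usage sketch for sim_osc_cycle above, restricted to the 'gaussian' branch
# so no other module helpers are needed (assumes scipy.signal.gaussian is
# imported as `gaussian`; values are illustrative):
fs = 1000.
cycle = sim_osc_cycle(0.1, fs, 'gaussian', std=0.01)  # 100-sample gaussian cycle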
def generate_one_day_one_component_time_series(pc_wave_start_date,
                                               pc_wave_start_time,
                                               wavepacket_duration,
                                               number_of_waves,
                                               phase_shift=0):
    date_time = pd.to_datetime(pc_wave_start_date + ' ' + pc_wave_start_time)
    total_timesteps = int(np.timedelta64(1, 'D') / np.timedelta64(1, 'm'))
    full_day_timeseries = np.zeros(total_timesteps)
    data_source = ['' for i in range(total_timesteps)]

    # first generate the wavepacket - a sine wave combined with a Gaussian window
    gaussian_window = signal.gaussian(wavepacket_duration + 1,
                                      std=(wavepacket_duration + 1) / 6)
    sine_wave = np.zeros(wavepacket_duration + 1)
    for minute in range(wavepacket_duration + 1):
        sine_wave[minute] = np.sin(
            (minute - phase_shift * wavepacket_duration / number_of_waves) *
            (2 * np.pi) * number_of_waves / wavepacket_duration)
    wavepacket_start_index = int(
        (date_time - pd.to_datetime(pc_wave_start_date)) / np.timedelta64(1, 'm'))
    for i in range(wavepacket_duration + 1):
        full_day_timeseries[wavepacket_start_index + i] = gaussian_window[i] * sine_wave[i] * 100
        data_source[wavepacket_start_index + i] = 'wavepacket'

    # next generate some random behaviour before and after the wavepacket
    # use an Ornstein-Uhlenbeck process (rewritten as a Langevin equation) to
    # generate the other noisy data
    # first define the parameters
    # adjust sigma and tau to change the shape of the wavepacket
    sigma = 38  # Standard deviation. From (a single) empirical observation
    mu = 0.  # Mean.
    dt = 1.  # Time step.
    tau = 50. * dt  # Time constant. This choice seems to yield reasonable-looking results
    T = 1440.  # Total time.
    n = int(T / dt)  # Number of time steps.
    t = np.linspace(0., T, n)  # Vector of times.
    # things that are used in the formulae
    sigma_bis = sigma * np.sqrt(2. / tau)
    sqrtdt = np.sqrt(dt)

    # first complete the time series by populating the timesteps before the wavepacket
    # note that we use the time-reversibility property of the O-U process
    start_index_start = 0
    end_index_start = wavepacket_start_index
    # add 1 so that there is an overlap (of 1 timestep) between the OU process and the wavepacket
    # the first datapoint of the wavepacket will be used as the first datapoint of the O-U process
    first_part = np.zeros(end_index_start - start_index_start + 1)
    first_part[0] = full_day_timeseries[wavepacket_start_index]
    # populate the first part of the O-U process (before the wavepacket)
    for i in range(len(first_part) - 1):
        first_part[i + 1] = first_part[i] + dt * (
            -(first_part[i] - mu) / tau) + sigma_bis * sqrtdt * np.random.randn()
    for i in range(len(first_part)):
        index = end_index_start - i
        full_day_timeseries[index] = first_part[i]
        if data_source[index] == 'wavepacket' and index != wavepacket_start_index:
            print('duplicate')
        elif data_source[index] == 'wavepacket' and index == wavepacket_start_index:
            data_source[index] = 'overlap'
        else:
            data_source[index] = 'OU_first_part'

    # now populate the last part of the O-U process (after the wavepacket)
    # note start_index_start, end_index_start, start_index_last and end_index_last
    # are all array indices, hence the -1 in end_index_last
    start_index_last = wavepacket_start_index + wavepacket_duration
    end_index_last = int(np.timedelta64(1, 'D') / np.timedelta64(1, 'm')) - 1
    last_part = np.zeros(end_index_last - start_index_last + 1)
    last_part[0] = full_day_timeseries[start_index_last]
    # populate the last part of the O-U process (after the wavepacket)
    for i in range(len(last_part) - 1):
        last_part[i + 1] = last_part[i] + dt * (
            -(last_part[i] - mu) / tau) + sigma_bis * sqrtdt * np.random.randn()
    for i in range(len(last_part)):
        index = start_index_last + i
        full_day_timeseries[index] = last_part[i]
        if (data_source[index] == 'wavepacket' or
                data_source[index] == 'OU_first_part') and index != start_index_last:
            print(index)
            print('duplicate')
        elif data_source[index] == 'wavepacket' and index == start_index_last:
            data_source[index] = 'overlap'
        else:
            data_source[index] = 'OU_last_part'

    return full_day_timeseries
phases, sig_powerlaw = make_random_power(nzones, this_powerlaw.k_min,
                                         this_powerlaw.k_max,
                                         this_powerlaw.slope,
                                         phases=phases,
                                         var=this_powerlaw.var,
                                         mean=this_powerlaw.mean)
plt.clf()
plt.hist(sig_powerlaw, histtype='step')
plt.savefig('p44_hist.pdf')
check_powerlaw(sig_powerlaw)
sig_gauss = Norm_Signal * signal.gaussian(nzones, width_signal)
sig = sig_powerlaw.real + sig_gauss
fit_sig = gauss_fit(signal_x, sig)
fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
ax_orig.plot(sig_gauss, c='g')  #, marker='*')
ax_orig.plot(sig_powerlaw.real, c='b')  #, marker='*')
ax_orig.plot(signal_x,
             gauss_me(signal_x, fit_sig['fit_norm'], fit_sig['fit_center'],
                      fit_sig['fit_width']), c='r')
ax_orig.plot(sig, c='k')
ax_orig.set_title('Gauss width %0.2f Combined Fit %0.2f' %
                  (width_signal, fit_sig['fit_width']))
ax_orig.margins(0, 0.1)
def group_lasso_dataset_generator(n_samples=100, n_features=100,
                                  gaussian_noise=0.5, random_state=None):
    """
    Generates synthetic data for group lasso tests.

    This function generates a matrix generated from 6 basic atoms, grouped
    as [0, 1, 3], [2, 4, 5], linearly combined with random weights. A certain
    level of gaussian noise is added to the signal.

    Parameters
    ----------
    n_samples: int, optional
        Number of samples for the output matrix.
    n_features: int, optional
        Number of features the output matrix must have.
    gaussian_noise: float, optional
        The level of noise to add to the synthetic data.
    random_state: RandomState or int, optional
        RandomState or seed used to generate RandomState for the
        reproducibility of data. If None each time RandomState is randomly
        initialised.

    Returns
    -------
    array_like, shape=(n_samples, n_features)
        Generated matrix of data
    array_like, shape=(n_samples, 6)
        Coefficients
    array_like, shape=(6, n_features)
        Dictionary
    """
    rnd = check_random_state(random_state)
    number_of_atoms = 6

    atoms = np.empty([n_features, number_of_atoms])
    t = np.linspace(0, 1, n_features)
    atoms[:, 0] = signal.sawtooth(2 * np.pi * 5 * t)
    atoms[:, 1] = np.sin(2 * np.pi * t)
    atoms[:, 2] = np.sin(2 * np.pi * t - 15)
    atoms[:, 3] = signal.gaussian(n_features, 5)
    atoms[:, 4] = signal.square(2 * np.pi * 5 * t)
    atoms[:, 5] = np.abs(np.sin(2 * np.pi * t))

    groups = [[0, 1, 3], [2, 4, 5]]
    signals = np.empty((n_samples, n_features))
    coefficients = np.zeros((n_samples, number_of_atoms))
    for i in range(n_samples // 2):
        coeffs = rnd.random_sample(len(groups[0])) * 10
        coefficients[i, groups[0]] = coeffs
    for i in range(n_samples // 2, n_samples):
        coeffs = rnd.random_sample(len(groups[1])) * 10
        coefficients[i, groups[1]] = coeffs
    signals = coefficients.dot(atoms.T)
    # add the gaussian noise described in the docstring
    signals += rnd.normal(0, gaussian_noise, signals.shape)

    return signals, coefficients, atoms.T
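# Example call for group_lasso_dataset_generator above (assumes numpy,
# scipy.signal and sklearn's check_random_state are imported as in the
# surrounding module; the seed is arbitrary):
X, coef, D = group_lasso_dataset_generator(n_samples=40, n_features=96,
                                           random_state=0)
# X is coef @ D plus noise; shapes: (40, 96), (40, 6), (6, 96)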
plt.subplot(1, 2, 1)
io.imshow(imagen_filt)  # Show the image with the filter already applied.
plt.axis('off')
plt.subplot(1, 2, 2)
io.imshow(imagen)  # Show the original image for later comparison.
plt.axis('off')
io.show()

# Load a small test image from the directory
filename = os.path.join('repoEQ3/', 'prueba.jpg')  # folder containing the test image
imagen = io.imread(filename)

# Sharpening filter
k = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
show_convolve2d(imagen, k)

# Blur filter (mean filter)
tam = 5
k = np.ones((tam, tam)) / (tam ** 2)
show_convolve2d(imagen, k)

# Gaussian smoothing
tam = 5
k = signal.gaussian(tam, 1).reshape(-1, 1) @ signal.gaussian(tam, 1).reshape(1, -1)
k = k / np.sum(k)
show_convolve2d(imagen, k)
def generate_geo(fileNameGeo='/scratch/lforesti/maple_data/python-database/geo.pkl'):
    '''
    Generate geo features (shapefile, DEM, data limits)
    '''
    if os.path.isfile(fileNameGeo):
        geo = joblib.load(fileNameGeo)
        print(fileNameGeo, 'loaded.')
    else:
        geo = Data_Structure()
        # Limits small domain for statistical analysis
        geo.binSpacingKM = 8
        geo.xlimsKM = [400, 840]
        geo.ylimsKM = [20, 330]
        # Limits large domain for plotting
        geo.xlimsKM_large = [310, 910]
        geo.ylimsKM_large = [-100, 440]
        geo.extent_smalldomain = [geo.xlimsKM[0] * 1000, geo.xlimsKM[1] * 1000,
                                  geo.ylimsKM[0] * 1000, geo.ylimsKM[1] * 1000]
        geo.extent_largedomain = [geo.xlimsKM_large[0] * 1000, geo.xlimsKM_large[1] * 1000,
                                  geo.ylimsKM_large[0] * 1000, geo.ylimsKM_large[1] * 1000]

        #################### GET DEM ##################################################
        print('Preparing DEM and shapefile...')
        # Read SRTM DEM
        geo.fileNameDEM_SRTM = '/store/mch/msrad/radar/precip_attractor/gis_data/dem_proc/dem_merged_projected_clip1000CCS4.tif'
        x_dem, y_dem, geo.demImg = gis.gdal_read_raster(geo.fileNameDEM_SRTM)
        x_dem_min = min(x_dem)
        y_dem_min = min(y_dem)
        x_dem_max = max(x_dem)
        y_dem_max = max(y_dem)
        geo.demImg = geo.demImg.astype(float)
        geo.demImg[geo.demImg < -1000] = np.nan

        # Smoothed DEM for contour levels
        from scipy import signal
        kernel_size = 5
        conv_kernel = np.outer(signal.gaussian(kernel_size, kernel_size / 4),
                               signal.gaussian(kernel_size, kernel_size / 4))
        conv_kernel = conv_kernel / np.sum(conv_kernel)
        geo.demImg_smooth = signal.convolve2d(geo.demImg, conv_kernel,
                                              boundary='symm', mode='same')

        # Limits of CCS4 domain (from extent)
        Xmin = 255000
        Xmax = 965000
        Ymin = -160000
        Ymax = 480000
        geo.extent_CCS4 = [Xmin, Xmax, Ymin, Ymax]

        #################### GET SHAPEFILE ##################################################
        # Shapefile name and projections
        geo.fileNameShapefile = '/store/mch/msrad/radar/precip_attractor/gis_data/shapefiles_proc/CCS4_merged_proj_clip_G05_countries/CCS4_merged_proj_clip_G05_countries.shp'
        geo.proj4stringWGS84 = "+proj=longlat +ellps=WGS84 +datum=WGS84"
        geo.proj4stringCH = "+proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 \
+k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel +towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs"

        #################### GET RADAR MASK ##################################################
        print('Getting radar mask...')
        # Get one sample radar file for the composite mask
        radar_object = io.read_gif_image('200901010000', product='AQC', minR=0.1,
                                         fftDomainSize=600, resKm=1, timeAccumMin=5,
                                         inBaseDir='/scratch/lforesti/data/',
                                         noData=-999.0, cmaptype='MeteoSwiss',
                                         domain='CCS4')
        geo.radarMask = radar_object.mask
        geo.radarExtent = radar_object.extent

        # Write out geo file
        joblib.dump(geo, fileNameGeo)
        print(fileNameGeo, 'written.')
    return geo
def __init__(self, threshold=10.0, use_cuda=True):
    super(CannyFilter, self).__init__()
    self.threshold = threshold
    self.use_cuda = use_cuda

    filter_size = 5
    generated_filters = gaussian(filter_size, std=1.0).reshape([1, filter_size])

    self.gaussian_filter_horizontal = nn.Conv2d(in_channels=1, out_channels=1,
                                                kernel_size=(1, filter_size),
                                                padding=(0, filter_size // 2))
    self.gaussian_filter_horizontal.weight.data.copy_(torch.from_numpy(generated_filters))
    self.gaussian_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])))
    self.gaussian_filter_vertical = nn.Conv2d(in_channels=1, out_channels=1,
                                              kernel_size=(filter_size, 1),
                                              padding=(filter_size // 2, 0))
    self.gaussian_filter_vertical.weight.data.copy_(torch.from_numpy(generated_filters.T))
    self.gaussian_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])))

    sobel_filter = np.array([[1, 0, -1],
                             [2, 0, -2],
                             [1, 0, -1]])
    self.sobel_filter_horizontal = nn.Conv2d(in_channels=1, out_channels=1,
                                             kernel_size=sobel_filter.shape,
                                             padding=sobel_filter.shape[0] // 2)
    self.sobel_filter_horizontal.weight.data.copy_(torch.from_numpy(sobel_filter))
    self.sobel_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])))
    self.sobel_filter_vertical = nn.Conv2d(in_channels=1, out_channels=1,
                                           kernel_size=sobel_filter.shape,
                                           padding=sobel_filter.shape[0] // 2)
    self.sobel_filter_vertical.weight.data.copy_(torch.from_numpy(sobel_filter.T))
    self.sobel_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])))

    # filters were flipped manually
    filter_0 = np.array([[0, 0, 0], [0, 1, -1], [0, 0, 0]])
    filter_45 = np.array([[0, 0, 0], [0, 1, 0], [0, 0, -1]])
    filter_90 = np.array([[0, 0, 0], [0, 1, 0], [0, -1, 0]])
    filter_135 = np.array([[0, 0, 0], [0, 1, 0], [-1, 0, 0]])
    filter_180 = np.array([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
    filter_225 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 0]])
    filter_270 = np.array([[0, -1, 0], [0, 1, 0], [0, 0, 0]])
    filter_315 = np.array([[0, 0, -1], [0, 1, 0], [0, 0, 0]])
    all_filters = np.stack([filter_0, filter_45, filter_90, filter_135,
                            filter_180, filter_225, filter_270, filter_315])
    self.directional_filter = nn.Conv2d(in_channels=1, out_channels=8,
                                        kernel_size=filter_0.shape,
                                        padding=filter_0.shape[-1] // 2)
    self.directional_filter.weight.data.copy_(torch.from_numpy(all_filters[:, None, ...]))
    self.directional_filter.bias.data.copy_(torch.from_numpy(np.zeros(shape=(all_filters.shape[0],))))
def gaussSmooth(vals, num):
    b = gaussian(num, 1)
    smoothVals = filters.convolve1d(vals, b / b.sum())
    return smoothVals
ax1 = subplot(111)
ax1.set_yscale('log')
#ax.set_xscale('log')
#ax1.scatter(x/81.0, f_tsvd, marker='o', label='TSVD Regularization', color='black')
ax1.scatter(x, s, marker=(3, 1), label=r"${\sigma _i}$", color='black')
ax1.scatter(x, abs(utb), marker='*', label='$ {u_i^TP}$', color='red')
ax1.scatter(x, abs(utbs), marker='o', label='${u_i^TP}/{\sigma _i}$', color='green')
ccx = np.arange(0, 2300)
#ax1.plot(ccx/2300.0, cc, label='Exact profiles', color='red', linewidth=3.0)

# smooth curve
f2 = interp1d(x, f_tsvd)
xx = np.linspace(0, 80, 100)
yy = f2(xx)
# make a gaussian window
window = signal.gaussian(10, 20)
smoothed = signal.convolve(yy, window / window.sum(), mode='same')
#plt.plot(xx/80, smoothed, linewidth=3.0, label='Gaussian smooth')

#ax1.set_ylim(-0.1, 1.7)
ax1.set_xlabel('Index for SVD', fontsize=15)
ax1.set_ylabel('Value (dimensionless)', fontsize=15)
#ax1.set_title('K=17', fontsize=20)
ax1.xaxis.set_tick_params(labelsize=15)
ax1.yaxis.set_tick_params(labelsize=15)
legend = ax1.legend(loc='upper right', shadow=True, prop={'size': 15})
show()
def SmoothGauss(x, y):
    b = gaussian(19, 20)
    return filters.convolve1d(y, b / b.sum())
def CreatePulse(self, i):
    sig_wave = sig_amp * math.sin(2 * math.pi * sig_freq * i * dt)
    sig = sig_wave * signal.gaussian(sig_duration, std=sigma)[i]
    return sig
def synthetic_data_non_negative(gaussian_noise=1, random_state=None):
    """
    Generates synthetic non-negative data for dictionary learning tests.

    This function generates a matrix generated from 7 basic atoms linearly
    combined with random weights sparse over the atoms. A certain level of
    gaussian noise is added to the signal.

    Parameters
    ----------
    gaussian_noise: float, optional
        The level of noise to add to the synthetic data.
    random_state: RandomState or int, optional
        RandomState or seed used to generate RandomState for the
        reproducibility of data. If None each time RandomState is randomly
        initialised.

    Returns
    -------
    array_like, shape=(80, 96)
        Generated matrix of data
    array_like, shape=(80, 7)
        Coefficients
    array_like, shape=(7, 96)
        Dictionary
    """
    number_of_features = 96
    number_of_samples = 80
    number_of_atoms = 7
    rnd = check_random_state(random_state)

    atoms = np.empty([number_of_features, number_of_atoms])
    atoms[:, 0] = np.transpose(np.concatenate((np.ones([30, 1]), np.zeros([66, 1]))))
    atoms[:, 1] = np.transpose(np.concatenate((np.zeros([60, 1]), np.ones([36, 1]))))
    atoms[:, 2] = np.transpose(np.concatenate((np.zeros([24, 1]), np.ones([30, 1]),
                                               np.zeros([42, 1]))))
    atoms[:, 3] = signal.gaussian(96, 5)
    atoms[:, 4] = np.transpose(np.concatenate((np.zeros([17, 1]), np.ones([15, 1]),
                                               np.zeros([30, 1]), np.ones([24, 1]),
                                               np.zeros([10, 1]))))
    atoms[:, 5] = np.roll(signal.gaussian(96, 5), 30)
    atoms[:, 6] = signal.gaussian(96, 8)
    atoms[0:50, 6] = 0

    sums = np.sum(atoms, axis=0)
    atoms = atoms / sums

    # create sparse coefficients
    coefficients = np.zeros([number_of_atoms, number_of_samples])
    for i in range(0, number_of_samples):
        number_of_nonzero_elements = rnd.randint(2, 4)
        indices = rnd.choice(range(0, 7), number_of_nonzero_elements, replace=False)
        coeffs = rnd.random_sample(number_of_nonzero_elements) * 100
        coefficients[indices, i] = coeffs

    # create matrix
    v = np.dot(atoms, coefficients)

    # add noise (use the seeded generator so results are reproducible)
    v_tilde = v + rnd.normal(0, gaussian_noise,
                             (number_of_features, number_of_samples))
    v_tilde[np.where(v_tilde < 0)] = 0

    return v_tilde.T, coefficients.T, atoms.T
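# Example call for synthetic_data_non_negative above; with a fixed seed the
# output is reproducible (the seed value is arbitrary):
V, C, D = synthetic_data_non_negative(gaussian_noise=1, random_state=42)
assert (V >= 0).all()  # negative values are clipped to zero
# V: (80, 96) data, C: (80, 7) sparse codes, D: (7, 96) dictionary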
class TimeAlignment(unittest.TestCase):
    make_plots_blocking = False
    n_samples = 100  # must be an int for np.linspace / np.random.rand
    q1_initial = Quaternion(0, 0, 0, 1)
    q1_final = Quaternion(np.sqrt(2.) / 2., 0, 0, np.sqrt(2.) / 2.)
    ts = np.linspace(0, (n_samples + 1) / n_samples, n_samples)
    t1s = ts
    q2_initial = Quaternion(np.sqrt(2.) / 2., 0, 0, np.sqrt(2.) / 2.)
    q2_final = Quaternion(1., 0, 0, 0.)
    t2s = ts + 1.5 + signal.gaussian(len(ts), 0.1)

    # To generate varying angular velocities, we assign new random times
    # between the different quaternions.
    quat_interpolate_times = np.random.rand(n_samples)
    quat_interpolate_times.sort()
    quat_interpolate_times2 = (
        quat_interpolate_times *
        (quat_interpolate_times[-1] - quat_interpolate_times[0]) + t2s[0])
    q1s = quaternions_interpolate(q1_initial, t1s[0], q1_final, t1s[-1],
                                  quat_interpolate_times)
    q2s = quaternions_interpolate(q2_initial, t2s[0], q2_final, t2s[-1],
                                  quat_interpolate_times2)
    # ts_betwenn_quaternions = ts

    angular_velocity1_norms = []
    for i in range(len(q1s) - 1):
        angular_velocity = angular_velocity_between_quaternions(
            q1s[i], q1s[i + 1], t1s[i + 1] - t1s[i])
        angular_velocity1_norms.append(np.linalg.norm(angular_velocity))
    angular_velocity2_norms = []
    for i in range(len(q2s) - 1):
        angular_velocity = angular_velocity_between_quaternions(
            q2s[i], q2s[i + 1], t2s[i + 1] - t2s[i])
        angular_velocity2_norms.append(np.linalg.norm(angular_velocity))
    # add the gaussian bump element-wise (list += ndarray would extend the list)
    angular_velocity1_norms = (np.asarray(angular_velocity1_norms) +
                               signal.gaussian(len(angular_velocity1_norms), 1))
    dx = np.mean(np.diff(t1s))

    def test_time_alignment(self):
        time_offset = calculate_time_offset_from_signals(
            self.t1s[0:-1], self.angular_velocity1_norms,
            self.t2s[0:-1], self.angular_velocity2_norms,
            plot=True, block=False)
        print(time_offset)
        # TODO(ff): Finish this test.

    def test_time_alignment_from_sample_csv_poses(self):
        # TODO(ff): Read stamped poses from csv files.
        # Then call calculate_time_offset.
        pass

    def test_introduce_data_drops(self):
        test_size = 1000
        test = [math.sin(float(x)) for x in np.linspace(0, 2 * math.pi, test_size)]
        test_before = copy.deepcopy(test)
        config = DataDropConfig()
        config.max_percentage_for_single_drop = 5.0
        config.overall_drop_percentage = 20.0
        print("test size before dropping: {}".format(len(test)))
        set_to_none = True
        introduce_data_drops(test, config, set_to_none)
        print("test size after dropping: {}".format(len(test)))
        expected_test_size = float(test_size) - \
            ((config.overall_drop_percentage / 100.0) * float(test_size))
        print("expected_test_size: {}".format(expected_test_size))
        # assert abs(len(test) - expected_test_size) < 1e-8
        plot_alignment(test_before, test, blocking=self.make_plots_blocking)

    def test_introduce_data_drops_with_time_alignment(self):
        angular_velocity1_norms_before = copy.deepcopy(self.angular_velocity1_norms)
        angular_velocity2_norms_before = copy.deepcopy(self.angular_velocity2_norms)
        time_offset = calculate_time_offset_from_signals(
            self.t1s[0:-1], self.angular_velocity1_norms,
            self.t2s[0:-1], self.angular_velocity2_norms,
            plot=True, block=True)
        print(time_offset)
        config = DataDropConfig()
        config.max_percentage_for_single_drop = 5.0
        config.overall_drop_percentage = 20.0
        set_to_none = False
        # build the same test signal as in test_introduce_data_drops
        test_size = 1000
        test = [math.sin(float(x)) for x in np.linspace(0, 2 * math.pi, test_size)]
        test_before = copy.deepcopy(test)
        introduce_data_drops(test, config, set_to_none)
        expected_test_size = float(test_size) - \
            ((config.overall_drop_percentage / 100.0) * float(test_size))
        print("expected_test_size: {}".format(expected_test_size))
        # assert abs(len(test) - expected_test_size) < 1e-8
        plot_alignment(test_before, test, blocking=self.make_plots_blocking)
datay1.append(ch1)
datay2.append(ch2)
datay1 = datay1[250000:350000]
datay2 = datay2[260000:340000]
count1 = len(datay1)
count2 = len(datay2)
fftnum = 500  #64#512#1024#8192
std = 125  #16#128#256#1500
axis_xf = range(3, 21)
freq = np.array([fs * i / fftnum for i in axis_xf])
win = signal.gaussian(fftnum, std)
basis = []
for k in range(3, 21):
    basis.append([
        complex(cos(2 * pi / fftnum * k * n), sin(2 * pi / fftnum * k * n))
        for n in range(fftnum)
    ])
basis = np.transpose(basis)
index = 0
interval = 5
magnitude1 = []
magnitude2 = []
while True:
def gaussian_coeffs(self):
    res = signal.gaussian(9, 1)
    res = res / res.sum()
    return res.reshape(3, 3)
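# Note: reshaping a 1-D 9-point gaussian into 3x3 (as gaussian_coeffs does)
# is not the same as a separable 2-D gaussian kernel; a sketch of the
# outer-product version for comparison:
import numpy as np
from scipy import signal

g = signal.gaussian(3, 1)
kernel_2d = np.outer(g, g)  # true separable 2-D gaussian
kernel_2d /= kernel_2d.sum()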
import requests
from pattern import web
import scipy.signal as spsig
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import numpy as np

url = 'https://us.cnn.com/?hpt=header_edition-picker'
html = requests.get(url).text
disaster_words = ['hurricane', 'storm', 'cyclone', 'flood']
dw_count = sum([html.count(dw) for dw in disaster_words])

#base = datetime.datetime.today()
base = datetime.datetime.strptime('2018-12-31', '%Y-%m-%d')
days_series = [base - datetime.timedelta(days=x) for x in range(0, 365)]
simul_dw_series = spsig.gaussian(365, 15)
simul_dw_series = np.append(simul_dw_series, np.zeros(5))[-365:]
simul_dw_series = simul_dw_series * 2 - 1
for i in range(0, len(simul_dw_series)):
    if simul_dw_series[i] < 0.0:
        simul_dw_series[i] = 0.0
plt.plot(days_series, simul_dw_series, linewidth=1.0)
plt.show()
def create_gaussian_kernel(kernel_size, std=1):
    gkern1d = signal.gaussian(kernel_size, std=std).reshape(kernel_size, 1)
    gkern2d = np.outer(gkern1d, gkern1d)
    return gkern2d / gkern2d.sum()
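# Usage sketch for create_gaussian_kernel above: blur an image by 2-D
# convolution (the random image is illustrative).
import numpy as np
from scipy import signal

kern = create_gaussian_kernel(5, std=1)
img = np.random.rand(32, 32)
blurred = signal.convolve2d(img, kern, mode='same', boundary='symm')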
def do_gaussian_fit(self, axis, data):
    """ Perform a gaussian fit.

    @param axis:
    @param data:
    @return:
    """
    model, params = self._fit_logic.make_gaussian_model()
    if len(axis) < len(params):
        self.log.warning('Fit could not be performed because number of '
                         'data points is smaller than number of parameters.')
        return self.do_no_fit()
    else:
        parameters_to_substitute = dict()
        update_dict = dict()
        # TODO: move this to "gated counter" estimator in fitlogic
        #       make the filter an extra function shared and usable for
        #       other functions
        gauss = gaussian(10, 10)
        data_smooth = filters.convolve1d(data, gauss / gauss.sum(), mode='mirror')

        # integral of data corresponds to sqrt(2) * Amplitude * Sigma
        function = InterpolatedUnivariateSpline(axis, data_smooth, k=1)
        Integral = function.integral(axis[0], axis[-1])
        amp = data_smooth.max()
        sigma = Integral / amp / np.sqrt(2 * np.pi)
        amplitude = amp * sigma * np.sqrt(2 * np.pi)

        update_dict['offset'] = {'min': 0, 'max': data.max(), 'value': 0,
                                 'vary': False}
        update_dict['center'] = {'min': axis.min(), 'max': axis.max(),
                                 'value': axis[np.argmax(data)]}
        update_dict['sigma'] = {'min': -np.inf, 'max': np.inf, 'value': sigma}
        update_dict['amplitude'] = {'min': 0, 'max': np.inf, 'value': amplitude}

        result = self._fit_logic.make_gaussian_fit(
            x_axis=axis,
            data=data,
            estimator=self._fit_logic.estimate_gaussian_peak,
            units=None,  # TODO
            add_params=update_dict)
        # 1000 points in x axis for smooth fit data
        hist_fit_x = np.linspace(axis[0], axis[-1], 1000)
        hist_fit_y = model.eval(x=hist_fit_x, params=result.params)

        param_dict = OrderedDict()
        # create the proper param_dict with the values:
        param_dict['sigma_0'] = {'value': result.params['sigma'].value,
                                 'error': result.params['sigma'].stderr,
                                 'unit': 'Occurrences'}
        param_dict['FWHM'] = {'value': result.params['fwhm'].value,
                              'error': result.params['fwhm'].stderr,
                              'unit': 'Counts/s'}
        param_dict['Center'] = {'value': result.params['center'].value,
                                'error': result.params['center'].stderr,
                                'unit': 'Counts/s'}
        param_dict['Amplitude'] = {'value': result.params['amplitude'].value,
                                   'error': result.params['amplitude'].stderr,
                                   'unit': 'Occurrences'}
        param_dict['chi_sqr'] = {'value': result.chisqr, 'unit': ''}

        return hist_fit_x, hist_fit_y, param_dict, result
def load_data(dataset, tra_ori_model, rand=False, aug=0.0, batch_size=1,
              sample_rate=None):
    if type(dataset[0]) == str:
        img_name, folder_name, img_size = get_maximum_img_size_and_names(
            dataset, sample_rate)
    else:
        img_name, folder_name, img_size = dataset
    if rand:
        rand_idx = np.arange(len(img_name))
        np.random.shuffle(rand_idx)
        img_name = img_name[rand_idx]
        folder_name = folder_name[rand_idx]
    if batch_size > 1 and use_multiprocessing == True:
        p = Pool(batch_size)
        p_sub_load_data = partial(sub_load_data, img_size=img_size, aug=aug)
    for i in range(0, len(img_name), batch_size):
        have_alignment = np.ones([batch_size, 1, 1, 1])
        image = np.zeros((batch_size, img_size[0], img_size[1], 1))
        segment = np.zeros((batch_size, img_size[0], img_size[1], 1))
        alignment = np.zeros((batch_size, img_size[0], img_size[1], 1))
        # integer division so the array shapes stay ints
        minutiae_w = np.zeros((batch_size, img_size[0] // 8, img_size[1] // 8, 1)) - 1
        minutiae_h = np.zeros((batch_size, img_size[0] // 8, img_size[1] // 8, 1)) - 1
        minutiae_o = np.zeros((batch_size, img_size[0] // 8, img_size[1] // 8, 1)) - 1
        batch_name = [img_name[(i + j) % len(img_name)] for j in range(batch_size)]
        batch_f_name = [folder_name[(i + j) % len(img_name)] for j in range(batch_size)]
        if batch_size > 1 and use_multiprocessing == True:
            results = p.map(p_sub_load_data, zip(batch_name, batch_f_name))
        else:
            results = list(map(p_sub_load_data, zip(batch_name, batch_f_name)))
        for j in range(batch_size):
            img, seg, ali, mnt = results[j]
            if np.sum(ali) == 0:
                have_alignment[j, 0, 0, 0] = 0
            image[j, :, :, 0] = img / 255.0
            segment[j, :, :, 0] = seg / 255.0
            alignment[j, :, :, 0] = ali / 255.0
            minutiae_w[j, (mnt[:, 1] / 8).astype(int), (mnt[:, 0] / 8).astype(int), 0] = mnt[:, 0] % 8
            minutiae_h[j, (mnt[:, 1] / 8).astype(int), (mnt[:, 0] / 8).astype(int), 0] = mnt[:, 1] % 8
            minutiae_o[j, (mnt[:, 1] / 8).astype(int), (mnt[:, 0] / 8).astype(int), 0] = mnt[:, 2]
        # get seg
        label_seg = segment[:, ::8, ::8, :]
        label_seg[label_seg > 0] = 1
        label_seg[label_seg <= 0] = 0
        minutiae_seg = (minutiae_o != -1).astype(float)
        # get ori & mnt
        orientation = tra_ori_model.predict(alignment)
        orientation = orientation / np.pi * 180 + 90
        orientation[orientation >= 180.0] = 0.0  # orientation [0, 180)
        minutiae_o = minutiae_o / np.pi * 180 + 90  # [90, 450)
        minutiae_o[minutiae_o > 360] = minutiae_o[minutiae_o > 360] - 360  # to current coordinate system [0, 360)
        minutiae_ori_o = np.copy(minutiae_o)  # copy one
        minutiae_ori_o[minutiae_ori_o >= 180] = minutiae_ori_o[minutiae_ori_o >= 180] - 180  # for strong ori label [0, 180)
        # ori 2 gaussian
        gaussian_pdf = signal.gaussian(361, 3)
        y = np.reshape(np.arange(1, 180, 2), [1, 1, 1, -1])
        delta = np.array(np.abs(orientation - y), dtype=int)
        delta = np.minimum(delta, 180 - delta) + 180
        label_ori = gaussian_pdf[delta]
        # ori_o 2 gaussian
        delta = np.array(np.abs(minutiae_ori_o - y), dtype=int)
        delta = np.minimum(delta, 180 - delta) + 180
        label_ori_o = gaussian_pdf[delta]
        # mnt_o 2 gaussian
        y = np.reshape(np.arange(1, 360, 2), [1, 1, 1, -1])
        delta = np.array(np.abs(minutiae_o - y), dtype=int)
        delta = np.minimum(delta, 360 - delta) + 180
        label_mnt_o = gaussian_pdf[delta]
        # w 2 gaussian
        gaussian_pdf = signal.gaussian(17, 2)
        y = np.reshape(np.arange(0, 8), [1, 1, 1, -1])
        delta = (minutiae_w - y + 8).astype(int)
        label_mnt_w = gaussian_pdf[delta]
        # h 2 gaussian
        delta = (minutiae_h - y + 8).astype(int)
        label_mnt_h = gaussian_pdf[delta]
        # mnt cls label -1:neg, 0:no care, 1:pos
        label_mnt_s = np.copy(minutiae_seg)
        label_mnt_s[label_mnt_s == 0] = -1  # neg to -1
        label_mnt_s = (label_mnt_s +
                       ndimage.maximum_filter(label_mnt_s, size=(1, 3, 3, 1))) / 2  # around 3*3 pos -> 0
        # apply segmentation
        label_ori = label_ori * label_seg * have_alignment
        label_ori_o = label_ori_o * minutiae_seg
        label_mnt_o = label_mnt_o * minutiae_seg
        label_mnt_w = label_mnt_w * minutiae_seg
        label_mnt_h = label_mnt_h * minutiae_seg
        yield image, label_ori, label_ori_o, label_seg, label_mnt_w, label_mnt_h, label_mnt_o, label_mnt_s, batch_name
    if batch_size > 1 and use_multiprocessing == True:
        p.close()
        p.join()
    return
def run(args=None, config=None):
    parser = AnalysisParser('config')
    args = parser.parse_analysis_args(args)
    config = args.config

    eeg_path = '/media/sf_shared/graddata/ica_denoised_raw.fif'
    fmri_path = '/media/sf_shared/CoRe_011/rfMRI/d2/11-BOLD_Sleep_BOLD_Sleep_20150824220820_11.nii'
    vmrk_path = '/media/sf_shared/CoRe_011/eeg/CoRe_011_Day2_Night_01.vmrk'
    event_ids, event_lats = helpers.read_vmrk(vmrk_path)
    event_lats = np.array(event_lats)
    grad_inds = [index for index, value in enumerate(event_ids) if value == 'R1']
    grad_inds = np.array(grad_inds)
    grad_lats = event_lats[grad_inds]
    grad_lats = grad_lats / 20  # resample from 5000Hz to 250Hz
    start_ind = int(grad_lats[0])
    end_ind = int(grad_lats[-1])

    canica = CanICA(n_components=40, smoothing_fwhm=6., threshold=None,
                    verbose=10, random_state=0)
    fmri = nib.load(fmri_path)
    # get TR, n_slices, and n_TRs
    fmri_info = helpers.fmri_info(fmri_path)
    canica.fit(fmri)
    cimg = canica.components_img_.get_data()
    TR = fmri_info[0]
    tr_times = np.arange(0, 30, TR)
    hrf = get_hrf(tr_times)

    # plot components
    for i in np.arange(0, 40):
        plt.subplot(4, 10, i + 1)
        plt.imshow(np.max(cimg[:, :, :, i], axis=2))

    # get the EEG
    raw = mne.io.read_raw_fif(eeg_path, preload=True)
    raw_data = raw.get_data()

    # get power spectrum for different sleep stages (BOLD)
    comps = canica.transform([fmri])[0].transpose()
    bold_srate = 1 / fmri_info[0]
    bold_epochl = int(7500 / (250 / bold_srate))
    #bold_pxx,bold_f = pxx_bold_component_epoch(comps, bold_srate, 250, bold_epochl, sleep_stages)
    #eeg_pxx,eeg_f = pxx_eeg_epochs(raw_data, sleep_stages, 7500)

    # concatenate the epochs, then compute the psd
    # 1) get triggers, 2) concatenate data, 3) compute psd
    def get_trigger_inds(trigger_name, event_ids):
        trig_inds = [index for index, value in enumerate(event_ids)
                     if value == trigger_name]
        return trig_inds

    def epoch_triggers(raw_data, lats, pre_samples, post_samples):
        epochs = np.zeros((raw_data.shape[0], lats.shape[0],
                           pre_samples + post_samples))
        for lat in np.arange(0, lats.shape[0]):
            epochs[:, lat, :] = raw_data[:, lats[lat] - pre_samples:lats[lat] + post_samples]
        return epochs

    trigger_names = ['wake', 'NREM1', 'NREM2', 'NREM3']
    """
    epoch BOLD and get power for different trigger types
    what you actually want is single trial EEG and BOLD psd
    first get all the indices that are contained within the BOLD timeseries
    then, get the EEG power spectrum values within those same indices
    """
    eeg_srate = 250
    bold_pre_samples = 15
    bold_post_samples = 25
    eeg_pre_samples = int(bold_pre_samples * fmri_info[0] * eeg_srate)
    eeg_post_samples = int(bold_post_samples * fmri_info[0] * eeg_srate)
    bold_conversion = eeg_srate / (1 / fmri_info[0])

    all_bold_epochs = []
    all_eeg_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        bads = np.where((bold_lats - bold_pre_samples < 0) |
                        (bold_lats + bold_post_samples >= comps.shape[1]))
        bold_lats = np.delete(bold_lats, bads, axis=0)
        eeg_lats = np.delete(trig_lats, bads, axis=0)
        bold_epochs = epoch_triggers(comps, bold_lats, bold_pre_samples,
                                     bold_post_samples)
        eeg_epochs = epoch_triggers(raw_data, eeg_lats, eeg_pre_samples,
                                    eeg_post_samples)
        all_bold_epochs.append(bold_epochs)
        all_eeg_epochs.append(eeg_epochs)

    # compute power
    for i in np.arange(0, len(all_eeg_epochs)):
        eeg_epochs = all_eeg_epochs[i]
        bold_epochs = all_bold_epochs[i]
        bold_f, bold_pxx = signal.welch(bold_epochs)
        eeg_f, eeg_pxx = signal.welch(eeg_epochs)

    gauss = signal.gaussian(eeg_srate, 20)
    gauss = gauss / np.sum(gauss)
    freqs = np.zeros((5, 2))
    freqs[0, 0] = 1
    freqs[0, 1] = 3
    freqs[1, 0] = 4
    freqs[1, 1] = 7
    freqs[2, 0] = 8
    freqs[2, 1] = 15
    freqs[3, 0] = 17
    freqs[3, 1] = 30
    freqs[4, 0] = 30
    freqs[4, 1] = 80
    chan_freqs = filter_and_downsample(raw_data, comps, freqs, start_ind, end_ind)
    conved = convolve_chanfreqs(np.log(chan_freqs), hrf)

    # epoch all the hrf-convolved filtered EEG power
    all_conved_epochs = []
    for trig in np.arange(0, len(trigger_names)):
        trig_inds = get_trigger_inds(trigger_names[trig], event_ids)
        trig_lats = event_lats[trig_inds]
        bold_lats = ((trig_lats - start_ind) / bold_conversion).astype(int)
        bads = np.where((bold_lats - bold_pre_samples < 0) |
                        (bold_lats + bold_post_samples >= comps.shape[1]))
        bold_lats = np.delete(bold_lats, bads, axis=0)
        conved_epochs = np.zeros((conved.shape[0], conved.shape[1],
                                  bold_lats.shape[0],
                                  bold_pre_samples + bold_post_samples))
        for i in np.arange(0, conved.shape[1]):
            conved_epochs[:, i, :] = epoch_triggers(conved[:, i, :], bold_lats,
                                                    bold_pre_samples,
                                                    bold_post_samples)
        all_conved_epochs.append(conved_epochs)

    sig1 = chan_freqs[3, 2, :]
    sig2 = comps[0, :]
    sig2 = butter_bandpass_filter(sig2, 0.005, 0.1, 1 / fmri_info[0])
    nlags = 50

    def xcorr(sig1, sig2, nlags):
        vec_l = sig1.shape[0] - nlags
        xcorrs = np.zeros(nlags)
        vec1 = sig1[int(sig1.shape[0] / 2 - vec_l / 2):int(sig1.shape[0] / 2 + vec_l / 2)]
        start_p = 0
        for i in np.arange(0, nlags):
            vec2 = sig2[(start_p + i):(start_p + vec_l + i)]
            xcorrs[i] = np.corrcoef(vec1, vec2)[0, 1]
        return xcorrs

    all_xcorrs = []
    for i in np.arange(0, len(all_conved_epochs)):
        xc_i = np.zeros((1, all_conved_epochs[i].shape[1],
                         all_conved_epochs[i].shape[2],
                         all_bold_epochs[i].shape[0], 20))
        for j in np.arange(0, 1):
            print(j)
            for k in np.arange(0, all_conved_epochs[i].shape[1]):
                for el in np.arange(0, all_conved_epochs[i].shape[2]):
                    for m in np.arange(0, all_bold_epochs[i].shape[0]):
                        xc_i[j, k, el, m, :] = xcorr(all_conved_epochs[i][5, k, el, :],
                                                     all_bold_epochs[i][m, el, :], 20)
        all_xcorrs.append(xc_i)

    # correlate power across different epochs
    plt.plot(np.mean(all_xcorrs[1][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[2][0, 1, :, 0, :], axis=0))
    plt.plot(np.mean(all_xcorrs[3][0, 1, :, 0, :], axis=0))
def calculate_threshold(self, hist_data=None, distr='poissonian'): """ Calculate the threshold by minimizing its overlap with the poissonian fits. @param np.array hist_data: 2D array whitch represent the x and y values of a histogram of a trace. string distr: tells the function on what distribution it should calculate the threshold ( Added because it might happen that one normalizes data between (-1,1) and then a poissonian distribution won't work anymore. @return tuple(float, float): threshold: the calculated threshold between two overlapping poissonian distributed peaks. fidelity: the measure how good the two peaks are resolved according to the calculated threshold The calculation of the threshold relies on fitting two poissonian distributions to the count histogram and minimize a threshold with respect to the overlap area: """ # in any case calculate the hist data x_axis = hist_data[0][:-1] + (hist_data[0][1] - hist_data[0][0]) / 2. y_data = hist_data[1] if distr == 'poissonian': # perform the fit hist_fit_x, hist_fit_y, param_dict = self.do_doublepossonian_fit(x_axis, y_data) if param_dict.get('lambda_0') is None: self.log.error('The double poissonian fit does not work! Take at ' 'least a dummy value, in order not to break the ' 'routine.') amp0 = 1 amp1 = 1 param_dict['Amplitude_0'] = {'value': amp0, 'unit': 'occurences'} param_dict['Amplitude_1'] = {'value': amp0, 'unit': 'occurences'} # make them a bit different so that fit works. mu0 = hist_data[0][:].mean()-0.1 mu1 = hist_data[0][:].mean()+0.1 param_dict['lambda_0'] = {'value': mu0, 'unit': 'counts'} param_dict['lambda_1'] = {'value': mu1, 'unit': 'counts'} else: mu0 = param_dict['lambda_0']['value'] mu1 = param_dict['lambda_1']['value'] amp0 = param_dict['Amplitude_0']['value'] amp1 = param_dict['Amplitude_1']['value'] if mu0 < mu1: first_dist = self.get_poissonian(x_val=hist_data[0], mu=mu0, amplitude=amp0) sec_dist = self.get_poissonian(x_val=hist_data[0], mu=mu1, amplitude=amp1) else: first_dist = self.get_poissonian(x_val=hist_data[0], mu=mu1, amplitude=amp1) sec_dist = self.get_poissonian(x_val=hist_data[0], mu=mu0, amplitude=amp0) # create a two poissonian array, where the second poissonian # distribution is add as negative values. Now the transition from # positive to negative values will get the threshold: difference_poissonian = first_dist - sec_dist trans_index = 0 for i in range(len(difference_poissonian)-1): # go through the combined histogram array and the point which # changes the sign. The transition from positive to negative values # will get the threshold: if difference_poissonian[i] < 0 and difference_poissonian[i+1] >= 0: trans_index = i break elif difference_poissonian[i] > 0 and difference_poissonian[i+1] <= 0: trans_index = i break threshold_fit = hist_data[0][trans_index] # Calculate also the readout fidelity, i.e. sum the area under the # first peak before the threshold of the first and second distribution # and take the ratio of that area. Do the same thing after the threshold # (of course with a reversed choice of the distribution). If the overlap # in both cases is very small, then the fidelity is good, if the overlap # is identical, then fidelity indicates a poor separation of the peaks. 
        if mu0 < mu1:
            area0_low = self.get_poissonian(hist_data[0][0:trans_index], mu0, 1).sum()
            area0_high = self.get_poissonian(hist_data[0][trans_index:], mu0, 1).sum()
            area1_low = self.get_poissonian(hist_data[0][0:trans_index], mu1, 1).sum()
            area1_high = self.get_poissonian(hist_data[0][trans_index:], mu1, 1).sum()

            area0_low_amp = self.get_poissonian(hist_data[0][0:trans_index], mu0, amp0).sum()
            area0_high_amp = self.get_poissonian(hist_data[0][trans_index:], mu0, amp0).sum()
            area1_low_amp = self.get_poissonian(hist_data[0][0:trans_index], mu1, amp1).sum()
            area1_high_amp = self.get_poissonian(hist_data[0][trans_index:], mu1, amp1).sum()
        else:
            area1_low = self.get_poissonian(hist_data[0][0:trans_index], mu0, 1).sum()
            area1_high = self.get_poissonian(hist_data[0][trans_index:], mu0, 1).sum()
            area0_low = self.get_poissonian(hist_data[0][0:trans_index], mu1, 1).sum()
            area0_high = self.get_poissonian(hist_data[0][trans_index:], mu1, 1).sum()

            area1_low_amp = self.get_poissonian(hist_data[0][0:trans_index], mu0, amp0).sum()
            area1_high_amp = self.get_poissonian(hist_data[0][trans_index:], mu0, amp0).sum()
            area0_low_amp = self.get_poissonian(hist_data[0][0:trans_index], mu1, amp1).sum()
            area0_high_amp = self.get_poissonian(hist_data[0][trans_index:], mu1, amp1).sum()

        # Now calculate how big the overlap is relative to the sum of the
        # other part of the area; that gives the normalized fidelity:
        fidelity = 1 - (area1_low / area0_low + area0_high / area1_high) / 2

        area0 = self.get_poissonian(hist_data[0][:], mu0, amp0).sum()
        area1 = self.get_poissonian(hist_data[0][:], mu1, amp1).sum()

        # try this new measure for the fidelity
        fidelity2 = 1 - ((area1_low_amp / area1) / (area0_low_amp / area0) +
                         (area0_high_amp / area0) / (area1_high_amp / area1)) / 2

        param_dict['normalized_fidelity'] = fidelity2

        return threshold_fit, fidelity, param_dict

    # this works if your data is normalized to the interval (-1, 1)
    if distr == 'gaussian_normalized':
        # first some helper functions
        def two_gaussian_intersect(m1, m2, std1, std2, amp1, amp2):
            """ Calculate the intersections of two gaussians. """
            a = 1 / (2 * std1 ** 2) - 1 / (2 * std2 ** 2)
            b = m2 / (std2 ** 2) - m1 / (std1 ** 2)
            c = m1 ** 2 / (2 * std1 ** 2) - m2 ** 2 / (2 * std2 ** 2) - np.log(amp2 / amp1)
            return np.roots([a, b, c])

        def gaussian(counts, amp, stdv, mean):
            return amp * np.exp(-(counts - mean) ** 2 / (2 * stdv ** 2)) / (stdv * np.sqrt(2 * np.pi))

        try:
            result = self._fit_logic.make_twogausspeakoffset_fit(x_axis, y_data)
            # calculating the threshold
            # NOTE: the threshold is taken as the intersection of the two
            # gaussians; while this should give a good approximation, it is
            # probably not mathematically exact.
            mu0 = result.params['g0_center'].value
            mu1 = result.params['g1_center'].value
            sigma0 = result.params['g0_sigma'].value
            sigma1 = result.params['g1_sigma'].value
            amp0 = result.params['g0_amplitude'].value / (sigma0 * np.sqrt(2 * np.pi))
            amp1 = result.params['g1_amplitude'].value / (sigma1 * np.sqrt(2 * np.pi))

            candidates = two_gaussian_intersect(mu0, mu1, sigma0, sigma1, amp0, amp1)

            # we want the intersection that lies between the two peaks
            if mu0 < mu1:
                threshold = [i for i in filter(lambda x: (x > mu0) & (x < mu1), candidates)]
            else:
                threshold = [i for i in filter(lambda x: (x < mu0) & (x > mu1), candidates)]
            threshold = threshold[0]

            # now get the readout fidelity of the bigger peak (most likely
            # the states that are not driven by the mw pi pulse)
            if mu0 < mu1:
                gc0 = integrate.quad(lambda counts: gaussian(counts, amp1, sigma1, mu1), -1, 1)
                gp0 = integrate.quad(lambda counts: gaussian(counts, amp1, sigma1, mu1), -1, threshold)
            else:
                gc0 = integrate.quad(lambda counts: gaussian(counts, amp0, sigma0, mu0), -1, 1)
                gp0 = integrate.quad(lambda counts: gaussian(counts, amp0, sigma0, mu0), -1, threshold)

            # and then the same for the other peak
            if mu0 > mu1:
                gc1 = integrate.quad(lambda counts: gaussian(counts, amp1, sigma1, mu1), -1, 1)
                gp1 = integrate.quad(lambda counts: gaussian(counts, amp1, sigma1, mu1), threshold, 1)
            else:
                gc1 = integrate.quad(lambda counts: gaussian(counts, amp0, sigma0, mu0), -1, 1)
                gp1 = integrate.quad(lambda counts: gaussian(counts, amp0, sigma0, mu0), threshold, 1)

            param_dict = {}
            fidelity = 1 - (gp0[0] / gc0[0] + gp1[0] / gc1[0]) / 2
            fidelity1 = 1 - (gp0[0] / gc0[0])
            fidelity2 = 1 - gp1[0] / gc1[0]
            threshold_fit = threshold
            # if the fit worked, also add the result to param_dict, which
            # might be useful for debugging
            param_dict['result'] = result
        except Exception:
            self.log.error('could not fit the data')
            error = True
            fidelity = 0
            threshold_fit = 0
            param_dict = {}
            new_dict = {}
            new_dict['value'] = np.inf
            param_dict['chi_sqr'] = new_dict

        return threshold_fit, fidelity, param_dict
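
# Sanity-check sketch for the intersection formula used above: for two
# equal-width, equal-amplitude gaussians the quadratic degenerates to a
# linear equation and the single intersection must lie exactly halfway
# between the means. Values here are illustrative only.
import numpy as np

def _intersect_demo(m1, m2, std1, std2, amp1, amp2):
    # same quadratic coefficients as two_gaussian_intersect above
    a = 1 / (2 * std1 ** 2) - 1 / (2 * std2 ** 2)
    b = m2 / (std2 ** 2) - m1 / (std1 ** 2)
    c = m1 ** 2 / (2 * std1 ** 2) - m2 ** 2 / (2 * std2 ** 2) - np.log(amp2 / amp1)
    return np.roots([a, b, c])

print(_intersect_demo(-0.5, 0.5, 0.2, 0.2, 1.0, 1.0))  # -> [0.]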
def moving_average(y, n=n_gouss, sig=sigma):
    # Gaussian-weighted running mean and running variance of y
    b = signal.gaussian(n, sig)
    average = ndimage.filters.convolve1d(y, b / b.sum())
    var = ndimage.filters.convolve1d(np.power(y - average, 2), b / b.sum())
    return average, var
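
# Usage sketch for moving_average, assuming the module-level defaults were
# defined before the function (e.g. n_gouss = 25, sigma = 5): the running
# variance of a noisy sine stays roughly flat and close to the noise variance.
import numpy as np

y = np.sin(np.linspace(0, 4 * np.pi, 1000)) + 0.1 * np.random.randn(1000)
avg, var = moving_average(y)
print(var.mean())  # roughly 0.01 (= 0.1 ** 2)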
omega = 2 * np.pi * fs  # angular frequency (rad/s)
xbar = np.mean(epdist)  # mean epicentral distance of the network
print("Number of frequency samples is: ", len(fs))

################################################################################
# Preliminary processing
################################################################################

# ***** Generate weight function for the stations to attenuate secondary lobes


def gauss(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma)**2)


wn = gauss(epdist, (epdist[-1] + epdist[0]) / 2, aperture / 4)
#wn=np.ones((nrec))
wn_uss = ss.gaussian(nrec, nrec / 4)  # usable only for uniformly spaced stations

# ***** Remove mean
xtdata = ss.detrend(td_data)

# ***** Normalize each trace with respect to the greatest dynamic range in the entire data
xtdata = np.matrix(xtdata)
maxet = np.matrix.max(xtdata, axis=1)
minet = np.matrix.min(xtdata, axis=1)
ret = maxet - minet
divisor = ret * (np.matrix(np.ones((1, nsamp))))
xtdata = xtdata / divisor

# ***** Fourier transform with respect to time
xwdata = np.fft.fftshift(np.fft.fft(xtdata), axes=1)

#####################################################################################################
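# Minimal sketch of the station-weighting idea above (synthetic values, names
# with a _demo suffix are illustrative): a gaussian taper centred on the array
# damps the outermost stations and thus the secondary lobes of the array
# response.
import numpy as np

epdist_demo = np.linspace(100.0, 200.0, 21)  # assumed epicentral distances (km)
aperture_demo = epdist_demo[-1] - epdist_demo[0]
wn_demo = np.exp(-0.5 * ((epdist_demo - epdist_demo.mean()) / (aperture_demo / 4)) ** 2)
print(wn_demo.min())  # edge stations weighted ~exp(-2) ~ 0.135 of the centre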
def compute_prior_prob_smoothed(prior_prob_path, prior_prob_smoothed_path,
                                sigma=5, do_plot=True, verbose=1):
    """
    Smooth the prior probability with a gaussian window, interpolate back to
    the original bins, and normalize again.
    Reference: https://github.com/foamliu/Colorful-Image-Colorization/blob/master/class_rebal.py
    Usage:
        info = dict(
            prior_prob_path = os.path.join(module_dir, "data", "prior_prob_train_div2k.npy"),
            prior_prob_smoothed_path = os.path.join(module_dir, "data", "prior_prob_smoothed_train_div2k.npy"),
            sigma = 5,
            do_plot = True,
            verbose = True,
        )
        locals().update(**info)
        prior_prob_smoothed = compute_prior_prob_smoothed(**info)
    """
    # load prior probability
    if verbose == 1:
        print("\n=== Compute Prior Probability Smoothed === ")
    prior_prob = np.load(prior_prob_path)

    # add an epsilon to the prior prob to avoid 0 values and possible NaN
    prior_prob += 1E-3 * np.min(prior_prob)

    # renormalize
    prior_prob = prior_prob / (1.0 * np.sum(prior_prob))

    # smooth with a gaussian window
    f = interp1d(np.arange(prior_prob.shape[0]), prior_prob)
    xx = np.linspace(0, prior_prob.shape[0] - 1, 1000)
    yy = f(xx)
    window = gaussian(2000, sigma)  # 2000 pts in the window, sigma=5
    smoothed = convolve(yy, window / window.sum(), mode='same')
    fout = interp1d(xx, smoothed)
    prior_prob_smoothed = np.array([fout(i) for i in range(prior_prob.shape[0])])
    prior_prob_smoothed = prior_prob_smoothed / np.sum(prior_prob_smoothed)

    # save
    if prior_prob_smoothed_path is not None:
        save_dir = os.path.dirname(prior_prob_smoothed_path)
        if save_dir != "" and not os.path.exists(save_dir):
            os.makedirs(save_dir)
        np.save(prior_prob_smoothed_path, prior_prob_smoothed)
    # if

    if do_plot:
        plt.figure(figsize=(20, 10))

        plt.subplot(2, 2, 1)
        plt.plot(prior_prob, label="prior_prob")
        plt.plot(prior_prob_smoothed, "g--", label="prior_prob_smoothed")
        plt.yscale("log")
        plt.legend()

        plt.subplot(2, 2, 2)
        plt.plot(prior_prob, label="prior_prob")
        plt.plot(xx, smoothed, "r-", label="smoothed")
        plt.yscale("log")
        plt.legend()

        plt.subplot(2, 2, 3)
        plt.hist(prior_prob, bins=100)
        plt.xlabel("Prior probability")
        plt.ylabel("Frequency")
        plt.yscale("log")

        plt.subplot(2, 2, 4)
        plt.hist(prior_prob_smoothed, bins=100)
        plt.xlabel("Prior probability smoothed")
        plt.ylabel("Frequency")
        plt.yscale("log")
        plt.show()
    # if

    return prior_prob_smoothed
def window(self, fwhm):
    window = gaussian(self.spectrum.N, fwhm2std(fwhm))
    return window / np.sum(window)
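
# fwhm2std is not defined in this excerpt; for a gaussian the standard
# conversion from full width at half maximum to standard deviation is
# std = fwhm / (2 * sqrt(2 * ln 2)) ~= fwhm / 2.3548, so a plausible
# implementation would be:
import numpy as np

def fwhm2std(fwhm):
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))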
SEC_IN_HOUR = 3600
SEC_IN_MONTH = 2628000
DAYS_IN_MONTH = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])

RHO = 900.  # ice density
G = 9.81  # gravity
N = 3.  # Glen's law exponent
A = 2.4e-24  # Glen's default creep parameter
FS = 5.7e-20  # Default sliding parameter from Oerlemans - OUTDATED
TWO_THIRDS = 2. / 3.
FOUR_THIRDS = 4. / 3.
ONE_FIFTH = 1. / 5.

GAUSSIAN_KERNEL = dict()
for ks in [5, 7, 9]:
    kernel = gaussian(ks, 1)
    GAUSSIAN_KERNEL[ks] = kernel / kernel.sum()

_doc = 'A geotiff file containing the DEM (reprojected into the local grid).'
BASENAMES['dem'] = ('dem.tif', _doc)

_doc = 'The glacier outlines in the local projection.'
BASENAMES['outlines'] = ('outlines.shp', _doc)

_doc = 'The glacier intersects in the local projection.'
BASENAMES['intersects'] = ('intersects.shp', _doc)

_doc = 'The flowline catchments in the local projection.'
BASENAMES['flowline_catchments'] = ('flowline_catchments.shp', _doc)

_doc = 'The catchments intersections in the local projection.'
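
# Usage sketch (synthetic profile): the precomputed unit-sum kernels can be
# applied with np.convolve; only the sizes 5, 7 and 9 built above are
# available, and any other key raises a KeyError.
import numpy as np

profile = np.array([0., 0., 1., 0., 0., 0., 1., 1., 1., 0.])
smoothed = np.convolve(profile, GAUSSIAN_KERNEL[5], mode='same')
print(smoothed.sum())  # close to profile.sum(), since the kernel sums to 1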
def get_growth_rate(t1, t2, plane="H", beam=1):
    """ Get the growth rate of an instability occurring from t1 to t2. """
    plane_number = 1 if plane == 'H' else 2  # useful for the name of our variable
    variable = ('LHC.BQBBQ.CONTINUOUS_HS.B' + str(beam) + ':EIGEN_AMPL_' +
                str(plane_number))

    print("Getting the data...")
    vn = [variable]
    data = db.get(vn, t1, t2)
    if len(data[variable][0]) == 0:  # if no data (empty array)
        print("No data available for BBQ amplitude in that time period (%s)"
              % str(variable))
        print("\033[91m" + "[-] Fail")
        exit()
    x_v = data[variable][0] - data[variable][0][0]
    y_v = data[variable][1]
    print("[+] Success")

    # We now try to find the right-hand limit of the exponential (on the
    # x-axis). For that, we look for the maximum of the derivative; we first
    # need to apply a filter to remove noise.
    print("Applying a gaussian filter...")
    size_window = len(y_v) // 10
    std = size_window // 3
    window = signal.gaussian(size_window, std=std)
    y_conv = np.convolve(y_v, window, 'valid')  # gaussian filter
    print("[+] Success")

    # We then apply a second filter to the derivative, and take its argmax
    print("Getting the right boundary...")
    y_prime_v = np.diff(y_conv, n=1)
    y_prime_conv = np.convolve(y_prime_v, window, 'valid')
    right_bound = np.argmax(y_prime_conv) + size_window
    # we add size_window to compensate for the two convolutions
    print("[+] Success")

    print("Fitting the curve...")
    # Initialization parameters: a naive exponential that passes through the
    # two limit points (obtained by just writing down the equation system)
    x1, x2 = x_v[0], x_v[right_bound]
    y1, y2 = y_v[0], y_v[right_bound]
    d = (x1 - x2) / (np.log(y1) - np.log(y2))
    x0 = x1 - d * np.log(y1)

    def expo(x, d, c, x0):
        return np.exp((x - x0) / d) + c

    popt, pcov = curve_fit(expo, x_v[:right_bound], y_v[:right_bound],
                           maxfev=10000, p0=[d, y_v[0], x0])
    print("[+] Success")
    print("Found growth rate : ", popt[0])

    plt.plot(x_v, y_v, label="Real data")
    plt.plot(x_v[:right_bound], expo(x_v[:right_bound], *popt),
             c="red", label="Exponential curve fitted")
    return popt[0]
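
# Self-contained sketch of the fitting step above on synthetic data (all
# values illustrative), showing that curve_fit recovers the growth time d
# from a noisy exponential rise when the initial guess is reasonable:
import numpy as np
from scipy.optimize import curve_fit

def expo_demo(x, d, c, x0):
    return np.exp((x - x0) / d) + c

x = np.linspace(0, 100, 2000)
rng = np.random.default_rng(0)
y = expo_demo(x, 20.0, 0.05, 30.0) + 0.02 * rng.standard_normal(x.size)
popt, _ = curve_fit(expo_demo, x, y, p0=[15.0, 0.0, 25.0], maxfev=10000)
print(popt[0])  # close to the true growth time of 20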
w[49][25] = 1.0
image_new = signal.fftconvolve(image, w)

plt.figure()
plt.imshow(image)
plt.gray()
plt.title('Original image')
plt.show()

plt.figure()
plt.imshow(image_new)
plt.gray()
plt.title('Filtered image')
plt.show()

image = misc.ascent()
# note: recent SciPy versions require an odd-length filter for sepfir2d,
# e.g. signal.gaussian(51, 10.0)
w = signal.gaussian(50, 10.0)
image_new = signal.sepfir2d(image, w, w)

plt.figure()
plt.imshow(image)
plt.gray()
plt.title('Original image')
plt.show()

plt.figure()
plt.imshow(image_new)
plt.gray()
plt.title('Filtered image')
plt.show()
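
# The two approaches above are related: a 2-D gaussian kernel is separable,
# so filtering with sepfir2d along each axis should match fftconvolve with
# the outer product of the 1-D window, up to boundary handling. A sketch,
# using an odd-length window:
import numpy as np
from scipy import signal, misc

img = misc.ascent().astype(float)
w1 = signal.gaussian(51, 10.0)
sep = signal.sepfir2d(img, w1, w1)
full = signal.fftconvolve(img, np.outer(w1, w1), mode='same')
# away from the borders the two results agree
print(np.allclose(sep[100:400, 100:400], full[100:400, 100:400]))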
w2 = 0.01
time = arange(-5, 2.5 - W1 + W2, W2)


def sig_bar(sigs, axis, y, color):
    w = np.diff(axis)[0]
    continuity = np.diff(sigs)
    for i, c in enumerate(continuity):
        beg = axis[sigs[i]]
        end = beg + w
        fill_between([beg, end], [y[0], y[0]], [y[1], y[1]], color=color)


b = gaussian(5, 10)
b = ones(5)  # overrides the gaussian taper with a boxcar window

num_cores = multiprocessing.cpu_count()


def smooth_i(i, h):
    m = nanmean(h[:, i], 1)
    p = filters.convolve1d(m, b / b.sum())
    return p


def exact_mc_perm_test2(xs, ys, nmc):
    # Monte-Carlo permutation test for a difference in means. Note that
    # shuffling xs alone cannot test this, because np.mean(xs - ys) is
    # invariant under a permutation of xs; hence both samples are pooled
    # and the group assignment is redrawn on each iteration.
    n = len(xs)
    zs = np.concatenate([xs, ys])
    diff = np.mean(xs) - np.mean(ys)
    k = 0
    for j in range(nmc):
        shuffle(zs)
        k += diff < np.mean(zs[:n]) - np.mean(zs[n:])
    return k / float(nmc)  # one-sided tail fraction
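
# Usage sketch for exact_mc_perm_test2 with synthetic data: under the null
# the returned fraction is around 0.5 on average; with a genuine positive
# shift of xs it drops towards 0.
import numpy as np
from numpy.random import shuffle

rng = np.random.default_rng(0)
xs = rng.standard_normal(100) + 0.5
ys = rng.standard_normal(100)
print(exact_mc_perm_test2(xs, ys, 1000))  # small, i.e. a significant shift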