def HR_func_generator(t1):
    """Generate a random heart-rate trend curve over the time base ``t1``.

    A baseline HR in [50, 180) bpm and an excursion of 1..50 bpm (capped so the
    peak stays <= 180) are drawn at random, then shaped by one of four window
    functions (triangular / half-triangular / Tukey / half-Tukey).

    Parameters
    ----------
    t1 : sequence
        Time base; only its length is used, the curve has ``len(t1)`` samples.

    Returns
    -------
    HR_curve_f : np.ndarray of float32
        The generated HR trend, values in [B_HR, B_HR + D_HR].
    D_HR : int
        The (non-zero) HR excursion in bpm.
    """
    arr_HR = np.arange(50, 180)  # possible baseline heart rates (bpm)

    # Candidate trend shapes. Each maps (baseline B, excursion D) -> float32 curve.
    f1 = lambda B, D: ((D * win.triang(len(t1))).astype(int) + B).astype(np.float32)  # triang
    f2 = lambda B, D: ((D * win.triang(2 * len(t1))).astype(int) + B).astype(np.float32)[:len(t1)]  # 1st half of triang
    f3 = lambda B, D: ((D * win.tukey(len(t1), alpha=(0.3 * np.random.rand() + 0.7))).astype(int) + B).astype(np.float32)  # tukey
    f4 = lambda B, D: ((D * win.tukey(2 * len(t1), alpha=(0.3 * np.random.rand() + 0.7))).astype(int) + B).astype(np.float32)[:len(t1)]  # 1st half of tukey
    arr_f = np.array(1 * [f1] + 1 * [f2] + 1 * [f3] + 1 * [f4])  # proportions of shapes can be tuned here

    # Randomly select baseline and excursion; resample until excursion is non-zero.
    D_HR = 0
    D_HR_max = 50
    while D_HR == 0:
        # BUGFIX: HRs must be reset on every attempt. Previously the list only
        # grew while B_HR/D_HR were always read from HRs[0]/HRs[1], so a zero
        # excursion would have looped forever.
        HRs = []
        HRs += [arr_HR[np.random.randint(len(arr_HR))]]
        HR_range = np.arange(HRs[0] + 1, min([HRs[0] + D_HR_max, 180]) + 1)
        HRs += [HR_range[np.random.randint(len(HR_range))]]
        B_HR, D_HR = HRs[0], HRs[1] - HRs[0]

    HR_curve_f = arr_f[np.random.randint(len(arr_f))](B_HR, D_HR)  # trend
    return HR_curve_f, D_HR
def test_basic(self): assert_allclose(windows.triang(6, True), [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) assert_allclose(windows.triang(7), [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) assert_allclose(windows.triang(6, sym=False), [1/4, 1/2, 3/4, 1, 3/4, 1/2])
def test_basic(self): assert_allclose(windows.triang(6, True), [1/6, 1/2, 5/6, 5/6, 1/2, 1/6]) assert_allclose(windows.triang(7), [1/4, 1/2, 3/4, 1, 3/4, 1/2, 1/4]) assert_allclose(windows.triang(6, sym=False), [1/4, 1/2, 3/4, 1, 3/4, 1/2])
def prepare(paths, res, factor, train):
    """Load, preprocess and label retina images into flat training arrays.

    Images are separated, resized to res x res, equalized, converted to
    grayscale and smoothed with a normalized triangular blur kernel; labels
    are binarized (level 0 -> 1, any other level -> -1).

    Returns (train_x, train_y) as float32 features / int32 labels.
    """
    ram(paths, res, train)
    labels = pandas.read_csv('D:\\_Retina_Data\\trainLabels.csv')

    train_x = np.zeros((len(paths), res * res), dtype=np.float32)
    train_y = np.zeros(len(paths), dtype=np.int32)

    # separable triangular blur kernel, normalized to unit sum
    side = factor * 2 + 1
    tri = triang(side).reshape(side, 1)
    kernel = np.dot(tri, tri.T)
    kernel /= np.sum(kernel)

    for idx, path in enumerate(tqdm(paths, desc="Preparing images")):
        img = cv.imread(path)
        img = separate(img)
        img = resize(img, res)
        img = equalize(img)
        img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        img = cv.filter2D(img, -1, kernel)
        train_x[idx, ...] = img.flatten() / 255.0
        # image name (filename without extension) keys into the label table
        image_key = path.split("\\")[-1].split(".")[0]
        train_y[idx] = labels.loc[labels['image'] == image_key].iloc[0]['level']

    # binarize: healthy (level 0) -> 1, everything else -> -1
    train_y[train_y != 0] = -1
    train_y[train_y == 0] = 1
    return train_x, train_y
def harmonicModel(x, fs, w, N, t, nH, minf0, maxf0, f0et):
    """
    Analysis/synthesis of a sound using the sinusoidal harmonic model
    x: input sound, fs: sampling rate, w: analysis window,
    N: FFT size (minimum 512), t: threshold in negative dB,
    nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
    maxf0: maximim f0 frequency in Hz,
    f0et: error threshold in the f0 detection (ex: 5),
    returns y: output array sound
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))        # half analysis window size by floor
    x = np.append(np.zeros(hM2), x)  # add zeros at beginning to center first window at sample 0
    x = np.append(x, np.zeros(hM1))  # add zeros at the end to analyze last sample
    Ns = 512                         # FFT size for synthesis (even)
    # BUGFIX: integer division — with `/` these are floats on Python 3 and every
    # slice/index below raises TypeError.
    H = Ns // 4                      # hop size used for analysis and synthesis
    hNs = Ns // 2
    pin = max(hNs, hM1)              # init sound pointer in middle of anal window
    pend = x.size - max(hNs, hM1)    # last sample to start a frame
    yh = np.zeros(Ns)                # initialize output sound frame
    y = np.zeros(x.size)             # initialize output array
    w = w / sum(w)                   # normalize analysis window
    sw = np.zeros(Ns)                # initialize synthesis window
    ow = triang(2 * H)               # overlapping window
    # BUGFIX: assign the window itself; `int(ow)` on an array raised TypeError.
    sw[hNs - H:hNs + H] = ow
    bh = blackmanharris(Ns)          # synthesis window
    bh = bh / sum(bh)                # normalize synthesis window
    sw[hNs - H:hNs + H] = sw[hNs - H:hNs + H] / bh[hNs - H:hNs + H]  # window for overlap-add
    hfreqp = []
    f0t = 0
    f0stable = 0
    while pin < pend:
        # -----analysis-----
        x1 = x[pin - hM1:pin + hM2]                                  # select frame
        mX, pX = DFT.dftAnal(x1, w, N)                               # compute dft
        ploc = UF.peakDetection(mX, t)                               # detect peak locations
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)          # refine peak values
        ipfreq = fs * iploc / N
        f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable)  # find f0
        if ((f0stable == 0) & (f0t > 0)) \
                or ((f0stable > 0) & (np.abs(f0stable - f0t) < f0stable / 5.0)):
            f0stable = f0t  # consider a stable f0 if it is close to the previous one
        else:
            f0stable = 0
        hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t,
                                                nH, hfreqp, fs)      # find harmonics
        hfreqp = hfreq
        # -----synthesis-----
        Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)            # generate spec sines
        fftbuffer = np.real(ifft(Yh))                                # inverse FFT
        yh[:hNs - 1] = fftbuffer[hNs + 1:]                           # undo zero-phase window
        yh[hNs - 1:] = fftbuffer[:hNs + 1]
        y[pin - hNs:pin + hNs] += sw * yh                            # overlap-add
        pin += H                                                     # advance sound pointer
    y = np.delete(y, range(hM2))               # delete half of first window added in stftAnal
    y = np.delete(y, range(y.size - hM1, y.size))  # delete padding added for the last frame
    return y
def _get_kernel_window(kernel, ks, sigma):
    """Build a 1-D FDS smoothing kernel of size ``ks`` normalized to unit sum.

    kernel: one of 'gaussian', 'triang', 'laplace'
    ks: kernel size (odd; half width is (ks-1)//2)
    sigma: gaussian std / laplace scale (unused for 'triang')
    Returns a float32 CUDA tensor of length ks.
    """
    assert kernel in ['gaussian', 'triang', 'laplace']
    half_ks = (ks - 1) // 2
    if kernel == 'gaussian':
        base_kernel = np.array([0.] * half_ks + [1.] + [0.] * half_ks,
                               dtype=np.float32)
        # hoisted: was computed twice (once for the values, once for the sum)
        smoothed = gaussian_filter1d(base_kernel, sigma=sigma)
        kernel_window = smoothed / smoothed.sum()
    elif kernel == 'triang':
        kernel_window = triang(ks) / sum(triang(ks))
    else:
        laplace = lambda x: np.exp(-abs(x) / sigma) / (2. * sigma)
        # explicit array instead of relying on list / np-scalar coercion
        weights = np.array([laplace(x) for x in np.arange(-half_ks, half_ks + 1)])
        kernel_window = weights / weights.sum()
    print(f'Using FDS: [{kernel.upper()}] ({ks}/{sigma})')
    return torch.tensor(kernel_window, dtype=torch.float32).cuda()
def get_lds_kernel_window(kernel, ks, sigma):
    """Return a 1-D label-distribution-smoothing window of length ``ks``.

    'gaussian' and 'laplace' windows are scaled so their peak is 1;
    'triang' is returned as produced by scipy. ``sigma`` is the gaussian
    std / laplace scale and is ignored for 'triang'.
    """
    assert kernel in ['gaussian', 'triang', 'laplace']
    half_ks = (ks - 1) // 2

    if kernel == 'triang':
        return triang(ks)

    if kernel == 'gaussian':
        impulse = [0.] * half_ks + [1.] + [0.] * half_ks
        smoothed = gaussian_filter1d(impulse, sigma=sigma)
        return smoothed / max(smoothed)

    # laplace
    def laplace(x):
        return np.exp(-abs(x) / sigma) / (2. * sigma)

    values = [laplace(x) for x in np.arange(-half_ks, half_ks + 1)]
    return values / max(values)
def triang(dataset, **kwargs):
    r"""
    Calculate triangular apodization with non-null extremities and maximum
    value normalized to 1, applied to the last dimension of the data.

    Parameters
    ----------
    dataset : array
        Input dataset.
    **kwargs
        Accepted for API compatibility with the other apodization functions
        (e.g. `dim`, `inv`, `rev`, `inplace`, `retapod`).

        NOTE(review): the previous docstring documented these keywords (and a
        `retapod` second return value) as implemented, but the body ignores
        ``kwargs`` entirely and always multiplies along the last axis — the
        documentation now reflects the actual behavior. TODO: implement the
        keywords or drop them.

    Returns
    -------
    apodized
        ``dataset`` multiplied by a symmetric triangular window of the same
        length.

    See Also
    --------
    gm, sp, sine, sinm, qsin, hamming, bartlett, blackmanharris
    """
    x = dataset
    return x * windows.triang(len(x), sym=True)
def sineModel(x, fs, w, N, t):
    """
    Analysis/synthesis of a sound using the sinusoidal model, without sine tracking
    x: input array sound, w: analysis window, N: size of complex spectrum,
    t: threshold in negative dB
    returns y: output array sound
    """
    hM1 = int(math.floor((w.size + 1) / 2))  # half analysis window size by rounding
    hM2 = int(math.floor(w.size / 2))        # half analysis window size by floor
    Ns = 512                                 # FFT size for synthesis (even)
    # BUGFIX: integer division — with `/` these are floats on Python 3 and the
    # slices/indices below (sw[hNs-H:...], x[pin-hM1:...]) raise TypeError.
    H = Ns // 4                              # hop size used for analysis and synthesis
    hNs = Ns // 2                            # half of synthesis FFT size
    pin = max(hNs, hM1)                      # init sound pointer in middle of anal window
    pend = x.size - max(hNs, hM1)            # last sample to start a frame
    yw = np.zeros(Ns)                        # initialize output sound frame
    y = np.zeros(x.size)                     # initialize output array
    w = w / sum(w)                           # normalize analysis window
    sw = np.zeros(Ns)                        # initialize synthesis window
    ow = triang(2 * H)                       # triangular window
    sw[hNs - H:hNs + H] = ow                 # add triangular window
    bh = blackmanharris(Ns)                  # blackmanharris window
    bh = bh / sum(bh)                        # normalized blackmanharris window
    sw[hNs - H:hNs + H] = sw[hNs - H:hNs + H] / bh[hNs - H:hNs + H]  # normalized synthesis window
    while pin < pend:  # while input sound pointer is within sound
        # -----analysis-----
        x1 = x[pin - hM1:pin + hM2]                          # select frame
        mX, pX = DFT.dftAnal(x1, w, N)                       # compute dft
        ploc = UF.peakDetection(mX, t)                       # detect locations of peaks
        iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)  # refine peak values by interpolation
        ipfreq = fs * iploc / float(N)                       # convert peak locations to Hertz
        # -----synthesis-----
        Y = UF.genSpecSines(ipfreq, ipmag, ipphase, Ns, fs)  # generate sines in the spectrum
        fftbuffer = np.real(ifft(Y))                         # compute inverse FFT
        yw[:hNs - 1] = fftbuffer[hNs + 1:]                   # undo zero-phase window
        yw[hNs - 1:] = fftbuffer[:hNs + 1]
        y[pin - hNs:pin + hNs] += sw * yw                    # overlap-add with synthesis window
        pin += H                                             # advance sound pointer
    return y
def sineModelSynth(tfreq, tmag, tphase, N, H, fs):
    """
    Synthesis of a sound using the sinusoidal model
    tfreq, tmag, tphase: frequencies, magnitudes and phases of sinusoids
    N: synthesis FFT size, H: hop size, fs: sampling rate
    returns y: output array sound
    """
    hN = N // 2                       # half of FFT size for synthesis
    num_frames = tfreq.shape[0]       # number of frames
    out_ptr = 0                       # output sound pointer
    y = np.zeros(H * (num_frames + 3))  # output array (with padding)

    # synthesis window: triangular overlap-add window normalized by a
    # unit-sum Blackman-Harris window
    sw = np.zeros(N)
    sw[hN - H:hN + H] = triang(2 * H)
    bh = blackmanharris(N)
    bh = bh / sum(bh)
    sw[hN - H:hN + H] = sw[hN - H:hN + H] / bh[hN - H:hN + H]

    prev_freq = tfreq[0, :]                                   # for phase propagation
    phase = 2 * np.pi * np.random.rand(tfreq[0, :].size)      # random initial phases

    for frame in range(num_frames):
        if tphase.size > 0:
            # phases were provided by the analysis: use them directly
            phase = tphase[frame, :]
        else:
            # otherwise propagate phases from the previous frame
            phase += (np.pi * (prev_freq + tfreq[frame, :]) / fs) * H
        Y = UF.genSpecSines(tfreq[frame, :], tmag[frame, :], phase, N, fs)
        prev_freq = tfreq[frame, :]
        phase = phase % (2 * np.pi)                           # wrap into [0, 2*pi)
        yw = np.real(fftshift(ifft(Y)))                       # inverse FFT
        y[out_ptr:out_ptr + N] += sw * yw                     # windowed overlap-add
        out_ptr += H

    # trim the half-window padding at both ends
    y = np.delete(y, range(hN))
    y = np.delete(y, range(y.size - hN, y.size))
    return y
def get_lds_kernel_window(lds_kernel="gaussian", lds_ks=9, lds_sigma=1):
    r"""Determine the label distribution smoothing (LDS) kernel window.

    lds_kernel (str): LDS kernel type ('gaussian', 'triang' or 'laplace')
    lds_ks (int): LDS kernel size (should be an odd number).
    lds_sigma (float): LDS gaussian/laplace kernel sigma (ignored for 'triang')
    """
    assert lds_kernel in ['gaussian', 'triang', 'laplace']
    half = (lds_ks - 1) // 2

    if lds_kernel == 'gaussian':
        # blur a unit impulse, then scale so the peak equals 1
        impulse = [0.] * half + [1.] + [0.] * half
        blurred = gaussian_filter1d(impulse, sigma=lds_sigma)
        kernel_window = blurred / max(blurred)
    elif lds_kernel == 'triang':
        kernel_window = triang(lds_ks)
    else:
        # laplace kernel, peak-normalized
        offsets = np.arange(-half, half + 1)
        weights = [np.exp(-abs(o) / lds_sigma) / (2. * lds_sigma) for o in offsets]
        kernel_window = weights / max(weights)

    return kernel_window
def sineSubtraction(x, N, H, sfreq, smag, sphase, fs):
    """
    Subtract sinusoids from a sound
    x: input sound, N: fft-size, H: hop-size
    sfreq, smag, sphase: sinusoidal frequencies, magnitudes and phases
    returns xr: residual sound
    """
    # BUGFIX: integer division — a float hN breaks np.zeros(hN) and every
    # slice below on Python 3.
    hN = N // 2                                  # half of fft size
    x = np.append(np.zeros(hN), x)               # center first window at sample 0
    x = np.append(x, np.zeros(hN))               # pad to analyze last sample
    bh = blackmanharris(N)                       # blackman harris window
    w = bh / sum(bh)                             # normalize window
    sw = np.zeros(N)                             # synthesis window:
    sw[hN - H:hN + H] = triang(2 * H) / w[hN - H:hN + H]  # triangular / analysis window
    L = sfreq.shape[0]                           # number of frames
    xr = np.zeros(x.size)                        # output residual
    pin = 0
    for frame in range(L):
        xw = x[pin:pin + N] * w                                  # window the input sound
        X = fft(fftshift(xw))                                    # compute FFT
        Yh = UF_C.genSpecSines(N * sfreq[frame, :] / fs,
                               smag[frame, :], sphase[frame, :], N)  # generate spec sines
        Xr = X - Yh                                              # subtract sines from spectrum
        xrw = np.real(fftshift(ifft(Xr)))                        # inverse FFT
        xr[pin:pin + N] += xrw * sw                              # overlap-add
        pin += H                                                 # advance sound pointer
    xr = np.delete(xr, range(hN))                # drop half of first window (padding)
    xr = np.delete(xr, range(xr.size - hN, xr.size))  # drop half of last window (padding)
    return xr
def pre_processing(self):
    """
    Complete various pre-processing steps for encoded protein sequences before
    doing any of the DSP-related functions or transformations. Zero-pad the
    sequences, remove any +/- infinity or NAN values, get the approximate
    protein spectra and window function parameter names.

    Parameters
    ----------
    :self (PyDSP object):
        instance of PyDSP class.

    Returns
    -------
    None
    """
    #zero-pad encoded sequences so they are all the same length
    self.protein_seqs = zero_padding(self.protein_seqs)

    #get shape parameters of proteins seqs
    self.num_seqs = self.protein_seqs.shape[0]
    self.signal_len = self.protein_seqs.shape[1]

    #replace any positive or negative infinity or NAN values with 0
    self.protein_seqs[self.protein_seqs == -np.inf] = 0
    self.protein_seqs[self.protein_seqs == np.inf] = 0
    # NOTE(review): `x == np.nan` is always False (NaN != NaN), so this line is
    # a no-op; the nan_to_num call below is what actually clears NaNs.
    self.protein_seqs[self.protein_seqs == np.nan] = 0

    #replace any NAN's with 0's
    #self.protein_seqs.fillna(0, inplace=True)
    self.protein_seqs = np.nan_to_num(self.protein_seqs)

    #initialise zeros array to store all protein spectra
    self.fft_power = np.zeros((self.num_seqs, self.signal_len))
    self.fft_real = np.zeros((self.num_seqs, self.signal_len))
    self.fft_imag = np.zeros((self.num_seqs, self.signal_len))
    self.fft_abs = np.zeros((self.num_seqs, self.signal_len))

    #list of accepted spectra, window functions and filters
    all_spectra = ['power', 'absolute', 'real', 'imaginary']
    # BUGFIX: 'exponential' and 'flattop' were missing a separating comma, so
    # implicit string concatenation produced the single bogus entry
    # 'exponentialflattop' and neither window was ever matchable.
    all_windows = [
        'hamming', 'blackman', 'blackmanharris', 'gaussian', 'bartlett',
        'kaiser', 'barthann', 'bohman', 'chebwin', 'cosine', 'exponential',
        'flattop', 'hann', 'boxcar', 'hanning', 'nuttall', 'parzen',
        'triang', 'tukey'
    ]
    all_filters = [
        'savgol', 'medfilt', 'symiirorder1', 'lfilter', 'hilbert'
    ]

    #set required input parameters, raise error if spectrum is none
    if self.spectrum is None:
        raise ValueError(
            'Invalid input Spectrum type ({}) not available in valid spectra: {}'
            .format(self.spectrum, all_spectra))
    else:
        #get closest correct spectra from user input, if no close match then raise error
        spectra_matches = (get_close_matches(self.spectrum, all_spectra, cutoff=0.4))
        if spectra_matches == []:
            raise ValueError(
                'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                .format(self.spectrum, all_spectra))
        else:
            self.spectra = spectra_matches[0]  #closest match in array

    if self.window_type is None:
        self.window = 1  #window = 1 is the same as applying no window
    else:
        #get closest correct window function from user input
        # NOTE(review): this matches against self.window rather than
        # self.window_type — looks like it should be self.window_type;
        # confirm against how __init__ sets these attributes.
        window_matches = (get_close_matches(self.window, all_windows, cutoff=0.4))

        #get window function specified by window input parameter, if no match then window = 1
        if window_matches != []:
            if window_matches[0] == 'hamming':
                self.window = hamming(self.signal_len, sym=True)
                self.window_type = "hamming"
            elif window_matches[0] == "blackman":
                self.window = blackman(self.signal_len, sym=True)
                # BUGFIX: was `self.window = "blackman"`, which overwrote the
                # freshly-built window array with a string.
                self.window_type = "blackman"
            elif window_matches[0] == "blackmanharris":
                self.window = blackmanharris(self.signal_len, sym=True)
                self.window_type = "blackmanharris"
            elif window_matches[0] == "bartlett":
                self.window = bartlett(self.signal_len, sym=True)
                self.window_type = "bartlett"
            elif window_matches[0] == "gaussian":
                self.window = gaussian(self.signal_len, std=7, sym=True)
                self.window_type = "gaussian"
            elif window_matches[0] == "kaiser":
                self.window = kaiser(self.signal_len, beta=14, sym=True)
                self.window_type = "kaiser"
            elif window_matches[0] == "hanning":
                self.window = hanning(self.signal_len, sym=True)
                self.window_type = "hanning"
            elif window_matches[0] == "barthann":
                self.window = barthann(self.signal_len, sym=True)
                self.window_type = "barthann"
            elif window_matches[0] == "bohman":
                self.window = bohman(self.signal_len, sym=True)
                self.window_type = "bohman"
            elif window_matches[0] == "chebwin":
                # BUGFIX: chebwin() has a required attenuation argument `at`;
                # calling it without one raised TypeError. 100 dB is the
                # conventional value used in the scipy documentation.
                self.window = chebwin(self.signal_len, at=100, sym=True)
                self.window_type = "chebwin"
            elif window_matches[0] == "cosine":
                self.window = cosine(self.signal_len, sym=True)
                self.window_type = "cosine"
            elif window_matches[0] == "exponential":
                self.window = exponential(self.signal_len, sym=True)
                self.window_type = "exponential"
            elif window_matches[0] == "flattop":
                self.window = flattop(self.signal_len, sym=True)
                self.window_type = "flattop"
            elif window_matches[0] == "boxcar":
                self.window = boxcar(self.signal_len, sym=True)
                self.window_type = "boxcar"
            elif window_matches[0] == "nuttall":
                self.window = nuttall(self.signal_len, sym=True)
                self.window_type = "nuttall"
            elif window_matches[0] == "parzen":
                self.window = parzen(self.signal_len, sym=True)
                self.window_type = "parzen"
            elif window_matches[0] == "triang":
                self.window = triang(self.signal_len, sym=True)
                self.window_type = "triang"
            elif window_matches[0] == "tukey":
                self.window = tukey(self.signal_len, sym=True)
                self.window_type = "tukey"
        else:
            self.window = 1  #window = 1 is the same as applying no window

    #calculate convolution from protein sequences
    if self.convolution is not None:
        if self.window is not None:
            self.convoled_seqs = signal.convolve(
                self.protein_seqs, self.window, mode='same') / sum(self.window)

    if self.filter is not None:
        #get closest correct filter from user input
        filter_matches = (get_close_matches(self.filter, all_filters, cutoff=0.4))

        #set filter attribute according to approximate user input
        # NOTE(review): these calls pass the signal length where the scipy
        # filter functions expect the data array (and required arguments such
        # as savgol_filter's polyorder are missing) — as written they raise at
        # runtime; verify the intended usage before relying on this branch.
        if filter_matches != []:
            if filter_matches[0] == 'savgol':
                self.filter = savgol_filter(self.signal_len, self.signal_len)
            elif filter_matches[0] == 'medfilt':
                self.filter = medfilt(self.signal_len)
            elif filter_matches[0] == 'symiirorder1':
                self.filter = symiirorder1(self.signal_len, c0=1, z1=1)
            elif filter_matches[0] == 'lfilter':
                self.filter = lfilter(self.signal_len)
            elif filter_matches[0] == 'hilbert':
                self.filter = hilbert(self.signal_len)
        else:
            self.filter = ""  #no filter
def gain(data, dt, option1, parameters, option2):
    '''
    GAIN: Gain a group of traces.

      gain(d,dt,option1,parameters,option2);

      IN   d(nt,nx):   traces
           dt:         sampling interval
           option1 = 'time'  parameters = [a,b],
                             gain = t**a * exp(b*t)
                             NOTE(review): the original header documented
                             exp(-b*t) but the code applies exp(+b*t); the
                             code behavior is preserved here — confirm the
                             intended sign convention.
                   = 'agc'   parameters = agc_gate, length of the agc gate
                             in secs (a scalar, unlike the list for 'time')
           option2 = 0  No normalization
                   = 1  Normalize each trace by amplitude
                   = 2  Normalize each trace by rms value

      OUT  dout(nt,nx): traces after application of gain function
    '''
    nt, nx = data.shape
    dout = np.zeros(data.shape)

    if option1 == 'time':
        a = parameters[0]
        b = parameters[1]
        t = [i * dt for i in range(nt)]
        # time-power / exponential gain curve applied to every trace
        tgain = [(ti ** a) * math.exp(ti * b) for ti in t]
        for k in range(nx):
            dout[:, k] = data[:, k] * tgain
    elif option1 == 'agc':
        # triangular smoothing window spanning ~agc_gate seconds;
        # explicit int so the window length is well-defined
        L = parameters / dt + 1
        L = int(np.floor(L / 2))  # half gate length in samples
        h = triang(2 * L + 1)
        shaped_h = h.reshape(len(h), 1)
        for k in range(nx):
            aux = data[:, k]
            e = aux ** 2
            shaped_e = e.reshape(len(e), 1)
            rms = np.sqrt(conv2(shaped_e, shaped_h, "same"))  # windowed RMS envelope
            epsi = 1e-10 * max(rms)  # stabilizer against divide-by-zero
            op = rms / (rms ** 2 + epsi)
            op = op.reshape(len(op),)
            dout[:, k] = data[:, k] * op

    #Normalize by amplitude
    if option2 == 1:
        for k in range(nx):
            aux = dout[:, k]
            amax = max(abs(aux))
            dout[:, k] = dout[:, k] / amax
    #Normalize by rms
    if option2 == 2:
        for k in range(nx):
            aux = dout[:, k]
            amax = np.sqrt(sum(aux ** 2) / nt)
            dout[:, k] = dout[:, k] / amax

    return dout