def getCurvVectors(X, MaxOrder, sigma, loop=False, m='nearest'):
    """
    Get smoothed curvature vectors up to a particular order
    :param X: An N x d matrix of points in R^d
    :param MaxOrder: The maximum order of torsion to compute
        (e.g. 3 for position, velocity, curvature, and torsion)
    :param sigma: The smoothing amount
    :param loop: Treat this trajectory as a topological loop
        (i.e. add an edge between the first and last point)?
    :param m: Boundary mode passed to gaussian_filter1d (forced to 'wrap' when loop is True)
    """
    if loop:
        m = 'wrap'
    XSmooth = gf1d(X, sigma, axis=0, order=0, mode=m)
    Vel = gf1d(X, sigma, axis=0, order=1, mode=m)
    VelNorm = np.sqrt(np.sum(Vel**2, 1))
    VelNorm[VelNorm == 0] = 1
    Curvs = [XSmooth, Vel]
    for order in range(2, MaxOrder + 1):
        Tors = gf1d(X, sigma, axis=0, order=order, mode=m)
        for j in range(1, order):
            # Project away other components
            NormsDenom = np.sum(Curvs[j]**2, 1)
            NormsDenom[NormsDenom == 0] = 1
            Norms = np.sum(Tors*Curvs[j], 1)/NormsDenom
            Tors = Tors - Curvs[j]*Norms[:, None]
        Tors = Tors/(VelNorm[:, None]**order)
        Curvs.append(Tors)
    return Curvs
def get_derivative_shells(patches, pd, n_shells, shells_fn=get_shells,
                          orders=[0, 1, 2], r_max=None):
    """
    Compute shell descriptors on the raw patch and after derivative
    filters have been applied to the patch.
    """
    patchesim = np.reshape(patches, (patches.shape[0], pd[0], pd[1]))
    all_shells = []
    for order in orders:
        if order == 0:
            shells = shells_fn(patches, pd, n_shells, r_max)
        else:
            imx = gf1d(patchesim, sigma=1, order=order, axis=2)
            imy = gf1d(patchesim, sigma=1, order=order, axis=1)
            patchesorder = np.sqrt(imx**2 + imy**2)
            shells = shells_fn(np.reshape(patchesorder, patches.shape), pd, n_shells, r_max)
        # Normalize each by the standard deviation so they are comparable
        shells /= np.std(shells)
        all_shells.append(shells)
    return np.concatenate(tuple(all_shells), 1)
def gau_smooth(arr0, typ='single', krn=75.0/4.0, verbose=False):
    import numpy as np
    from scipy.ndimage.filters import gaussian_filter1d as gf1d
    from scipy.ndimage.filters import gaussian_filter as gf

    N = arr0.ndim
    modes = ['reflect', 'wrap', 'wrap']
    if isinstance(krn, float):
        krn = [krn, krn, krn]

    if typ == 'single':
        ret = np.zeros(arr0.shape)
        for i in range(3):
            if verbose:
                print('Smoothing type: wrap, component: {}'.format(i))
            ret[i] = gf(arr0[i], sigma=krn, mode='wrap')
        return ret
    elif typ == 'piecewise':
        # Smooth one axis at a time, accumulating the result in a single array
        # (writing into locals() does not create local variables inside a function)
        arr = arr0
        for i in range(3):
            if N == 3:
                cax = i
            if N == 4:
                cax = i + 1
            if verbose:
                print('Axis = {}; k = {}, mode = {}'.format(cax, krn[i], modes[i]))
            arr = gf1d(arr, axis=cax, sigma=krn[i], mode=modes[i])
        if verbose:
            print('Returning smoothed array')
        return arr
def plot_spectrum(wavelength, spectrum, ax=None, plot_raw=True, smooth=10,
                  raw_kwargs=dict(markersize=1., color='k', linestyle='', marker='.'),
                  **kwargs):
    """
    Standardised method for plotting a spectrum on a matplotlib axes.
    The raw spectrum is plotted as dots and a smoothed line is drawn through it.

    Args:
        wavelength (np.ndarray): array of wavelengths
        spectrum (np.ndarray): array of spectrum values
        ax (matplotlib.axis): A matplotlib axis instance. If none exists then one
            will be created
        plot_raw (bool): Plot the raw data as black dots
        smooth (float): sigma for Gaussian smoothing
        raw_kwargs (dict): Dictionary of keyword arguments passed to the axes plot
            method for the raw spectrum plot
        **kwargs: Arbitrary keyword arguments passed to the axes plot method for the
            smoothed spectrum plot
    """
    if ax is None:
        ax = plt.gca()
    if ax is None:
        ax = plt.subplot(111)

    if plot_raw:
        print('plotting')
        ax.plot(wavelength, spectrum, **raw_kwargs)

    if 'linestyle' not in kwargs:
        kwargs['linestyle'] = '-'
    ax.plot(wavelength, gf1d(spectrum, smooth), **kwargs)
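
# Hedged usage sketch (not from the original source): plot a synthetic spectrum.
# Assumes numpy as np, matplotlib.pyplot as plt, and gf1d are imported as in the module above.
wavelength = np.linspace(4000., 7000., 3000)
spectrum = np.sin(wavelength / 200.) + 0.3 * np.random.randn(wavelength.size)
plot_spectrum(wavelength, spectrum, smooth=15, color='C0', label='smoothed')
plt.legend()
plt.show()
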
def plot_rewards(rewards, args):
    plt.cla()
    p = plt.plot(rewards, alpha=0.3)
    plt.plot(gf1d(rewards, sigma=15), c=p[0].get_color())
    plt.title('BipedalWalker-v2: Test MP')
    plt.savefig('./{}_rewards.png'.format(args.env_name))
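
# Hedged usage sketch: plot_rewards only needs an object with an env_name attribute,
# so a SimpleNamespace stands in for the parsed args here (values are illustrative).
from types import SimpleNamespace
demo_rewards = list(np.cumsum(np.random.randn(500)))
plot_rewards(demo_rewards, SimpleNamespace(env_name='BipedalWalker-v2'))
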
def plot_fps():
    data = pd.read_csv('fps_data.csv')
    for col in data.columns:
        p = plt.plot(gf1d(data[col], sigma=5), label=col)
        plt.plot(data[col], alpha=0.2, color=p[0].get_color())
    plt.legend()
    plt.title('FPS per Tracking method', weight='bold')
    plt.show()
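
# Hedged sketch: plot_fps() reads 'fps_data.csv' from the working directory, so a small
# dummy file is written first (the tracker column names here are placeholders).
import numpy as np
import pandas as pd
pd.DataFrame({'KCF': 25 + 5 * np.random.randn(200),
              'CSRT': 15 + 3 * np.random.randn(200)}).to_csv('fps_data.csv', index=False)
plot_fps()
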
def getAccelerationFeatureStack(X, sigma):
    """
    Compute an estimate of the acceleration of each component of a feature
    stack using convolution with a Gaussian derivative

    Parameters
    ----------
    X: ndarray(N, K)
        An array of features
    sigma: int
        Width of the Gaussian by which to convolve
    """
    return gf1d(X, sigma, axis=0, order=2, mode='nearest')
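
# Minimal sketch: second-derivative (acceleration) estimate for a two-column feature
# stack; assumes numpy as np and gf1d are imported as in the module above.
t = np.linspace(0, 1, 500)
feats = np.column_stack([np.sin(2 * np.pi * 3 * t), np.cos(2 * np.pi * 3 * t)])
accel = getAccelerationFeatureStack(feats, sigma=10)
print(accel.shape)  # (500, 2): one acceleration estimate per sample and feature
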
def create_hill_x_input():
    """ Create input for TenStream calculations on a Gaussian hill """
    from scipy.ndimage.filters import gaussian_filter as gf
    from scipy.ndimage.filters import gaussian_filter1d as gf1d
    from scipy.interpolate import interp1d

    # Create elevation map for Gaussian hill
    Nx, Ny = 3, 31
    DX = .1  # 100m horizontal resolution
    HILL_HEIGHT = 1.5  # 500m hill

    elev = np.zeros((Nx, Ny))
    elev[:, (Ny - 1) // 2] = 1.
    elev = gf1d(elev, HILL_HEIGHT / DX, axis=1)
    elev /= np.max(elev) / HILL_HEIGHT

    # Interpolate atmosphere file on new grid
    afglus = np.loadtxt('afglus_100m.dat')

    z = afglus[:, 0]
    p = afglus[:, 1]
    Nz = np.shape(z)[-1]

    pressure_by_height = interp1d(z, p, kind='linear')

    hhl = np.tile(z, Nx * Ny).reshape((Nx, Ny, Nz))[:, :, ::-1]  # hhl now begins at surface
    hill = hhl.copy()

    # Create surface-following sigma coordinates with coordinate stretching in the vertical
    for k in range(Nz):
        hill[:, :, k] = hhl[:, :, k] + elev * np.exp(-hhl[:, :, k] / HILL_HEIGHT)

    hill = np.minimum(hill, np.max(z))
    hill = hill[:, :, ::-1]  # hill now begins at TOA

    p_hill = pressure_by_height(hill)

    lev_coord = p_hill
    create_var('plev', interp_var(p, afglus[:, 1], lev_coord), 'lev')

    lay_coord = (lev_coord[:, :, 1:] + lev_coord[:, :, :-1]) / 2
    create_var('tlay', interp_var(p, afglus[:, 2], lay_coord), 'lay')
    create_var('air', interp_var(p, afglus[:, 3], lay_coord), 'lay')
    create_var('o3vmr', interp_var(p, afglus[:, 4], lay_coord), 'lay')
    create_var('o2vmr', interp_var(p, afglus[:, 5], lay_coord), 'lay')
    create_var('h2ovmr', interp_var(p, afglus[:, 6], lay_coord), 'lay')
    create_var('co2vmr', interp_var(p, afglus[:, 7], lay_coord), 'lay')
    create_var('n2ovmr', interp_var(p, afglus[:, 8], lay_coord), 'lay')
def data_augmentation(inputs, outputs, input_channels=[], kernels=(3, 5, 7, 11), noises=None):
    if not isinstance(input_channels, list):
        input_channels = [input_channels]
    input_channels = [ip for ip in input_channels if ip in list(inputs)]  # apply
    repeat_channels = [ip for ip in list(inputs) if ip not in input_channels]

    # Stack the original data with one Gaussian-smoothed copy per kernel width
    inputs_ = {ic: np.vstack((inputs[ic],) +
                             tuple([gf1d(inputs[ic], k, axis=1) for k in kernels]))
               for ic in input_channels}
    # Channels that are not smoothed are simply repeated so row counts match
    inputs_ = dict(inputs_, **{ic: np.vstack((inputs[ic],) +
                                             tuple([inputs[ic] for k in kernels]))
                               for ic in repeat_channels})

    if noises is not None:
        # Add one noisy copy per noise level
        inputs__ = {ic: np.vstack(tuple([inputs[ic] +
                                         np.random.normal(loc=0.0, scale=n, size=inputs[ic].shape)
                                         for n in noises]))
                    for ic in input_channels}
        inputs__ = dict(inputs__, **{ic: np.vstack(tuple([inputs[ic] for n in noises]))
                                     for ic in repeat_channels})
        inputs_ = {ic: np.vstack((inputs_[ic], inputs__[ic])) for ic in inputs_}

    # Repeat outputs so they stay aligned with the augmented inputs
    outputs_ = None  # returned unchanged when no outputs are given
    if outputs is not None:
        outputs_ = {oc: np.vstack((outputs[oc],) +
                                  tuple([outputs[oc] for k in kernels]))
                    for oc in list(outputs)}
        if noises is not None:
            outputs_ = {oc: np.vstack((outputs_[oc],) +
                                      tuple([outputs[oc] for n in noises]))
                        for oc in list(outputs)}

    return inputs_, outputs_
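
# Hedged usage sketch with made-up channel names 'x' and 'y': each augmented channel
# stacks the original rows, one smoothed copy per kernel, and one noisy copy per noise
# level, so the row count grows by a factor of 1 + len(kernels) + len(noises).
demo_inputs = {'x': np.random.rand(8, 100)}
demo_outputs = {'y': np.random.rand(8, 1)}
aug_in, aug_out = data_augmentation(demo_inputs, demo_outputs,
                                    input_channels=['x'],
                                    kernels=(3, 5), noises=[0.01, 0.05])
print(aug_in['x'].shape, aug_out['y'].shape)  # (40, 100) (40, 1)
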
def GaussBlurG1(Chro, sigma, sigrange=0, returnPDF=False, normalize=False):
    '''Outputs a new Chromosome which has converted 0s to 1s in Chro
    ... with odds based on a Gaussian blur of Chro'''
    # list comprehension needed to convert ints to floats
    blurred = gf1d([float(i) for i in Chro], sigma, mode='constant')
    if normalize:
        NormalizePDF(blurred, sum(Chro))
    if returnPDF:
        # converts 1s in Chro to 0s in blurred: because Chro is binary, it either doesn't
        # affect the value (if 0) or pushes it to 0 (if 1, which is always bigger than blurred)
        blurred = [max(0, i) for i in (blurred - Chro)]
        if sigrange:
            # raise NameError('CHECK FIRST WHETHER CUTOFF METHOD IS CORRECT, IT IS CURRENTLY UNTESTED')
            cutoff = stats.norm.pdf(sigma*sigrange, scale=sigma)  # I THINK THIS IS CORRECT, UNTESTED THOUGH
            # turn low values to 0 to decrease memory used in numpy array
            blurred = [0 if i < cutoff else i for i in blurred]
        return blurred
    return [1 if nprnd.rand() < nuc else 0 for nuc in Chro + blurred]
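
# Hedged usage sketch: GaussBlurG1 expects a binary chromosome array and relies on the
# module-level imports (gf1d, numpy.random as nprnd); the values below are illustrative.
import numpy.random as nprnd
demo_chro = (nprnd.rand(1000) < 0.05).astype(int)  # sparsely occupied positions
new_chro = GaussBlurG1(demo_chro, sigma=3.0)
print(sum(new_chro), 'positions occupied after probabilistic blur-based loading')
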
def getCurvVectors(X, MaxOrder, sigma, loop=False, m='nearest'):
    """
    Get smoothed curvature vectors up to a particular order

    Parameters
    ----------
    X: ndarray(N, d)
        An N x d matrix of points in R^d
    MaxOrder: int
        The maximum order of torsion to compute
        (e.g. 3 for position, velocity, curvature, and torsion)
    sigma: float
        The smoothing amount
    loop: boolean
        Whether to treat this trajectory as a topological loop
        (i.e. add an edge between the first and last point)

    Returns
    -------
    Curvs: list of ndarray(N, d)
        A list starting with the smoothed curve, followed by the smoothed
        velocity, curvature, torsion, etc., up to MaxOrder
    """
    if loop:
        m = 'wrap'
    XSmooth = gf1d(X, sigma, axis=0, order=0, mode=m)
    Vel = gf1d(X, sigma, axis=0, order=1, mode=m)
    VelNorm = np.sqrt(np.sum(Vel**2, 1))
    VelNorm[VelNorm == 0] = 1
    Curvs = [XSmooth, Vel]
    for order in range(2, MaxOrder + 1):
        Tors = gf1d(X, sigma, axis=0, order=order, mode=m)
        for j in range(1, order):
            # Project away other components
            NormsDenom = np.sum(Curvs[j]**2, 1)
            NormsDenom[NormsDenom == 0] = 1
            Norms = np.sum(Tors * Curvs[j], 1) / NormsDenom
            Tors = Tors - Curvs[j] * Norms[:, None]
        Tors = Tors / (VelNorm[:, None]**order)
        Curvs.append(Tors)
    return Curvs
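
# A minimal sketch (not from the original source): curvature vectors of a noisy circle,
# treated as a topological loop; assumes numpy as np and gf1d are imported as above.
t = np.linspace(0, 2 * np.pi, 200, endpoint=False)
circle = np.stack([np.cos(t), np.sin(t)], axis=1) + 0.01 * np.random.randn(200, 2)
curvs = getCurvVectors(circle, MaxOrder=2, sigma=5, loop=True)
XSmooth, Vel, Curv = curvs  # smoothed position, velocity, and curvature vectors
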
def getOnsetMeans(px, win=20, sigma=1, truncate=4, edge=10, do_plot=False):
    """
    Do a mollified Gaussian derivative followed by a moving average
    to get smoothed local tempo estimates
    """
    x = px[edge:-edge]  # Truncate edges since they seem to be unreliable
    x = gf1d(x, sigma, truncate=truncate, order=1, mode='reflect')
    x = x[truncate * sigma:-truncate * sigma]
    if do_plot:
        plt.figure()
        plt.subplot(211)
        plt.plot(px)
        plt.subplot(212)
        plt.plot(x)
        plt.show()
    M = x.size - win + 1
    X = np.zeros((M, win))
    for k in range(win):
        X[:, k] = x[k:k + M]
    ret = np.mean(X, 1)
    return ret / np.median(ret)
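
# Hedged sketch: px is assumed to be a 1-D curve of cumulative onset positions, so its
# mollified derivative approximates local tempo (the input below is synthetic).
px = np.cumsum(0.5 + 0.05 * np.sin(np.linspace(0, 4 * np.pi, 400)))
tempo = getOnsetMeans(px, win=20, sigma=1)
print(tempo.shape)  # windowed tempo estimates, normalized by their median
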
def G1phase(Chro, G1method=GaussBlurG1, power=gausspow):
    '''Gaussian blur (or other method) of Chro is made which functions as the blueprint
    for the chance of nascent CENP-A incorporation. The distribution is only made once at
    the beginning of G1 phase, meaning that nascent CENP-A does not guide more nascent
    CENP-A incorporation. This makes the code a lot faster. A method was built in to
    ensure that there is no 'reloading' of CENP-A at any given position'''
    global sendChro
    if gaussOffset:
        sendChro = np.zeros(ChroL)
        for nuc in xrange(ChroL):
            if Chro[nuc] == 1:
                if nuc >= gaussOffset:
                    sendChro[nuc-gaussOffset] += 0.5
                if nuc < ChroL-gaussOffset:
                    sendChro[nuc+gaussOffset] += 0.5
    else:
        sendChro = Chro
    Distrib = gf1d([float(i) for i in sendChro], sigma, mode='constant')
    Distrib = [max(0, i) for i in (Distrib - Chro)]
    if power != 1:  # >1 makes the distribution favor clusters (at least in theory)
        Distrib = np.power(Distrib, [power]*len(Distrib))
    print(sum(Distrib))
    # normalizes the distribution to sum to efficiency (i.e. random numbers above
    # efficiency will not lead to CA incorporation)
    Distrib /= sum(Distrib)/efficiency
    Cumulative = np.cumsum(Distrib)  # converts the distribution to a cumulative distribution list
    plt.plot(Distrib)
    print(sum(Distrib))
    reloads = 0
    for n in xrange(CApool):  # for each molecule in the available pool of CA
        X = nprnd.rand()
        if X < efficiency:  # test if it will be loaded or not
            newPos = np.searchsorted(Cumulative, X)
            # ensures that previously loaded sites are not 'reloaded'; this also alleviates
            # the necessity of converting 1s to 0s in GaussBlur
            while Chro[newPos]:
                X = nprnd.rand()/2
                newPos = np.searchsorted(Cumulative, X)
                reloads += 1  # counter to check how often a previously loaded site is revisited
            Chro[newPos] = 1  # convert a 0 from the cumulative distribution to 1
    return Chro, reloads
def G1phase(Chro):
    '''Gaussian blur (or other method) of Chro is made which functions as the blueprint
    for the chance of nascent CENP-A incorporation. The distribution is only made once at
    the beginning of G1 phase, meaning that nascent CENP-A does not guide more nascent
    CENP-A incorporation. This makes the code a lot faster. A method was built in to
    ensure that there is no 'reloading' of CENP-A at any given position'''
    # Create offset array which will be used to calculate the frequency distribution
    # of nascent CA loading
    if gaussOffset:
        OffsetChro = np.zeros(ChroL)
        for nuc in xrange(ChroL):
            if Chro[nuc] == 1:
                if nuc >= gaussOffset:
                    OffsetChro[nuc-gaussOffset] += 0.5
                if nuc < ChroL-gaussOffset:
                    OffsetChro[nuc+gaussOffset] += 0.5
    else:
        OffsetChro = np.array(Chro)
    # Create frequency distribution based on the Gaussian-blurred OffsetChro
    # (this line is no longer outsourced to the G1_phase_Methods file!)
    Distrib = gf1d([float(i) for i in OffsetChro], sigma, mode='constant')
    # convert 1s in Chro to 0s in Distrib so that there is no reloading of CA at old positions
    Distrib = [max(0, i) for i in (Distrib - Chro)]
    if gausspow != 1:  # >1 makes the distribution favor clusters (at least in theory)
        Distrib = np.power(Distrib, [gausspow]*len(Distrib))
    # normalizes the distribution to sum to efficiency (i.e. random numbers above
    # efficiency will not lead to CA incorporation)
    Distrib /= sum(Distrib)/efficiency
    Cumulative = np.cumsum(Distrib)  # converts the distribution to a cumulative distribution list
    reloads = 0  # counter to check how often a previously loaded site is revisited
    for n in xrange(CApool):  # for each molecule in the available pool of CA
        X = nprnd.rand()
        if X < efficiency:  # test if it will be loaded or not
            newPos = np.searchsorted(Cumulative, X)
            # ensures that previously loaded sites are not 'reloaded'; this also alleviates
            # the necessity of converting 1s to 0s in GaussBlur
            while Chro[newPos]:
                X = nprnd.rand()/2
                newPos = np.searchsorted(Cumulative, X)
                reloads += 1
            Chro[newPos] = 1  # convert a 0 from the cumulative distribution to 1
    return Chro, reloads
batch_reward.extend([p_r, n_r])

l2_update, mean_r, std_r = train_step(net, batch_noise, batch_reward)
p_bar.update(1)
last_r = eval_indiv(env, net)
p_bar.set_description('Reward: {}'.format(last_r))

writer.add_scalar('L2 update', np.mean(l2_update), step)
writer.add_scalar('Mean reward', mean_r, step)
writer.add_scalar('Reward std', std_r, step)

recap['mean_r'].append(mean_r)
recap['l2_updates'].append(np.mean(l2_update))
recap['std_r'].append(std_r)

p = plt.plot(recap['mean_r'], alpha=0.3)
plt.plot(gf1d(recap['mean_r'], sigma=5), color=p[0].get_color())
plt.title('Mean reward')
plt.savefig('mean.png')
plt.cla()

p = plt.plot(recap['l2_updates'], alpha=0.3)
plt.plot(gf1d(recap['l2_updates'], sigma=5), color=p[0].get_color())
plt.title('L2 update')
plt.savefig('l2.png')
plt.cla()

p = plt.plot(recap['std_r'], alpha=0.3)
plt.plot(gf1d(recap['std_r'], sigma=5), color=p[0].get_color())
plt.title('Std reward')
plt.savefig('std.png')
plt.cla()

p_bar.close()
def get_DLNC0(x, sr, hop_length, lag=10, do_plot=False):
    """
    Compute decaying locally adaptive normalized C0 (DLNC0) features

    Parameters
    ----------
    x: ndarray(N)
        Audio samples
    sr: int
        Sample rate
    hop_length: int
        Hop size between windows
    lag: int
        Number of lags to include

    Returns
    -------
    X: ndarray(12, n_win)
        The DLNC0 features
    """
    from scipy.ndimage.filters import gaussian_filter1d as gf1d
    from scipy.ndimage.filters import maximum_filter1d
    import librosa
    X = np.abs(librosa.cqt(x, sr=sr, hop_length=hop_length, bins_per_octave=12))
    # Half-wave rectify discrete derivative
    #X = librosa.amplitude_to_db(X, ref=np.max)
    #X[:, 0:-1] = X[:, 1::] - X[:, 0:-1]
    X = gf1d(X, 5, axis=1, order=1)
    X[X < 0] = 0
    # Retain peaks
    XLeft = X[:, 0:-2]
    XRight = X[:, 2::]
    mask = np.zeros_like(X)
    mask[:, 1:-1] = (X[:, 1:-1] > XLeft) * (X[:, 1:-1] > XRight)
    X[mask < 1] = 0
    # Fold into octave
    n_octaves = int(X.shape[0] / 12)
    X2 = np.zeros((12, X.shape[1]), dtype=X.dtype)
    for i in range(n_octaves):
        X2 += X[i * 12:(i + 1) * 12, :]
    X = X2
    # Compute norms
    if do_plot:
        import librosa.display
        plt.subplot(411)
        librosa.display.specshow(X, sr=sr, x_axis='time', y_axis='chroma')
    norms = np.sqrt(np.sum(X**2, 0))
    if do_plot:
        plt.subplot(412)
        plt.plot(norms)
    norms = maximum_filter1d(norms, size=int(2 * sr / hop_length))
    if do_plot:
        import librosa.display
        plt.plot(norms)
        plt.subplot(413)
    X = X / norms[None, :]
    if do_plot:
        librosa.display.specshow(X, sr=sr, x_axis='time', y_axis='chroma')
    # Apply LNCO decay
    decays = np.linspace(0, 1, lag + 1)[1::]
    decays = np.sqrt(decays[::-1])
    XRet = np.zeros_like(X)
    M = X.shape[1] - lag + 1
    for i in range(lag):
        XRet[:, i:i + M] += X[:, 0:M] * decays[i]
    if do_plot:
        plt.subplot(414)
        librosa.display.specshow(XRet, sr=sr, x_axis='time', y_axis='chroma')
        plt.show()
    return XRet
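
# Hedged usage sketch; 'example.wav' is a placeholder path and librosa must be installed.
import librosa
y, sr = librosa.load('example.wav', sr=22050)
dlnc0 = get_DLNC0(y, sr, hop_length=512, lag=10)
print(dlnc0.shape)  # one row per pitch class, one column per CQT frame
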
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d as gf1d

plt.style.use('ggplot')

if __name__ == "__main__":
    smoother = lambda x: gf1d(x, sigma=15)
    files = glob.glob('./runs/*csv')
    ppo_files = [f for f in files if 'ppo' in f]
    ppg_files = [f for f in files if 'ppg' in f]

    ppo_df = pd.concat([pd.read_csv(f) for f in ppo_files], axis=1)
    ppg_df = pd.concat([pd.read_csv(f) for f in ppg_files], axis=1)

    ppo_df['min_val'] = ppo_df.min(axis=1)
    ppo_df['max_val'] = ppo_df.max(axis=1)
    ppo_df['mean_val'] = ppo_df.mean(axis=1)

    ppg_df['min_val'] = ppg_df.min(axis=1)
    ppg_df['max_val'] = ppg_df.max(axis=1)
    ppg_df['mean_val'] = ppg_df.mean(axis=1)

    f, ax = plt.subplots(figsize=(20, 14))
    x_ticks = np.arange(0, ppo_df.shape[0])
    plt.fill_between(x_ticks,
def interpolate_spectrum(wavelength, spectrum, smooth=10):
    func = interp1d(wavelength, spectrum)
    new_wavelength = np.arange(wavelength.min(), wavelength.max(), 1)
    new_spectrum = func(new_wavelength)
    new_spectrum = gf1d(new_spectrum, smooth)
    return new_wavelength, new_spectrum
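
# Hedged sketch: resample an unevenly sampled synthetic spectrum onto a 1-unit grid and
# smooth it; assumes numpy as np, scipy.interpolate.interp1d, and gf1d are imported.
wl = np.sort(np.random.uniform(4000., 7000., 500))
sp = np.sin(wl / 150.)
new_wl, new_sp = interpolate_spectrum(wl, sp, smooth=5)
print(new_wl.shape, new_sp.shape)
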