def interp1(x, y, z, interpz):
    # interplinex = interpolate.interp1d(z, x, kind='cubic')
    # interpliney = interpolate.interp1d(z, y, kind='cubic')
    interplinex = interpolate.InterpolatedUnivariateSpline(z, x, k=2)
    interpliney = interpolate.PchipInterpolator(z, y)
    # interpliney = interpolate.InterpolatedUnivariateSpline(z, y, k=3)
    interpx = interplinex(interpz)
    interpy = interpliney(interpz)
    return interpx, interpy
def test_hessian(self):
    x = np.linspace(0, 1, 100)
    y = x * x
    spline = SplineWrapper(
        interpolate.InterpolatedUnivariateSpline(x, y, k=1))
    x_var = tt.dscalar('x')
    g_x, = tt.grad(spline(x_var), [x_var])
    with pytest.raises(NotImplementedError):
        tt.grad(g_x, [x_var])
def match_interpolated_for_range(f1, f2, h1, h2, psdfun, flo, fhi, df, zpf=5):
    """
    Compute the match between FD waveforms h1, h2 interpolated onto a prescribed grid

    :param h1, h2: arrays
    :param psdfun: power spectral density as a function of frequency in Hz
    :param zpf: zero-padding factor
    """
    # f1 = df1*np.arange(0, len(h1))
    # f2 = df2*np.arange(0, len(h2))
    A1 = np.abs(h1)
    A2 = np.abs(h2)
    A1_I = ip.InterpolatedUnivariateSpline(f1, A1, k=3)
    A2_I = ip.InterpolatedUnivariateSpline(f2, A2, k=3)
    phi1 = np.unwrap(np.angle(h1))
    phi2 = np.unwrap(np.angle(h2))
    phi1_I = ip.InterpolatedUnivariateSpline(f1, phi1, k=3)
    phi2_I = ip.InterpolatedUnivariateSpline(f2, phi2, k=3)

    if (fhi > f1[-1]) or (fhi > f2[-1]):
        fhi = np.min([f1[-1], f2[-1]])
        print('fhi > maximum frequency of data! Resetting fhi to', fhi)

    n = int((fhi - flo) / df)
    f = flo + np.arange(n) * df
    h1_data = A1_I(f) * np.exp(1j * phi1_I(f))
    h2_data = A2_I(f) * np.exp(1j * phi2_I(f))

    psd_ratio = psdfun(100) / np.array(list(map(psdfun, f)))
    if f[0] == 0:
        psd_ratio[0] = psd_ratio[1]  # get rid of psdfun(0) = nan

    h1abs = np.abs(h1_data)
    h2abs = np.abs(h2_data)
    norm1 = np.dot(h1abs, h1abs * psd_ratio)
    norm2 = np.dot(h2abs, h2abs * psd_ratio)

    integrand = h1_data * h2_data.conj() * psd_ratio
    # integrand_zp = np.lib.pad(integrand, n*zpf, 'constant', constant_values=0)  # zero-pad it
    integrand_zp = np.concatenate(
        [np.zeros(n * zpf), integrand, np.zeros(n * zpf)])  # zero-pad it, in case we don't have np.lib.pad
    csnr = np.asarray(
        np.fft.fft(integrand_zp))  # complex snr; numpy.fft = Mma iFFT with our conventions
    return np.max(np.abs(csnr)) / np.sqrt(norm1 * norm2)
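# --- Illustrative check (not part of the original source): a minimal sketch of how
# match_interpolated_for_range might be exercised. The flat PSD, the synthetic
# power-law "waveform" h and all grid values below are invented for illustration;
# with identical input waveforms the returned match should be close to 1.
import numpy as np

f1 = np.linspace(10.0, 100.0, 2000)
f2 = np.linspace(10.0, 100.0, 1500)
h = lambda f: f**(-7.0 / 6.0) * np.exp(-2j * np.pi * f * 0.01)   # toy frequency-domain signal
psd_flat = lambda f: 1.0                                          # flat (white) PSD

m = match_interpolated_for_range(f1, f2, h(f1), h(f2), psd_flat,
                                 flo=20.0, fhi=90.0, df=0.05, zpf=5)
print(m)  # expected to be ~1 for identical waveforms on the two grids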
def __attitude_spline(self):
    """
    Creates a spline for each component of the attitude quaternion: s_x, s_y, s_z, s_w

    Attributes
    -----------
    :func_attitude: lambda func, returns: attitude quaternion at time t from spline.
    :func_x_axis_lmn: lambda func, returns: position of x_axis of satellite at time t, in lmn frame.
    :func_z_axis_lmn: lambda func, returns: position of z_axis of satellite at time t, in lmn frame.
    """
    w_list = []
    x_list = []
    y_list = []
    z_list = []
    t_list = []
    for obj in self.storage:
        t_list.append(obj[0])
        x_list.append(obj[4].x)
        y_list.append(obj[4].y)
        z_list.append(obj[4].z)
        w_list.append(obj[4].w)

    # This should be faster, but it is not a bottleneck (update() is):
    # t_list = np.array(self.storage)[:, 0]
    # x_list = np.array(self.storage)[:, 4].x
    # y_list = np.array(self.storage)[:, 4].y
    # z_list = np.array(self.storage)[:, 4].z
    # w_list = np.array(self.storage)[:, 4].w

    # Splines for each coordinate i_list at each time in t_list, of degree k (order = k+1)
    self.s_w = interpolate.InterpolatedUnivariateSpline(t_list, w_list, k=self.spline_degree)
    self.s_x = interpolate.InterpolatedUnivariateSpline(t_list, x_list, k=self.spline_degree)
    self.s_y = interpolate.InterpolatedUnivariateSpline(t_list, y_list, k=self.spline_degree)
    self.s_z = interpolate.InterpolatedUnivariateSpline(t_list, z_list, k=self.spline_degree)

    # Attitude
    self.func_attitude = lambda t: np.quaternion(self.s_w(t), self.s_x(t),
                                                 self.s_y(t), self.s_z(t)).normalized()
    # Attitude in the lmn frame
    self.func_x_axis_lmn = lambda t: ft.xyz_to_lmn(self.func_attitude(t), np.array([1, 0, 0]))  # where we want to be
    self.func_y_axis_lmn = lambda t: ft.xyz_to_lmn(self.func_attitude(t), np.array([0, 1, 0]))
    self.func_z_axis_lmn = lambda t: ft.xyz_to_lmn(self.func_attitude(t), np.array([0, 0, 1]))
def __init__(self, m, p, psid, psiq, r1, id, iq, ls=0):
    super(self.__class__, self).__init__(m, p, r1, ls)
    if isinstance(psid, (float, int)):
        self._psid = lambda id, iq: np.array([[psid]])
        self._psiq = lambda id, iq: np.array([[psiq]])
        return
    psid = np.asarray(psid)
    psiq = np.asarray(psiq)
    id = np.asarray(id)
    iq = np.asarray(iq)
    self.idrange = (min(id), max(id))
    self.iqrange = (min(iq), max(iq))
    if np.mean(self.idrange) > 0:
        self.betarange = (0, np.pi/2)
    else:
        self.betarange = (-np.pi/2, 0)
    self.io = np.max(iq)/2, np.min(id)/2

    if np.any(psid.shape < (4, 4)):
        if psid.shape[0] > 1 and psid.shape[1] > 1:
            self._psid = ip.interp2d(iq, id, psid.T)
            self._psiq = ip.interp2d(iq, id, psiq.T)
            return
        if len(id) == 1 or psid.shape[1] == 1:
            self._psid = lambda x, y: ip.InterpolatedUnivariateSpline(iq, psid)(x)
            self._psiq = lambda x, y: ip.InterpolatedUnivariateSpline(iq, psiq)(x)
            return
        if len(iq) == 1 or psid.shape[0] == 1:
            self._psid = lambda x, y: ip.InterpolatedUnivariateSpline(id, psid)(y)
            self._psiq = lambda x, y: ip.InterpolatedUnivariateSpline(id, psiq)(y)
            return
        raise ValueError("unsupported array size {}x{}".format(
            psid.shape[0], psid.shape[1]))

    self._psid = lambda x, y: ip.RectBivariateSpline(iq, id, psid).ev(x, y)
    self._psiq = lambda x, y: ip.RectBivariateSpline(iq, id, psiq).ev(x, y)
def gen_cdf_match(src, perc_src, perc_ref, min_val=None, max_val=None, k=1):
    """
    General cdf matching:

    1. computes discrete cumulative density functions of src and ref at the given percentiles
    2. computes continuous CDFs by k-th order spline fitting
    3. CDF of src is matched to CDF of ref

    Parameters
    ----------
    src: numpy.array
        input dataset which will be scaled
    perc_src: numpy.array
        percentiles of src
    perc_ref: numpy.array
        percentiles of reference data
        estimated through method of choice, must be same size as perc_src
    min_val: float, optional
        Minimum allowed value, output data is capped at this value
    max_val: float, optional
        Maximum allowed value, output data is capped at this value
    k : int, optional
        Order of spline to fit

    Returns
    -------
    CDF matched values: numpy.array
        dataset src with CDF as ref
    """
    # InterpolatedUnivariateSpline uses extrapolation outside of boundaries
    # so all values can be rescaled. This is important if the stored
    # percentiles were generated using a subset of the data and the new data
    # has values outside of this original range.
    try:
        inter = sc_int.InterpolatedUnivariateSpline(perc_src, perc_ref, k=k)
    except Exception:
        # here we must catch all exceptions since scipy does not raise a
        # proper Exception
        warn("Too few percentiles for chosen k.")
        return np.full_like(src, np.nan)

    scaled = inter(src)
    if max_val is not None:
        scaled[scaled > max_val] = max_val
    if min_val is not None:
        scaled[scaled < min_val] = min_val

    return scaled
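# --- Illustrative usage (not from the original source): a minimal sketch of the
# CDF-matching steps described in the docstring above, assuming gen_cdf_match is
# importable. The synthetic src/ref samples and the percentile grid are invented
# for illustration only.
import numpy as np

rng = np.random.default_rng(0)
src = rng.normal(loc=0.30, scale=0.05, size=1000)   # data to be rescaled
ref = rng.normal(loc=0.25, scale=0.03, size=1000)   # reference distribution

percentiles = np.linspace(0, 100, 11)
perc_src = np.percentile(src, percentiles)          # discrete CDF of src (step 1)
perc_ref = np.percentile(ref, percentiles)          # discrete CDF of ref (step 1)

matched = gen_cdf_match(src, perc_src, perc_ref, k=1)   # spline fit + matching (steps 2-3)
print(matched.mean(), matched.std(), ref.mean(), ref.std())  # matched moments track the reference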
def dust_vals_disk(self, lcen, bcen, dist, radius):
    """
    NAME:
       dust_vals_disk
    PURPOSE:
       return the distribution of extinction within a small disk as samples
    INPUT:
       lcen, bcen - Galactic longitude and latitude of the center of the disk (deg)
       dist - distance in kpc
       radius - radius of the disk (deg)
    OUTPUT:
       (pixarea, extinction) - arrays of pixel area in sq rad and extinction value
    HISTORY:
       2015-03-06 - Written - Bovy (IAS)
    """
    # Convert the disk center to a HEALPix vector
    vec = healpy.pixelfunc.ang2vec((90. - bcen) * _DEGTORAD, lcen * _DEGTORAD)
    distmod = 5. * numpy.log10(dist) + 10.
    # Query the HEALPix map for pixels that lie within the disk
    pixarea = []
    extinction = []
    for nside in self._nsides:
        # Find the pixels at this resolution that fall within the disk
        ipixs = healpy.query_disc(nside, vec, radius * _DEGTORAD,
                                  inclusive=False, nest=True)
        # Get indices of all pixels within the disk at the current nside level
        nsideindx = self._pix_info['nside'] == nside
        potenIndxs = self._indexArray[nsideindx]
        nsidepix = self._pix_info['healpix_index'][nsideindx]
        # Loop through the pixels in the (small) disk
        tout = []
        for ii, ipix in enumerate(ipixs):
            lbIndx = potenIndxs[ipix == nsidepix]
            if numpy.sum(lbIndx) == 0:
                continue
            if self._intps[lbIndx] != 0:
                tout.append(self._intps[lbIndx][0](distmod))
            else:
                interpData = \
                    interpolate.InterpolatedUnivariateSpline(self._distmods,
                                                             self._best_fit[lbIndx],
                                                             k=self._interpk)
                tout.append(interpData(distmod))
                self._intps[lbIndx] = interpData
        tarea = healpy.pixelfunc.nside2pixarea(nside)
        tarea = [tarea for ii in range(len(tout))]
        pixarea.extend(tarea)
        extinction.extend(tout)
    pixarea = numpy.array(pixarea)
    extinction = numpy.array(extinction)
    if self._filter is not None:
        extinction = extinction * aebv(self._filter, sf10=self._sf10)
    return (pixarea, extinction)
def compute_xi_bar(self):
    """
    Compute the value of the spherically averaged correlation function
    over the range r_min - r_max
    """
    for idx, r in enumerate(self.r_array):
        self.xi_bar_array[idx] = self.raw_xi_bar(r)

    self._xi_bar_spline = interpolate.InterpolatedUnivariateSpline(
        self.r_array, self.xi_bar_array)
    self.initialized_xi_bar_spline = True
    return None
def InteractionResultReader(data_name, model_name, exp_id, train_test, topic_id):
    mdir = os.path.join(retdir, data_name, 'interaction', model_name, exp_id, train_test)
    filename = topic_id + '.csv'
    filepath = os.path.join(mdir, filename)
    check_file(filepath)
    model = get_model_name(model_name)
    df = pd.read_csv(filepath)

    # construct new data
    startp, endp, nump = 0.1, 1.0, 10
    df = df.drop_duplicates(['sampled_num'])
    df['percentage'] = df['sampled_num'] / df['total_num']
    max_percentage = df['percentage'].max()
    min_percentage = df['percentage'].min()
    df['relative_error'] = np.abs(df['total_esti_r'] - df['total_true_r']) / df['total_true_r']
    df = df.fillna(0)

    x = np.linspace(startp, endp, nump)

    f = interpolate.InterpolatedUnivariateSpline(df['percentage'].values,
                                                 df['relative_error'].values)
    relative_error = revised_f(x, f, min_percentage, max_percentage)

    f = interpolate.InterpolatedUnivariateSpline(df['percentage'].values,
                                                 df['running_true_recall'].values)
    recall = revised_f(x, f, min_percentage, max_percentage)

    f = interpolate.InterpolatedUnivariateSpline(df['percentage'].values,
                                                 df['ap'].values)
    ap = revised_f(x, f, min_percentage, max_percentage)

    df = pd.DataFrame({
        'model_name': [model] * nump,
        'exp_id': [exp_id] * nump,
        'percentage': x,
        'relative_error': relative_error,
        'recall': recall,
        'ap': ap
    })
    return df
def lepton_modulation(E, Elog, spectrum, potential):
    dimE = len(E)
    spectrum_mod = np.zeros(int(dimE))
    A = ((E + m_e)**2 - m_e**2) / (((E + m_e) + potential)**2 - m_e**2)
    F = interpolate.InterpolatedUnivariateSpline(Elog, spectrum)
    Elog_shifted = np.zeros(dimE)
    for i in range(dimE):
        Elog_shifted[i] = np.log(E[i] + potential)
    spectrum_mod = A * F(Elog_shifted)
    return spectrum_mod
def interp_univar(x, y, s=1, savgol=False):
    '''Interpolate pair of vectors at integer increments between min(x) and max(x)'''
    if not is_monotonic(x):
        x, y = make_monotonic(x, y)
    xnew = range(int(np.ceil(min(x))), int(np.floor(max(x))))
    F = interpolate.InterpolatedUnivariateSpline(x, y)
    ynew = F(xnew)
    if savgol:
        ynew = norm2one(savgol_filter(ynew, 15, 2))
    return xnew, ynew
def test_extract_coeffs_knots_from_splines(self):
    k = 4
    length = 5
    x = np.arange(length)
    y = np.random.rand(length)
    spline = interpolate.InterpolatedUnivariateSpline(x, y, k=k)
    spline_list = [spline]
    coeffs, knots, splines = af.extract_coeffs_knots_from_splines([spline], k)
    self.assertEqual(len(coeffs), len(spline_list))
def __init__(self, xvec, yvec, xtol, order=3, ext=3):
    self._xvec = xvec
    self._yvec = yvec
    self._xtol = xtol
    self._order = order
    self._ext = ext
    self._fun = interp.InterpolatedUnivariateSpline(xvec, yvec, k=order, ext=ext)
def plot_probability(self, label, errors):
    hist, bins = np.histogram(errors, bins=50, density=True)
    centers = (bins[:-1] + bins[1:]) / 2
    prob = interpolate.InterpolatedUnivariateSpline(centers, hist)
    color = (random(), random(), random())
    xp = np.linspace(centers[0], centers[-1], 100)
    self.plot.plot(centers, hist, '.', color=color)
    self.plot.plot(xp, prob(xp), '-', color=color, label=label)
def reset(event):
    global yvals
    global spline
    # reset the values and rebuild the spline through them
    yvals = funcx(u)
    spline = inter.InterpolatedUnivariateSpline(u, yvals)
    l.set_ydata(yvals)
    m.set_ydata(spline(U))
    # redraw canvas while idle
    fig.canvas.draw_idle()
def log_interp1d(xx, yy, kind='linear'):
    import numpy as np
    import scipy.interpolate as scinterp
    logx = np.log10(xx)
    logy = np.log10(yy)
    order = 1
    s = scinterp.InterpolatedUnivariateSpline(logx, logy, k=order)
    # linterp = scinterp.interp1d(logx, logy, kind=kind)
    logterp = lambda zz: np.power(10.0, s(np.log10(zz)))
    return logterp
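# --- Illustrative check (not from the original source): because log_interp1d
# interpolates in log-log space with an order-1 spline, a pure power law sampled
# on a coarse grid is reproduced essentially exactly between the samples.
# The exponent 2.5 and the query points below are arbitrary illustration values.
import numpy as np

xx = np.logspace(0, 3, 8)
yy = xx ** 2.5                      # power law: a straight line in log-log space
f = log_interp1d(xx, yy)

xq = np.array([3.7, 42.0, 510.0])
print(f(xq) / xq ** 2.5)            # ~[1. 1. 1.] up to round-off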
def calc_spline(self, y):
    """Calculate a spline that represents the smoothed data points"""
    try:
        t_range = self.owner.temprange
        spline = interpolate.InterpolatedUnivariateSpline(t_range, y)
        return spline
    except Exception as err:
        print('Calculation of spline failed! ', err)
def loadProfiles(self, dirname):
    cpath = dirname + "/" + self.profile_dir
    behIDarr = dirname.split("/")
    behID = behIDarr[len(behIDarr) - 1]
    constraints = []
    try:
        with open(cpath + "/" + behID + ".constraints.csv") as f:
            content = f.readlines()
            for item in content:
                constraints.append(item.split(","))
        # constraints = np.genfromtxt(cpath + "/" + behID + ".constraints.csv", delimiter=",")
    except IOError:
        print('IO error opening the file ' + cpath + "/" + behID + ".constraints.csv")
        exit(0)

    sp_profiles = []
    start_times = []
    end_times = []
    temp_times = []
    for row in constraints:
        csv = np.genfromtxt(cpath + "/" + row[0], delimiter=" ")
        time1 = csv[:, 0]
        y1 = csv[:, 1]
        self.ESTs.append(int(row[1].strip()))
        self.LSTs.append(int(row[2].strip()))
        if not time1[0] == 0:
            time1 = np.insert(time1, 0, 0)
            y1 = np.insert(y1, 0, 0)
        si = inter.InterpolatedUnivariateSpline(time1, y1)
        # knots = self.max_dev_knots(csv, 3)
        # si = inter.LSQUnivariateSpline(time1, y1, k=3, t=knots)
        sp_profiles.append(si)
        start_times.append(time1[0])
        end_times.append(time1[len(time1) - 1])
        temp_times.append(time1)

    self.loads = []
    for i in range(0, len(sp_profiles)):
        # since consumption starts from zero, end_time is the duration
        self.loads.append(
            Load(start_times[i], end_times[i], sp_profiles[i],
                 self.ESTs[i], self.LSTs[i], temp_times[i]))
        # accumulate durations to compute the average load duration
        self.duration_average += end_times[i]
    self.duration_average = self.duration_average / len(sp_profiles)
    self.duration_average = int(self.duration_average)
def dndlnM(self, M, z, Delta=200., wantsigma=False):
    """Mass function at the redshift of the power spectrum you input."""
    if np.fabs(Delta - 200.) > 0.01:
        print('I have not coded the interpolation for nofM; only currently works for Delta = 200.')
        return 0.
    if np.fabs(z - self.zpk) > 0.001:
        print('code not generalized yet, mass fxn z must be the same as power spectrum z.')
        return 0.

    # parameters from http://adsabs.harvard.edu/abs/2008ApJ...688..709T
    # for Delta = 200 (Table 2)
    A = 0.186 * (1. + z)**-0.14
    a = 1.47 * (1. + z)**-0.06
    alpha = 10**(-(0.75 / np.log10(Delta / 75.))**1.2)
    b = 2.57 * (1. + z)**-alpha
    c = 1.19

    sig = self.linPk.sigmaR(self.LagrangianMtoR(M))

    # make an interpolator object to get the log derivative.
    try:
        ll = len(M)  # raises TypeError if M is a scalar
        # M is an array, so derive the derivative directly from it.
        lnsiginterp = interp.InterpolatedUnivariateSpline(np.log(M), np.log(sig))
        sigfac = np.array(
            [-lnsiginterp.derivatives(np.log(mi))[1] for mi in M])
    except:
        # M is a scalar, so evaluate sig at neighboring mass points and
        # derive the derivative from those.
        tmpfac = 0.9  # factor (< 1) to scale the mass on either side when computing the sigma derivative
        mtmp = np.array([tmpfac * M, M, M / tmpfac])
        slist = np.array([
            self.linPk.sigmaR(self.LagrangianMtoR(tmpfac * M)),
            sig,
            self.linPk.sigmaR(self.LagrangianMtoR(M / tmpfac))
        ])
        lnsiginterp = interp.InterpolatedUnivariateSpline(np.log(mtmp), np.log(slist))
        sigfac = -lnsiginterp.derivatives(np.log(M))[1]

    fsigma = A * ((sig / b)**-a + 1.) * np.exp(-c / sig**2)
    if not wantsigma:
        return fsigma * rhocrit * self.ocb / M * sigfac
    else:
        # return the sigma vector corresponding to M as well.
        return sig, fsigma * rhocrit * self.ocb / M * sigfac
def at_same_phase_spectra(self, ALL_data_pkl):
    dic_all = cPickle.load(open(ALL_data_pkl))
    sn_name = dic_all.keys()
    Time = N.linspace(-13, 43, 57)
    dic_time = {}

    for i, sn in enumerate(sn_name):
        print(i, sn)
        TS = self.big_dico['Time_series'][sn]['Y']
        X = self.big_dico['Time_series'][sn]['X']
        new_grid = []
        DAYS = [-999]
        for t in range(len(dic_all[sn].keys())):
            DAYS.append(dic_all[sn]['%i' % (t)]['phase_salt2'])
            if '%10.1f' % DAYS[-2] != '%10.1f' % DAYS[-1]:
                new_grid.append(dic_all[sn]['%i' % (t)]['phase_salt2'])

        YY = N.zeros((len(new_grid), len(X)))
        for W in range(len(X)):
            # fill non-finite samples using a spline through the finite ones,
            # then resample the time series onto the new phase grid
            SPLINEW = inter.InterpolatedUnivariateSpline(
                X[N.isfinite(TS[:, W])], TS[:, W][N.isfinite(TS[:, W])])
            TS[:, W][~N.isfinite(TS[:, W])] = SPLINEW(X[~N.isfinite(TS[:, W])])
            SPLINE = inter.InterpolatedUnivariateSpline(
                N.linspace(-13, 43, 57), TS[:, W])
            YY[:, W] = SPLINE(new_grid)

        dic_time.update({
            sn: {
                'x': X,
                'Y_cosmo': YY,
                'Time': new_grid,
                'X0': self.big_dico['All'][sn]['0']['X0'],
                'X1': self.big_dico['All'][sn]['0']['X1'],
                'C': self.big_dico['All'][sn]['0']['C'],
                'z': self.big_dico['All'][sn]['0']['z']
            }
        })

    self.DIC_SALT2 = dic_time
def getImitationData(dt, time, multiple_demos=False):
    if multiple_demos:
        np.random.seed(10)
        nSteps = len(time)
        nDemo = 45
        r = np.random.randn(nDemo, 1) * 0.05 + 2
        qs = np.sin(2 * pi * np.outer(r, time[500:1001]))
        x = np.insert(time[500:1001], 0, np.array([0, dt]))
        x = np.append(x, np.array([time[-2], time[-1]]))
        q = np.zeros((nDemo, nSteps))
        for i in range(nDemo):
            y = np.insert(qs[i, :], 0, np.array([-pi, -pi]))
            y = np.append(y, np.array([0.3, 0.3]))
            f = interp.InterpolatedUnivariateSpline(x, y)
            q[i, :] = f(time) + np.random.randn(1) * 0.35 + 1
    else:
        qs = np.sin(2 * pi * 2 * time[500:1001])
        x = np.insert(time[500:1001], 0, np.array([0, dt]))
        x = np.append(x, np.array([time[-2], time[-1]]))
        y = np.insert(qs, 0, np.array([-pi, -pi]))
        y = np.append(y, np.array([0.3, 0.3]))
        f1 = interp.InterpolatedUnivariateSpline(x, y)
        q1 = f1(time)
        f2 = interp.InterpolatedUnivariateSpline(
            np.array([0, dt, time[-2], time[-1]]),
            np.array([0, 0, -0.8, -0.8]))
        q2 = f2(time)
        q = np.vstack((q1, q2))

    qd = np.diff(q) / dt
    qd = np.hstack((qd, qd[:, -1, None]))
    qdd = np.diff(qd) / dt
    qdd = np.hstack((qdd, qdd[:, -1, None]))
    return [q, qd, qdd]
def compute_xi(self):
    """
    Compute the value of the correlation function over the range r_min - r_max
    """
    # print('I AM COMPUTING XI')
    for idx, r in enumerate(self.r_array):
        self.xi_array[idx] = self.raw_xi(r)

    self._xi_spline = interpolate.InterpolatedUnivariateSpline(
        self.r_array, self.xi_array)
    self.initialized_xi_spline = True
    return None
def __init__(self, treecool_file="data/TREECOOL_ep_2018p"):
    # Format of the treecool table:
    #   log_10(1+z), Gamma_HI, Gamma_HeI, Gamma_HeII, Qdot_HI, Qdot_HeI, Qdot_HeII,
    # where 'Gamma' is the photoionization rate and 'Qdot' is the photoheating rate.
    # The Gamma's are in units of s^-1, and the Qdot's are in units of erg s^-1.
    try:
        data = np.loadtxt(treecool_file)
    except OSError:
        treefile = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), treecool_file)
        data = np.loadtxt(treefile)
    redshifts = data[:, 0]
    photo_rates = data[:, 1:4]
    assert np.shape(redshifts)[0] == np.shape(photo_rates)[0]
    self.Gamma_HI = interp.InterpolatedUnivariateSpline(
        redshifts, photo_rates[:, 0])
    self.Gamma_HeI = interp.InterpolatedUnivariateSpline(
        redshifts, photo_rates[:, 1])
    self.Gamma_HeII = interp.InterpolatedUnivariateSpline(
        redshifts, photo_rates[:, 2])
def scipy_InterpolatedUnivariateSpline(*args, **kwargs):
    if not scipyInstalled:
        raise FuncDesignerException(
            'to use scipy_InterpolatedUnivariateSpline you should have scipy installed, see scipy.org')
    assert len(args) > 1
    assert not isinstance(args[0], oofun) and not isinstance(args[1], oofun), \
        'init scipy splines from oovar/oofun content is not implemented yet'
    S = interpolate.InterpolatedUnivariateSpline(*args, **kwargs)
    return SplineGenerator(S, *args, **kwargs)
def spline(abweichung, length):
    from scipy import interpolate
    x = linspace(0, 1, 5)
    y = array([0, 0.5 - abweichung, 0.5, 0.5 + abweichung, 1])
    # s = interpolate.PchipInterpolator(x, y)
    s = interpolate.InterpolatedUnivariateSpline(x, y)
    xnew = linspace(0, 1, length)
    ynew = s(xnew)
    figure()
    plot(x, y, 'x', xnew, ynew)
    return ynew
def set_lmfit_parameters(self, lmparams):
    """
    function to update the settings of this class during a least squares fit

    Parameters
    ----------
    lmparams : lmfit.Parameters
        lmfit Parameters list of sample and instrument parameters
    """
    pv = lmparams.valuesdict()
    settings = dict()
    h = list(self.pdiff[0].data)[0]
    fp = self.pdiff[0].data[h]['conv'].convolvers
    for conv in fp:
        name = conv[5:]
        settings[name] = dict()

    self.I0 = pv.pop('primary_beam_intensity', 1)
    set_splbkg = False
    spliney = {}
    for p in pv:
        if p.startswith('phase_'):  # sample phase parameters
            midx = 0
            for i, name in enumerate(self.materials.namelist):
                if p.find(name) > 0:
                    midx = i
            name = self.materials.namelist[midx]
            attrname = p[p.find(name) + len(name) + 1:]
            setattr(self.materials[midx], attrname, pv[p])
        elif p.startswith('background_coeff'):
            self._bckg_pol[int(p.split('_')[-1])] = pv[p]
        elif p.startswith('background_spl_coeff'):
            set_splbkg = True
            spliney[int(p.split('_')[-1])] = pv[p]
        else:  # instrument parameters
            for k in settings:
                if p.startswith(k):
                    slist = p[len(k) + 1:].split('_')
                    if len(slist) > 2 and slist[-2] == 'item':
                        name = '_'.join(slist[:-2])
                        if slist[-1] == '0':
                            settings[k][name] = []
                        settings[k][name].append(pv[p])
                    else:
                        name = p[len(k) + 1:]
                        settings[k][name] = pv[p]
                    break

    if set_splbkg:
        self._bckg_spline = interpolate.InterpolatedUnivariateSpline(
            self._bckg_spline._data[0],
            [spliney[k] for k in sorted(spliney)], ext=0)
    self.set_parameters(settings)
def update(val):
    global yvals
    global spline
    # update curve
    for i in np.arange(N):
        yvals[i] = sliders[i].val
    l.set_ydata(yvals)
    spline = inter.InterpolatedUnivariateSpline(x, yvals)
    m.set_ydata(spline(X))
    # redraw canvas while idle
    fig.canvas.draw_idle()
def get_interp(self, spline=True):
    """
    Return an interpolator. Care should be taken to avoid regions where
    the data are sparse, and especially regions outside of the dataset
    time range.
    """
    import scipy.interpolate as interp
    if spline:
        return interp.InterpolatedUnivariateSpline(self.data[0], self.data[1])
    return interp.interp1d(self.data[0], self.data[1])
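# --- Illustrative note (not from the original source) on the docstring's warning
# about points outside the dataset time range: by default
# InterpolatedUnivariateSpline extrapolates (ext=0), while interp1d raises on
# out-of-bounds input. A minimal sketch with invented standalone arrays rather
# than the class's self.data:
import numpy as np
import scipy.interpolate as interp

t = np.linspace(0.0, 10.0, 20)
v = np.sin(t)

spl = interp.InterpolatedUnivariateSpline(t, v)   # extrapolates beyond t by default
lin = interp.interp1d(t, v)                       # raises ValueError out of range

print(spl(12.0))                                  # extrapolated value, possibly far off
try:
    lin(12.0)
except ValueError as err:
    print("interp1d refuses to extrapolate:", err)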
def GetCurvatureCurve(self):
    '''
    :return: approx. curvature curve (type InterpolatedUnivariateSpline)
    '''
    u_new = np.linspace(0, 1, len(self.u), endpoint=True)
    dx = interpolate.splev(u_new, self.tck, der=1)
    ddx = interpolate.splev(u_new, self.tck, der=2)
    # compute curvature samples
    # note: this divides by the global norm of dx rather than the per-point
    # speed, so the result is only a scaled approximation of the curvature
    k_samples = (dx[0] * ddx[1] - ddx[0] * dx[1]) / np.linalg.norm(dx)**3
    fs = interpolate.InterpolatedUnivariateSpline(u_new, k_samples, k=3)
    return fs
def _interpolate_drift(self, xvals, yvals):
    """
    Interpolate drift data to produce a drift value for each camera
    frame in the original movie.
    """
    drift_pol = interpolate.InterpolatedUnivariateSpline(xvals, yvals, k=3)
    t_inter = np.arange(self.num_frames)
    final_drift = drift_pol(t_inter)
    return final_drift