def test_calibrate_lrt_works_as_expected(self):
    m = 1
    df = 0.01
    freq = np.arange(df, 5 + df, df)
    nfreq = freq.size
    rng = np.random.RandomState(100)
    noise = rng.exponential(size=nfreq)
    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = df
    ps.norm = "leahy"

    loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

    s_all = np.atleast_2d(np.ones(10) * 2.0).T

    model2 = models.PowerLaw1D() + models.Const1D()
    model2.x_0_0.fixed = True
    loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)

    pe = PSDParEst(ps)

    pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                            [2.0, 1.0, 2.0], sample=s_all,
                            max_post=False, nsim=5, seed=100)

    assert pval > 0.001
def fit_gaussian(data_1d, x_loc=0.0):
    nx = data_1d.shape[0]
    if x_loc < 0.01:
        x_loc = nx / 2.0
    x_1d = np.arange(nx)

    # subtract the mean or median, although it may be better to have
    # the model fit a continuum with Const1D?
    datamed = np.median(data_1d)
    # data_sub = data_1d - np.median(data_1d)

    gauss_init = models.Gaussian1D(amplitude=1.0, mean=x_loc, stddev=1.5) + \
        models.Const1D(amplitude=datamed)
    fit_g = fitting.LevMarLSQFitter()
    gauss = fit_g(gauss_init, x_1d, data_1d)

    return gauss
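# Minimal usage sketch for fit_gaussian (assumes `import numpy as np` and
# `from astropy.modeling import models, fitting` as in the snippet above;
# the synthetic profile below is illustrative only).
x = np.arange(60)
profile = 8.0 * np.exp(-0.5 * ((x - 30.0) / 2.0) ** 2) + 3.0
fit = fit_gaussian(profile, x_loc=30.0)
# Compound model: Gaussian1D (suffix _0) + Const1D (suffix _1)
print(fit.mean_0.value, fit.stddev_0.value, fit.amplitude_1.value)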
def astropy_clipping_fit(x, row, variance):
    # initialize fitters
    fit = fitting.LevMarLSQFitter()
    or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=3,
                                               sigma=6.0)

    g_init = models.Gaussian1D(amplitude=np.max(row), mean=len(x) / 2,
                               stddev=1.0) + models.Const1D(amplitude=np.min(row))

    filtered_data, or_fitted_model = or_fit(g_init, x, row,
                                            weights=1.0 / variance)

    return or_fitted_model.mean_0.value
def fit_gaussians_to_spectrum(obs_flx, obs_wvl, idx_peaks):
    print(' Fitting multiple ({:.0f}) gaussians to the extracted arc spectrum'.format(len(idx_peaks)))
    median_val = np.median(obs_flx)
    mean_wvl = 0  # np.mean(obs_wvl)
    fit_model = models.Const1D(amplitude=median_val) + polynomial.Polynomial1D(4)
    for i_p, idx_p in enumerate(idx_peaks):
        peak_val = obs_flx[idx_p] - median_val
        # works better without explicit bounds on the parameters
        fit_model += models.Gaussian1D(amplitude=peak_val,
                                       mean=obs_wvl[idx_p] - mean_wvl,
                                       stddev=0.1)
        # bounds={'mean': (obs_wvl[idx_p]-0.5, obs_wvl[idx_p]+0.5),
        #         'amplitude': (peak_val*0.8, peak_val*1.2)})
    fit_t = fitting.LevMarLSQFitter()
    fitted_model = fit_t(fit_model, obs_wvl - mean_wvl, obs_flx)
    return fitted_model
def setup_class(cls):
    nx = 1000000
    cls.x = np.arange(nx)
    cls.countrate = 10.0
    cls.cerr = 2.0
    cls.y = np.random.normal(cls.countrate, cls.cerr, size=cls.x.shape[0])
    cls.yerr = np.ones_like(cls.y) * cls.cerr
    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.countrate, scale=cls.cerr).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
def setup_class(cls):
    np.random.seed(1000)
    m = 1
    nfreq = 100
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.n = freq.shape[0]
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "powell"
    cls.max_post = True
    cls.t0 = np.array([2.0])
    cls.neg = True

    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=cls.neg, tol=1.e-10)

    cls.opt.x = np.atleast_1d(cls.opt.x)
    cls.optres = OptimizationResultsSubclassDummy(cls.lpost, cls.opt,
                                                  neg=True)
def estimate_peak_width(data, min=2, max=8):
    """
    Estimates the FWHM of the spectral features (arc lines) by fitting
    Gaussians to the brightest peaks.

    Parameters
    ----------
    data : ndarray
        1D data array
    min : int
        minimum plausible peak width
    max : int
        maximum plausible peak width (inclusive)

    Returns
    -------
    float : estimate of FWHM of features
    """
    all_widths = []
    for fwidth in range(min, max + 1):  # plausible range of widths
        data_copy = data.copy()  # We'll be editing the data
        widths = []
        for i in range(15):  # 15 brightest peaks, should ensure we get real ones
            index = 2 * fwidth + np.argmax(data_copy[2 * fwidth:-2 * fwidth - 1])
            data_to_fit = data_copy[index - 2 * fwidth:index + 2 * fwidth + 1]
            m_init = models.Gaussian1D(stddev=0.42466 * fwidth) + \
                models.Const1D(np.min(data_to_fit))
            m_init.mean_0.bounds = [-1, 1]
            m_init.amplitude_1.fixed = True
            fit_it = fitting.FittingWithOutlierRemoval(fitting.LevMarLSQFitter(),
                                                       sigma_clip, sigma=3)
            with warnings.catch_warnings():
                # Ignore model linearity warning from the fitter
                warnings.simplefilter('ignore')
                m_final, _ = fit_it(m_init,
                                    np.arange(-2 * fwidth, 2 * fwidth + 1),
                                    data_to_fit)
            # Quick'n'dirty logic to remove "peaks" at edges of CCDs
            if m_final.amplitude_1 != 0 and m_final.stddev_0 < fwidth:
                widths.append(m_final.stddev_0 / 0.42466)
            # Set data to zero so no peak is found here
            data_copy[index - 2 * fwidth:index + 2 * fwidth + 1] = 0.
        all_widths.append(sigma_clip(widths).mean())
    return sigma_clip(all_widths).mean()
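# Minimal usage sketch for estimate_peak_width (assumes the same imports as
# the function above: numpy as np, astropy.modeling models/fitting, warnings
# and sigma_clip; the synthetic arc spectrum here is illustrative only).
rng = np.random.RandomState(42)
pix = np.arange(3000)
spectrum = rng.normal(100.0, 2.0, size=pix.size)
for centre in np.linspace(150, 2850, 20):  # 20 fake arc lines, sigma ~ 1.7 pix
    spectrum += 600.0 * np.exp(-0.5 * ((pix - centre) / 1.7) ** 2)
print(estimate_peak_width(spectrum))  # expect roughly 1.7 * 2.355 ~ 4 pixels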
def test_transform_magnitudes_identical_input(order):
    # Analogous to the test case for calculate_transform_coefficients
    # above where the input magnitudes are identical, except the input
    # objects have coordinates.
    n_stars = 100

    zero = models.Const1D(0.0)

    instrumental, catalog_table = generate_tables(n_stars, zero)

    calib_mags, stars_with_match, transform = \
        transform_magnitudes(instrumental, catalog_table, catalog_table,
                             order=order)

    print(calib_mags)
    assert all(calib_mags == catalog_table['r_mag'])
    assert all(stars_with_match)
    assert len(transform.parameters) == order + 1
    assert all(transform.parameters == 0)
def fitB4(coord_iso):
    u = coord_iso[0]
    v = coord_iso[1]
    r = np.sqrt(u**2 + v**2)

    # Quadrant-corrected polar angle E in [0, 2*pi); essentially
    # np.arctan2(v, u) wrapped into that range.
    E = np.zeros(len(u))
    for i in range(0, len(u)):
        if np.sign(u[i]) > 0 and np.sign(v[i]) > 0:
            E[i] = np.arctan(v[i] / u[i])
        elif np.sign(u[i]) > 0 and np.sign(v[i]) < 0:
            E[i] = 2 * np.pi + np.arctan(v[i] / u[i])
        else:
            E[i] = np.pi + np.arctan(v[i] / u[i])

    Es = np.linspace(0, 2 * np.pi, num=100)

    dic = {'frequency': True, 'phase': True}
    g_init = (models.Sine1D(amplitude=0.1, frequency=1.5 / np.pi, fixed=dic)
              + models.Sine1D(amplitude=0.1, frequency=2 / np.pi, fixed=dic)
              + models.Sine1D(amplitude=0.1, frequency=1.5 / np.pi, phase=0.25, fixed=dic)
              + models.Sine1D(amplitude=0.1, frequency=2 / np.pi, phase=0.25, fixed=dic)
              + models.Const1D(amplitude=1.0, fixed={'amplitude': True}))

    fit = fitting.LevMarLSQFitter()
    or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=3, sigma=3.0)

    filtered_data, or_fitted_model = or_fit(g_init, E, r)
    fitted_model = fit(g_init, E, r)

    plt.figure(figsize=(8, 5))
    plt.plot(E, r, 'gx', label="original data")
    plt.plot(E, filtered_data, 'r+', label="filtered data")
    plt.plot(Es, fitted_model(Es), 'g-', label="model fitted w/ original data")
    plt.plot(Es, or_fitted_model(Es), 'r--', label="model fitted w/ filtered data")
    plt.legend(loc=2, numpoints=1)

    return or_fitted_model[3].amplitude.value
def test_calibrate_highest_outlier_works_with_sampling(self):
    m = 1
    nfreq = 100
    seed = 100
    freq = np.linspace(1, 10, nfreq)
    rng = np.random.RandomState(seed)
    noise = rng.exponential(size=nfreq)
    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    nsim = 5

    lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=1.0, scale=1.0).pdf(amplitude)

    priors = {"amplitude": p_amplitude}
    lpost.logprior = set_logprior(lpost, priors)

    pe = PSDParEst(ps)

    with catch_warnings(RuntimeWarning):
        pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
                                            max_post=True, seed=seed,
                                            nsim=nsim, niter=10,
                                            nwalkers=20, burnin=10)

    assert pval > 0.001
def fit_gaussian_peak(x, y, guess=None):
    # fit a Gaussian to a peak
    #
    # Return fitted model object
    # peak_model_fit.parameters = [offset, amplitude, mean, standard deviation]

    if guess is None:
        # help make some guesses
        offset = np.min(y)
        guess = [offset, np.max(y) - offset, x[np.argmax(y)], 1.0]

    g1 = models.Gaussian1D(guess[1], guess[2], guess[3])
    amp = models.Const1D(guess[0])
    peak_model = amp + g1

    fitter = fitting.LevMarLSQFitter()
    peak_model_fit = fitter(peak_model, x, y)

    return peak_model_fit
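# Minimal usage sketch for fit_gaussian_peak (assumes numpy and
# astropy.modeling imports as above; the data below are illustrative only).
x = np.linspace(0.0, 100.0, 200)
y = 5.0 + 12.0 * np.exp(-0.5 * ((x - 42.0) / 3.0) ** 2)
best = fit_gaussian_peak(x, y)
print(best.parameters)  # -> approximately [5.0, 12.0, 42.0, 3.0]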
def plot_lum_vs_lag():
    rmid_list = get_total_rmid_list()
    zfinal = pickle.load(open(Location.project_loca +
                              "/info_database/zfinal.pkl"))
    lag_list = list()
    lag_err_list = list()
    lum_list = list()
    lum_err_list = list()
    fig = plt.figure()
    for each in rmid_list:
        try:
            lag, lag_err = get_lag(each, zfinal)
            lum, lum_err = get_lum(each, zfinal)
            if lag < 3.0 * lag_err or lum < 3.0 * lum_err:
                raise Exception
            lag_list.append(lag)
            lag_err_list.append(lag_err)
            lum_list.append(lum)
            lum_err_list.append(lum_err)
        except Exception:
            continue
        # plt.errorbar([lum], [lag])
        # fig.text(np.log10(lum), np.log10(lag), str(each))
    lag = np.log10(np.array(lag_list))
    lum = np.log10(np.array(lum_list))
    lag_err = np.array(lag_err_list) / np.array(lag_list)
    lum_err = np.array(lum_err_list) / np.array(lum_list)
    theory = models.Linear1D(0.5, -10.0, fixed={"slope": True})
    obs = models.Const1D(0.0)
    fitter = fitting.LinearLSQFitter()
    theory_fit = fitter(theory, lum, lag, weights=lag_err ** 2.0)
    print(theory_fit.parameters)
    obs_fit = fitter(obs, lum, lag, weights=lag_err ** 2.0)
    print(obs_fit.parameters)
    rcs = np.sum((obs_fit(lum) - lag) ** 2.0) ** 0.5 / (len(rmid_list) - 1.0)
    print(rcs)
    print(len(lum))
    plt.errorbar(lum, lag, xerr=lum_err, yerr=lag_err, fmt='o')
    # plt.plot(lum, theory_fit(lum))
    plt.plot(lum, obs_fit(lum))
    plt.show()
def fit_gaussians_to_spectrum_linebyline(obs_flx, obs_wvl, idx_peaks, d_wvl=2):
    print(' Fitting {:.0f} individual gaussians to the extracted arc spectrum'.format(len(idx_peaks)))
    fitted_vals = []  # [mean, std] combinations
    for i_p, idx_p in enumerate(idx_peaks):
        # data subset
        idx_data_use = np.logical_and(obs_wvl > obs_wvl[idx_p] - d_wvl,
                                      obs_wvl < obs_wvl[idx_p] + d_wvl)
        use_flx = obs_flx[idx_data_use]
        use_wvl = obs_wvl[idx_data_use]
        median_val = np.median(use_flx)
        fit_model = models.Const1D(amplitude=median_val)
        peak_val = obs_flx[idx_p] - median_val
        # works better without explicit bounds on the parameters
        fit_model += models.Gaussian1D(amplitude=peak_val, mean=obs_wvl[idx_p],
                                       stddev=0.1)
        # bounds={'mean': (obs_wvl[idx_p]-0.5, obs_wvl[idx_p]+0.5),
        #         'amplitude': (peak_val*0.8, peak_val*1.2)})
        fit_t = fitting.LevMarLSQFitter()
        fitted_model = fit_t(fit_model, use_wvl, use_flx)
        # print(fitted_model.param_names)
        fitted_vals.append(fitted_model.parameters[2:])
    return np.array(fitted_vals)
def refine_velocity_guess(spectrum, axis, v_guess, detected_line, return_fit=False):
    """
    Refines a velocity guess with better accuracy, avoiding discrete guesses
    that correspond to whole pixel numbers. A Gaussian function is fitted to
    estimate the line position.

    Parameters
    ----------
    spectrum : 1D :class:`~numpy:numpy.ndarray`
        The spectrum containing emission lines
    axis : 1D :class:`~numpy:numpy.ndarray`
        The corresponding axis, in wavenumber
    v_guess : float
        A first guess on the velocity, typically obtained from
        :func:`guess_source_velocity`
    detected_line : str
        Name of the main line present in the spectrum (as defined
        `here <http://celeste.phy.ulaval.ca/orcs-doc/introduction.html#list-of-available-lines>`_)
    return_fit : bool, Default = False
        (Optional) If True, returns the fit parameters, for further investigation

    Returns
    -------
    v : float
        The updated velocity guess
    """
    from orb.utils.spectrum import line_shift, compute_radial_velocity
    from orb.core import Lines
    from astropy.modeling import models, fitting

    line_rest = Lines().get_line_cm1(detected_line)
    mu = line_rest + line_shift(v_guess, line_rest, wavenumber=True)
    G0 = models.Gaussian1D(amplitude=np.nanmax(spectrum), mean=mu, stddev=11)
    G0.mean.max = line_rest + line_shift(v_guess - 25, line_rest, wavenumber=True)
    G0.mean.min = line_rest + line_shift(v_guess + 25, line_rest, wavenumber=True)
    C = models.Const1D(amplitude=np.nanmedian(spectrum))
    model = C + G0
    fitter = fitting.LevMarLSQFitter()
    fit = fitter(model, axis, spectrum)
    if return_fit:
        return compute_radial_velocity(fit.mean_1, line_rest, wavenumber=True), fit
    else:
        return compute_radial_velocity(fit.mean_1, line_rest, wavenumber=True)
def __init__(self, x, y, dy, initial_models=None):
    QObject.__init__(self)

    self.x = x
    self.y = y
    self.dy = dy

    if initial_models is None:
        initial_models = [models.Const1D(0.0)]
    self.models = initial_models

    self.ui = ModelBrowserUI(self, self.models, self.x, self.y)
    self.plot, self.resid = _build_axes(self.ui.canvas.fig)
    self._draw(preserve_limits=False)

    self.mouse_handler = ModelEventHandler(self.plot, self.active_model)
    add_callback(self.mouse_handler, 'model', self.set_model)

    self.ui.fit.pressed.connect(nonpartial(self.fit))
    self._sync_model_list()
def setup_class(cls):
    np.random.seed(150)

    cls.nlor = 3

    cls.x_0_0 = 0.5
    cls.x_0_1 = 2.0
    cls.x_0_2 = 7.5

    cls.amplitude_0 = 200.0
    cls.amplitude_1 = 100.0
    cls.amplitude_2 = 50.0

    cls.fwhm_0 = 0.1
    cls.fwhm_1 = 1.0
    cls.fwhm_2 = 0.5

    cls.whitenoise = 2.0

    cls.model = models.Lorentz1D(cls.amplitude_0, cls.x_0_0, cls.fwhm_0) + \
        models.Lorentz1D(cls.amplitude_1, cls.x_0_1, cls.fwhm_1) + \
        models.Lorentz1D(cls.amplitude_2, cls.x_0_2, cls.fwhm_2) + \
        models.Const1D(cls.whitenoise)

    freq = np.linspace(0.01, 10.0, int(10.0 / 0.01))
    p = cls.model(freq)
    noise = np.random.exponential(size=len(freq))
    power = p * noise

    cls.ps = Powerspectrum()
    cls.ps.freq = freq
    cls.ps.power = power
    cls.ps.df = cls.ps.freq[1] - cls.ps.freq[0]
    cls.ps.m = 1

    cls.t0 = np.asarray([200.0, 0.5, 0.1, 100.0, 2.0, 1.0,
                         50.0, 7.5, 0.5, 2.0])

    cls.parest, cls.res = fit_lorentzians(cls.ps, cls.nlor, cls.t0)
def abtov2v3(channel, **kwargs):
    # Construct the reference data model in general JWST imager type
    input_model = datamodels.ImageModel()

    # Convert input of type '1A' into the band and channel that pipeline needs
    theband, thechan = bandchan(channel)
    # Set the filter in the data model meta header
    input_model.meta.instrument.band = theband
    input_model.meta.instrument.channel = thechan

    # If passed input refs keyword, unpack and use it
    if ('refs' in kwargs):
        therefs = kwargs['refs']
    # Otherwise use default reference files
    else:
        therefs = setreffiles_cdp6(channel)

    # The pipeline transform actually uses the triple
    # (alpha,beta,lambda) -> (v2,v3,lambda)
    basedistortion = miri.abl_to_v2v3l(input_model, therefs)
    # At present, the pipeline uses v2,v3 in degrees so convert to arcsec
    distortion = basedistortion | models.Scale(3600.) & models.Scale(3600.) & \
        models.Identity(1)

    # Therefore we need to hack a reasonable wavelength onto our input,
    # run transform, then hack it back off again
    thewave = midwave(channel)
    # Duplicate the beta value at first, then replace with wavelength value
    map = models.Mapping((0, 1, 1)) | models.Identity(1) & models.Identity(1) & \
        models.Const1D(thewave)
    map.inverse = models.Mapping((0, 1), n_inputs=3)

    allmap = map | distortion | map.inverse
    allmap.inverse = map | distortion.inverse | map.inverse

    # Return the distortion object that can then be queried
    return allmap
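# Small standalone illustration of the "hack a wavelength onto the input"
# trick used above (the value 5.3 is an arbitrary stand-in for midwave()).
from astropy.modeling import models
thewave = 5.3
pad_wave = models.Mapping((0, 1, 1)) | \
    models.Identity(1) & models.Identity(1) & models.Const1D(thewave)
pad_wave.inverse = models.Mapping((0, 1), n_inputs=3)
print(pad_wave(10.0, 20.0))               # -> (10.0, 20.0, 5.3)
print(pad_wave.inverse(10.0, 20.0, 5.3))  # -> (10.0, 20.0)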
def setup_class(cls):
    m = 1
    nfreq = 100000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
    cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
                             cls.model, m=cls.ps.m)
    cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

    cls.fitmethod = "BFGS"
    cls.max_post = True
    cls.t0 = [2.0]
    cls.neg = True

    cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
                                      method=cls.fitmethod,
                                      args=cls.neg, tol=1.e-10)
def setup_class(cls):
    m = 1
    nfreq = 1000000
    freq = np.arange(nfreq)
    noise = np.random.exponential(size=nfreq)
    power = noise * 2.0

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
def test_fitting_backend():
    np.random.seed(42)

    x, y = build_spectrum()

    spectrum = Spectrum1D(flux=y * u.Jy, spectral_axis=x * u.um)

    g1f = models.Gaussian1D(0.7 * u.Jy, 4.65 * u.um, 0.3 * u.um, name='g1')
    g2f = models.Gaussian1D(2.0 * u.Jy, 5.55 * u.um, 0.3 * u.um, name='g2')
    g3f = models.Gaussian1D(-2. * u.Jy, 8.15 * u.um, 0.2 * u.um, name='g3')
    zero_level = models.Const1D(1. * u.Jy, name='const1d')

    model_list = [g1f, g2f, g3f, zero_level]
    expression = "g1 + g2 + g3 + const1d"

    # Returns the initial model
    fm, fitted_spectrum = fb.fit_model_to_spectrum(spectrum, model_list,
                                                   expression,
                                                   run_fitter=False)

    parameters_expected = np.array(
        [0.7, 4.65, 0.3, 2., 5.55, 0.3, -2., 8.15, 0.2, 1.])
    assert np.allclose(fm.parameters, parameters_expected, atol=1e-5)

    # Returns the fitted model
    fm, fitted_spectrum = fb.fit_model_to_spectrum(spectrum, model_list,
                                                   expression,
                                                   run_fitter=True)

    parameters_expected = np.array([
        1.0104705, 4.58956282, 0.19590464, 2.39892026, 5.49867754,
        0.10834472, -1.66902953, 8.19714439, 0.09535613, 3.99125545
    ])
    assert np.allclose(fm.parameters, parameters_expected, atol=1e-5)
def setup_class(cls):
    cls.m = 10
    nfreq = 1000000
    freq = np.arange(nfreq)
    noise = scipy.stats.chi2(2. * cls.m).rvs(size=nfreq) / float(cls.m)
    power = noise

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = cls.m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    cls.ps = ps
    cls.a_mean, cls.a_var = 2.0, 1.0

    cls.model = models.Const1D()

    p_amplitude = lambda amplitude: \
        scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

    cls.priors = {"amplitude": p_amplitude}
def test_simulate_highest_outlier_works(self):
    m = 1
    nfreq = 100000
    seed = 100
    freq = np.linspace(1, 10, nfreq)
    rng = np.random.RandomState(seed)
    noise = rng.exponential(size=nfreq)
    model = models.Const1D()
    model.amplitude = 2.0
    p = model(freq)
    power = noise * p

    ps = Powerspectrum()
    ps.freq = freq
    ps.power = power
    ps.m = m
    ps.df = freq[1] - freq[0]
    ps.norm = "leahy"

    nsim = 10

    loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

    s_all = np.atleast_2d(np.ones(nsim) * 2.0).T

    pe = PSDParEst(ps)
    res = pe.fit(loglike, [2.0], neg=True)

    maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
                                             max_post=False, seed=seed)

    assert maxpow_sim.shape[0] == nsim
    assert np.all(maxpow_sim > 20.00) and np.all(maxpow_sim < 31.0)
def create_channel_selector(alpha, lam, channel, beta):
    if channel == 1:
        nslice = 21
    elif channel == 2:
        nslice = 17
    elif channel == 3:
        nslice = 16
    elif channel == 4:
        nslice = 12
    else:
        raise ValueError("Incorrect channel #")

    ind = []
    for i in range(5):
        for j in range(5):
            ind.append((i, j))

    selector = {}
    # In the paper the formula is (x-xs)^j*y^i, so the 'x' corresponds
    # to y in modeling. - swapped in Mapping
    axs = alpha.field('x_s')
    lxs = lam.field('x_s')
    for i in range(nslice):
        ashift = models.Shift(axs[i])
        lshift = models.Shift(lxs[i])
        palpha = models.Polynomial2D(8)
        plam = models.Polynomial2D(8)
        for index, coeff in zip(ind, alpha[i][1:]):
            setattr(palpha, 'c{0}_{1}'.format(index[0], index[1]), coeff)
        for index, coeff in zip(ind, lam[i][1:]):
            setattr(plam, 'c{0}_{1}'.format(index[0], index[1]), coeff)
        alpha_model = ashift & models.Identity(1) | palpha
        lam_model = lshift & models.Identity(1) | plam
        beta_model = models.Const1D(beta[0] + (i - 1) * beta[1])
        selector[i] = models.Mapping((1, 0, 1, 0, 0)) | \
            alpha_model & lam_model & beta_model
    return selector
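# Small standalone illustration of the input swap/duplication done by the
# Mapping above (arbitrary values): (x, y) -> (y, x, y, x, x), which then
# feeds the alpha, lambda and beta sub-models in parallel.
from astropy.modeling import models
reorder = models.Mapping((1, 0, 1, 0, 0))
print(reorder(2.0, 7.0))  # -> (7.0, 2.0, 7.0, 2.0, 2.0)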
# We will search for pulsations over a range of frequencies around the known
# pulsation period.
df = (period_ranges[1] - period_ranges[0]) / period_bins
frequencies = 1 / np.arange(period_ranges[0], period_ranges[1], df)

freq, efstat = epoch_folding_search(times, frequencies, nbin=nbin)

pulse_frequency = freq[np.where(efstat == max(efstat))[0][0]]
print('pulse frequency', pulse_frequency)
_ = write_files('pulse_frequency_NuSTAR', pulse_frequency)

# Fit the epoch folding distribution with a Lorentzian curve plus a constant.
g_init = models.Lorentz1D(
    amplitude=max(efstat) - min(efstat),
    x_0=pulse_frequency,
    fwhm=pulse_frequency / 500) + models.Const1D(amplitude=min(efstat))
fit_g = fitting.LevMarLSQFitter()

# Find the half-maximum crossings on either side of the peak.
bin_max = [np.where(efstat == max(efstat))[0][0]][0]
bin_left_min = 0
bin_right_min = len(efstat)
for j in range(min(bin_max, len(efstat) - bin_max) - 2):
    d_efstat = efstat[bin_max - j - 1] - efstat[bin_max - j]
    if d_efstat > 0 and max(efstat) - efstat[bin_max - j] > (max(efstat) - min(efstat)) / 2:
        bin_left_min = bin_max - j
        break
for j in range(min(bin_max, len(efstat) - bin_max) - 2):
    d_efstat = efstat[bin_max + j + 1] - efstat[bin_max + j]
    if d_efstat > 0 and max(efstat) - efstat[bin_max + j] > (max(efstat) - min(efstat)) / 2:
        # symmetric to the left-side search above
        bin_right_min = bin_max + j
        break
def test_LabelMapper():
    transform = models.Const1D(12.3)
    lm = selector.LabelMapper(inputs=('x', 'y'), mapper=transform,
                              inputs_mapping=(1,))
    x = np.linspace(3, 11, 20)
    assert_allclose(lm(x, x), transform(x))
def test_cube_fitting_backend():
    np.random.seed(42)

    SIGMA = 0.1  # noise in data
    TOL = 0.4  # test tolerance

    # Flux cube oriented as in JWST data. To build a Spectrum1D
    # instance with this, one needs to transpose it so the spectral
    # axis direction corresponds to the last index.
    flux_cube = np.zeros((SPECTRUM_SIZE, IMAGE_SIZE, IMAGE_SIZE))

    # Generate list of all spaxels to be fitted
    _spx = [[(x, y) for x in range(IMAGE_SIZE)] for y in range(IMAGE_SIZE)]
    spaxels = [item for sublist in _spx for item in sublist]

    # Fill cube spaxels with spectra that differ from
    # each other only by their noise component.
    x, _ = build_spectrum()
    for spx in spaxels:
        flux_cube[:, spx[0], spx[1]] = build_spectrum(sigma=SIGMA)[1]

    # Transpose so it can be packed in a Spectrum1D instance.
    flux_cube = flux_cube.transpose(1, 2, 0)

    spectrum = Spectrum1D(flux=flux_cube * u.Jy, spectral_axis=x * u.um)

    # Initial model for fit.
    g1f = models.Gaussian1D(0.7 * u.Jy, 4.65 * u.um, 0.3 * u.um, name='g1')
    g2f = models.Gaussian1D(2.0 * u.Jy, 5.55 * u.um, 0.3 * u.um, name='g2')
    g3f = models.Gaussian1D(-2. * u.Jy, 8.15 * u.um, 0.2 * u.um, name='g3')
    zero_level = models.Const1D(1. * u.Jy, name='const1d')

    model_list = [g1f, g2f, g3f, zero_level]
    expression = "g1 + g2 + g3 + const1d"

    # Fit to all spaxels.
    fitted_parameters, fitted_spectrum = fb.fit_model_to_spectrum(
        spectrum, model_list, expression)

    # Check that parameter results are formatted as expected.
    assert type(fitted_parameters) == list
    assert len(fitted_parameters) == 225

    for m in fitted_parameters:
        if m['x'] == 3 and m['y'] == 2:
            fitted_model = m['model']

    assert type(fitted_model[0].amplitude.value) == np.float64
    assert fitted_model[0].amplitude.unit == u.Jy

    assert type(fitted_model[0] == params.Parameter)
    assert type(fitted_model[0].mean.value) == np.float64
    assert fitted_model[0].mean.unit == u.um

    # Check that spectrum result is formatted as expected.
    assert type(fitted_spectrum) == Spectrum1D
    assert len(fitted_spectrum.shape) == 3
    assert fitted_spectrum.shape == (IMAGE_SIZE, IMAGE_SIZE, SPECTRUM_SIZE)
    assert fitted_spectrum.flux.unit == u.Jy

    # The important point here isn't to check the accuracy of the
    # fit, which was already tested elsewhere. We are mostly
    # interested here in checking the correctness of the data
    # packaging into the output products.
    assert np.allclose(fitted_model[0].amplitude.value, 1.09, atol=TOL)
    assert np.allclose(fitted_model[1].amplitude.value, 2.4, atol=TOL)
    assert np.allclose(fitted_model[2].amplitude.value, -1.7, atol=TOL)

    assert np.allclose(fitted_model[0].mean.value, 4.6, atol=TOL)
    assert np.allclose(fitted_model[1].mean.value, 5.5, atol=TOL)
    assert np.allclose(fitted_model[2].mean.value, 8.2, atol=TOL)

    assert np.allclose(fitted_model[0].stddev.value, 0.2, atol=TOL)
    assert np.allclose(fitted_model[1].stddev.value, 0.1, atol=TOL)
    assert np.allclose(fitted_model[2].stddev.value, 0.1, atol=TOL)

    assert np.allclose(fitted_model[3].amplitude.value, 4.0, atol=TOL)
def test_const1d(tmpdir, standard_version):
    helpers.assert_roundtrip_tree({"model": astmodels.Const1D(amplitude=5.)},
                                  tmpdir,
                                  init_options={"version": standard_version})
astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3),
astmodels.Multiply(10 * u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2. * u.deg),
astmodels.Scale(3.4 * u.deg),
astmodels.RotateNative2Celestial(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotateCelestial2Native(5.63 * u.deg, -72.5 * u.deg, 180 * u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
astmodels.Const1D(amplitude=5.),
astmodels.Const2D(amplitude=5.),
astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4., theta=0.1),
astmodels.Exponential1D(amplitude=10., tau=3.5),
astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
astmodels.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=3., y_stddev=3.),
astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
astmodels.Logarithmic1D(amplitude=10., tau=3.5),
astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
'Trapezoid1D': models.Trapezoid1D(1.0, 1.0, 1.0, 1.0),
'Moffat1D': models.Moffat1D(1.0, 1.0, 1.0, 1.0),
'ExponentialCutoffPowerLaw1D': models.ExponentialCutoffPowerLaw1D(1.0, 1.0, 1.0, 1.0),
'BrokenPowerLaw1D': models.BrokenPowerLaw1D(1.0, 1.0, 1.0, 1.0),
'LogParabola1D': models.LogParabola1D(1.0, 1.0, 1.0, 1.0),
'PowerLaw1D': models.PowerLaw1D(1.0, 1.0, 1.0),
'Linear1D': models.Linear1D(1.0, 0.0),
'Const1D': models.Const1D(0.0),
'Redshift': models.Redshift(0.0),
'Scale': models.Scale(1.0),
'Shift': models.Shift(0.0),
'Sine1D': models.Sine1D(1.0, 1.0),
'Chebyshev1D': models.Chebyshev1D(1),
'Legendre1D': models.Legendre1D(1),
'Polynomial1D': models.Polynomial1D(1),
}
def fit_lorentzians(ps, nlor, starting_pars, fit_whitenoise=True,
                    max_post=False, priors=None, fitmethod="L-BFGS-B"):
    """
    Fit a number of Lorentzians to a power spectrum, possibly including
    white noise. Each Lorentzian has three parameters (amplitude, centroid
    position, full-width at half maximum), plus one extra parameter if the
    white noise level should be fit as well.

    Priors for each parameter can be included in case `max_post = True`, in
    which case the function will attempt a Maximum-A-Posteriori fit. Priors
    must be specified as a dictionary with one entry for each parameter.
    The parameter names are `(amplitude_i, x_0_i, fwhm_i)` for each `i` out
    of a total of `N` Lorentzians. The white noise level has a parameter
    `amplitude_N`. For example, a model with two Lorentzians and a white
    noise level would have parameters:
    [amplitude_0, x_0_0, fwhm_0, amplitude_1, x_0_1, fwhm_1, amplitude_2].

    Parameters
    ----------
    ps : Powerspectrum
        A Powerspectrum object with the data to be fit

    nlor : int
        The number of Lorentzians to fit

    starting_pars : iterable
        The list of starting guesses for the optimizer. See explanation above
        for ordering of parameters in this list.

    fit_whitenoise : bool, optional, default True
        If True, the code will attempt to fit a white noise level along with
        the Lorentzians. Be sure to include a starting parameter for the
        optimizer in `starting_pars`!

    max_post : bool, optional, default False
        If True, perform a Maximum-A-Posteriori fit of the data rather than a
        Maximum Likelihood fit. Note that this requires priors to be
        specified, otherwise this will cause an exception!

    priors : {dict | None}, optional, default None
        Dictionary with priors for the MAP fit. This should be of the form
        {"parameter name": probability distribution, ...}

    fitmethod : string, optional, default "L-BFGS-B"
        Specifies an optimization algorithm to use. Supply any valid option
        for `scipy.optimize.minimize`.

    Returns
    -------
    parest : PSDParEst object
        A PSDParEst object for further analysis

    res : OptimizationResults object
        The OptimizationResults object storing useful results and quantities
        relating to the fit

    Example
    -------

    We start by making an example power spectrum with three Lorentzians

    >>> np.random.seed(400)
    >>> nlor = 3

    >>> x_0_0 = 0.5
    >>> x_0_1 = 2.0
    >>> x_0_2 = 7.5

    >>> amplitude_0 = 150.0
    >>> amplitude_1 = 50.0
    >>> amplitude_2 = 15.0

    >>> fwhm_0 = 0.1
    >>> fwhm_1 = 1.0
    >>> fwhm_2 = 0.5

    We will also include a white noise level:

    >>> whitenoise = 2.0

    >>> model = models.Lorentz1D(amplitude_0, x_0_0, fwhm_0) + \\
    ...         models.Lorentz1D(amplitude_1, x_0_1, fwhm_1) + \\
    ...         models.Lorentz1D(amplitude_2, x_0_2, fwhm_2) + \\
    ...         models.Const1D(whitenoise)

    >>> freq = np.linspace(0.01, 10.0, int(10.0 / 0.01))
    >>> p = model(freq)
    >>> noise = np.random.exponential(size=len(freq))

    >>> power = p * noise
    >>> ps = Powerspectrum()
    >>> ps.freq = freq
    >>> ps.power = power
    >>> ps.df = ps.freq[1] - ps.freq[0]
    >>> ps.m = 1

    Now we have to guess starting parameters. For each Lorentzian, we have
    amplitude, centroid position and fwhm, and this pattern repeats for each
    Lorentzian in the fit. The white noise level is the last parameter.

    >>> t0 = [150, 0.4, 0.2, 50, 2.3, 0.6, 20, 8.0, 0.4, 2.1]

    We're ready to do the fit:

    >>> parest, res = fit_lorentzians(ps, nlor, t0)

    `res` contains a whole array of useful information about the fit, for
    example the parameters at the optimum:

    >>> p_opt = res.p_opt

    """
    model = models.Lorentz1D()

    if nlor > 1:
        for i in range(nlor - 1):
            model += models.Lorentz1D()

    if fit_whitenoise:
        model += models.Const1D()

    return fit_powerspectrum(ps, model, starting_pars, max_post=max_post,
                             priors=priors, fitmethod=fitmethod)