Example #1
def get_raw_lum(rmid):
    mjd_list = map(int, os.listdir(Location.project_loca + "data/raw/" +
                                   str(rmid)))
    try:
        f = open(Location.project_loca + "result/flux_of_line/" + str(rmid) +
                 "/cont_error.pkl", "rb")
        cont_err = pickle.load(f)
        f.close()
        err = max(cont_err.values())
    except Exception:
        raise
    lum = 0.0
    num = 0.0
    for each in mjd_list:
        try:
            f = open(Location.project_loca + "result/fit_with_temp/data/" +
                     str(rmid) + "/" + str(each) + "-cont.pkl", "rb")
            cont_res = pickle.load(f)
            f.close()
            cont_func = models.PowerLaw1D(cont_res[0], cont_res[1], cont_res[2])
            num = num + 1.0
            lum = lum + cont_func(5100.0)
        except Exception:
            continue
    return [lum / num, err]
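
A minimal evaluation sketch (not from the original repository): astropy's PowerLaw1D(amplitude, x_0, alpha) computes amplitude * (x / x_0) ** (-alpha), so each cont_func(5100.0) call above is simply the fitted continuum level at 5100 Å for that epoch. The parameter values below are placeholders.

from astropy.modeling import models

cont_func = models.PowerLaw1D(amplitude=1.0, x_0=5100.0, alpha=1.5)  # placeholder parameters
print(cont_func(5100.0))  # equals the amplitude, since x == x_0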
Example #2
    def test_calibrate_lrt_works_with_mvn(self):

        m = 1
        nfreq = 10000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=None,
                                max_post=False, nsim=10,
                                seed=100)

        assert pval > 0.001
Example #3
    def test_calibrate_lrt_works_with_sampling(self):
        m = 1
        nfreq = 100
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        lpost = PSDPosterior(ps.freq, ps.power, model, m=1)

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
                amplitude)

        priors = {"amplitude": p_amplitude_1}

        priors2 = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        lpost.logprior = set_logprior(lpost, priors)

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
        lpost2.logprior = set_logprior(lpost2, priors2)

        pe = PSDParEst(ps)

        with catch_warnings(RuntimeWarning):
            pval = pe.calibrate_lrt(lpost, [2.0],
                                    lpost2, [2.0, 1.0, 2.0],
                                    sample=None,
                                    max_post=True,
                                    nsim=10,
                                    nwalkers=10,
                                    burnin=10,
                                    niter=10,
                                    seed=100)

        assert pval > 0.001
Example #4
    def setup_class(cls):
        photon_arrivals = np.sort(np.random.uniform(0, 1000, size=10000))
        cls.lc = Lightcurve.make_lightcurve(photon_arrivals, dt=1.0)
        cls.ps = Powerspectrum(cls.lc, norm="frac")
        pl = models.PowerLaw1D()
        pl.x_0.fixed = True

        cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, pl, m=cls.ps.m)
Example #5
def fitRagfb():
	x = [0.05, 0.1, 1, 8, 15]  #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
	y = [0.20, 0.35, 0.50, 0.70, 0.75]
	init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
	fitter = fitting.LevMarLSQFitter()
	fit = fitter(init, x, y)

	return fit
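
A usage sketch (assumed, not part of the original example): fitRagfb returns a fitted PowerLaw1D instance, which can be evaluated directly at new bin midpoints.

import numpy as np

rag_fit = fitRagfb()
print(rag_fit(np.array([0.5, 2.0, 10.0])))  # fraction estimates at hypothetical midpoints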
Example #6
def power_model(files_o, files_c, ps=0.12):

    waves = [445, 551, 658, 806]

    fwhm_o = []
    fwhm_c = []
    fwhm_o_err = []
    fwhm_c_err = []
    for i in range(4):
        FWHM_min_o, sig_FWHM_min_o, FWHM_maj_o, sig_FWHM_maj_o = moffat.calc_mof_fwhm(
            files_o[i], filt=False, plate_scale=ps)
        FWHM_min_c, sig_FWHM_min_c, FWHM_maj_c, sig_FWHM_maj_c = moffat.calc_mof_fwhm(
            files_c[i], filt=False, plate_scale=ps)
        fwhm_o.append(np.median(FWHM_min_o) * ps)
        fwhm_o_err.append(np.std(FWHM_min_o) * ps / np.sqrt(len(FWHM_min_o)))
        fwhm_c.append(np.median(FWHM_min_c) * ps)
        fwhm_c_err.append(np.std(FWHM_min_c) * ps / np.sqrt(len(FWHM_min_c)))

    plt.errorbar(waves, fwhm_o, yerr=fwhm_o_err, fmt='bo', label='AO-Off Data')
    plt.errorbar(waves, fwhm_c, yerr=fwhm_c_err, fmt='ro', label='AO-On Data')

    init = models.PowerLaw1D(amplitude=1., x_0=1., alpha=1.)
    fit = fitting.LevMarLSQFitter()
    p_o = fit(init, waves, fwhm_o, weights=1.0 / np.array(fwhm_o_err))
    p_c = fit(init, waves, fwhm_c, weights=1.0 / np.array(fwhm_c_err))

    x = np.linspace(445, 806, 100)
    plt.plot(x, p_o(x), 'b-', label='AO-Off Model')
    plt.plot(x, p_c(x), 'r-', label='AO-On Model')

    χ2_o = np.sum(((p_o(waves) - fwhm_o) / fwhm_o_err)**2)
    χ2_c = np.sum(((p_c(waves) - fwhm_c) / fwhm_c_err)**2)

    α_o = p_o.alpha.value
    α_c = p_c.alpha.value

    #plt.text(450, 0.61, 'χ$^2$='+str(np.round(χ2_o,2)), color='b', fontsize=14)
    #plt.text(450, 0.5, 'χ$^2$='+str(np.round(χ2_c,2)), color='r', fontsize=14)
    #plt.text(450, 0.63, 'α='+str(np.round(α_o,2)), color='b', fontsize=14)
    #plt.text(450, 0.52, 'α='+str(np.round(α_c,2)), color='r', fontsize=14)
    print('χ$^2$=' + str(np.round(χ2_o, 2)))
    print('χ$^2$=' + str(np.round(χ2_c, 2)))
    print('α=' + str(np.round(α_o, 2)))
    print('α=' + str(np.round(α_c, 2)))

    plt.xlabel('Observation Wavelength (nm)', fontsize=16)
    plt.ylabel('Minor FWHM (arcsec)', fontsize=16)
    plt.title('Wavelength Dependence Model', fontsize=18)
    plt.legend()
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)

    return
Example #7
def calc_flux(res, cont_res):
    # Separate the parameter and construct integrating function
    fe2_func = FeII_template_obs(res[0], res[1], res[2], res[3], res[4],
                                 res[5])
    cont_func = models.PowerLaw1D(cont_res[0], cont_res[1], cont_res[2])
    # Integrate to get flux
    x = np.linspace(4000.0, 5500.0, 100000)
    fe2_flux = np.trapz(fe2_func(x), x)
    cont_flux = cont_func(5100.0)
    hbetan_flux = np.sqrt(2.0 * np.pi) * abs(res[8]) * res[6]
    hbetab_flux = np.sqrt(2.0 * np.pi) * abs(res[11]) * res[9]
    o3_flux = np.sqrt(2.0 * np.pi) * abs(res[23]) * res[21]
    return [fe2_flux, hbetan_flux, hbetab_flux, o3_flux, cont_flux]
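
The hbetan_flux, hbetab_flux and o3_flux terms use the closed-form Gaussian integral, flux = sqrt(2 * pi) * amplitude * stddev. A quick numerical cross-check (a sketch, not from the source, with placeholder parameters):

import numpy as np
from astropy.modeling import models

g = models.Gaussian1D(amplitude=3.6, mean=4853.3, stddev=7.0)
x = np.linspace(4700.0, 5000.0, 100000)
print(np.trapz(g(x), x), np.sqrt(2.0 * np.pi) * 3.6 * 7.0)  # the two values should agree closely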
Example #8
def models_with_input_eq():
    # 1D model
    m1 = astmodels.Shift(1*u.kg)
    m1.input_units_equivalencies = {'x': u.mass_energy()}

    # 2D model
    m2 = astmodels.Const2D(10*u.Hz)
    m2.input_units_equivalencies = {'x': u.dimensionless_angles(),
                                    'y': u.dimensionless_angles()}

    # 2D model with only one input equivalencies
    m3 = astmodels.Const2D(10*u.Hz)
    m3.input_units_equivalencies = {'x': u.dimensionless_angles()}

    # model using equivalency that has args using units
    m4 = astmodels.PowerLaw1D(amplitude=1*u.m, x_0=10*u.pix, alpha=7)
    m4.input_units_equivalencies = {'x': u.equivalencies.pixel_scale(0.5*u.arcsec/u.pix)}

    return[m1, m2, m3, m4]
Example #9
    def test_compute_lrt_works(self):

        m = 1
        nfreq = 100000
        freq = np.linspace(1, 10, nfreq)
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T
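        # s_all: 10 parameter draws (each just amplitude = 2.0) stacked as a column,
        # used below to simulate the LRT distribution under the simpler model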

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)

        pe = PSDParEst(ps)

        lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0],
                                             loglike2, [2.0, 1.0, 2.0],
                                             neg=True)
        lrt_sim = pe.simulate_lrts(s_all,
                                   loglike, [2.0],
                                   loglike2, [2.0, 1.0, 2.0],
                                   max_post=False,
                                   seed=100)

        assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
        assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
Example #10
def get_fwhm_hb(rmid, mjd):
    day_dir = Location.project_loca + "result/fit_with_temp/data/" + \
        str(rmid) + "/" + str(mjd) + "-"
    hb_file = open(day_dir + "Fe2.pkl", "rb")
    hb = pickle.load(hb_file)[10:12]
    hb_file.close()
    cont_file = open(day_dir + "cont.pkl", "rb")
    cont = pickle.load(cont_file)
    cont_file.close()
    cont_func = models.PowerLaw1D(cont[0], cont[1], cont[2])
    [wave, flux, error] = read_raw_data(rmid, mjd)
    [wave, flux,
     error] = extract_fit_part(wave, flux, error, hb[0] - 2.0 * hb[1],
                               hb[0] + 2.0 * hb[1])
    [wave, flux, error] = mask_points(wave, flux, error)
    flux = flux - cont_func(wave)
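    # Split the continuum-subtracted profile at its peak and locate the
    # half-maximum crossing on each side; their separation, scaled by the
    # line width hb[1], is the returned FWHM measure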
    up_wave = wave[0:flux.argmax()]
    up_flux = flux[0:flux.argmax()]
    down_flux = flux[flux.argmax():-1]
    down_wave = wave[flux.argmax():-1]
    wave_min = find_wave(up_wave, up_flux, 0.5 * np.amax(flux))
    wave_max = find_wave(down_wave, down_flux, 0.5 * np.amax(flux))
    return (wave_max - wave_min) / hb[1]
Example #11
    def test_calibrate_lrt_works_as_expected(self):

        m = 1
        df = 0.01
        freq = np.arange(df, 5 + df, df)
        nfreq = freq.size
        rng = np.random.RandomState(100)
        noise = rng.exponential(size=nfreq)
        model = models.Const1D()
        model.amplitude = 2.0
        p = model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = df
        ps.norm = "leahy"

        loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)

        s_all = np.atleast_2d(np.ones(10) * 2.0).T

        model2 = models.PowerLaw1D() + models.Const1D()
        model2.x_0_0.fixed = True
        loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, m=1)

        pe = PSDParEst(ps)

        pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
                                [2.0, 1.0, 2.0], sample=s_all,
                                max_post=False, nsim=5,
                                seed=100)

        assert pval > 0.001
Example #12
def template_fit(wave, flux, error, image_control, init_value, rmid, mjd):
    img_directory = Location.project_loca + "result/fit_with_temp/fig/" + \
        str(rmid)
    # Fit continuum
    if image_control:  # Control image output
        fig = plt.figure()
        plt.plot(wave, flux)
    # Part of the spectra without any prominent emission lines
    no_line_part = [[4000.0, 4050.0], [4150.0, 4280.0], [4420, 4750],
                    [5050, 5500]]
    cont_wave = np.array([])
    cont_flux = np.array([])
    cont_error = np.array([])
    for each_part in no_line_part:
        [pwave, pflux, perror] = extract_fit_part(wave, flux, error,
                                                  each_part[0], each_part[1])
        cont_wave = np.append(cont_wave, pwave)
        cont_flux = np.append(cont_flux, pflux)
        cont_error = np.append(cont_error, perror)
    cont_fitter = fitting.LevMarLSQFitter()
    if init_value == []:
        cont = fe_temp_observed.FeII_template_obs(0.0, 2000.0, 2.6, 0.0, 2000.0, 2.6, bounds = {"i_r_l1": [0.0, 50.0], "i_r_n3": [0.0, 50.0]}) + \
            models.PowerLaw1D(flux[0], wave[0], - np.log(abs(flux[-1]/flux[0])+0.001) / np.log(abs(wave[-1]/wave[0]) + 0.001), fixed = {"x_0": True})
    else:
        fe2_param = init_value[1][0:6]
        cont = fe_temp_observed.FeII_template_obs(fe2_param[0], fe2_param[1],
                                                  fe2_param[2], fe2_param[3],
                                                  fe2_param[4], fe2_param[5],
                                                  bounds = {"i_r_l1": [0.0, 50.0], "i_r_n3": [0.0, 50.0]}) + \
            models.PowerLaw1D(init_value[0][0], init_value[0][1], init_value[0][2], fixed = {"x_0": True})
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            cont_fit = cont_fitter(cont,
                                   cont_wave,
                                   cont_flux,
                                   weights=cont_error**(-2),
                                   maxiter=10000)
        except Exception as reason:
            if image_control:  # Control image output
                save_fig(fig, img_directory, str(mjd) + "-cont-failed")
                plt.close()
            raise SpectraException("Continuum fit failed because of " +
                                   str(reason))
    if image_control:  # Control image output
        para = cont_fit.parameters[6:9]
        cont_cont = models.PowerLaw1D(para[0], para[1], para[2])
        cont_spec = cont_cont(wave)
        fit_spec = cont_fit(wave)
        plt.plot(wave, fit_spec)
        plt.plot(wave, cont_spec)
        plt.plot(wave, fit_spec - cont_spec)
        save_fig(fig, img_directory, str(mjd) + "-cont-success")
        plt.close()
    # Fit emission lines
    flux = flux - cont_fit(wave)
    if image_control:  # Control image output
        fig1 = plt.figure()
        plt.plot(wave, flux)
    if init_value == []:
        hbeta_complex_fit_func = \
            models.Gaussian1D(3.6, 4853.30, 7.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4830, 4880], "stddev": [0.0001, 10.1]}) + \
            models.Gaussian1D(3.6, 4853.30, 40.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4830, 4880], "stddev": [10.1, 500.0]}) + \
            models.Gaussian1D(2.0, 4346.40, 2.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4323, 4369], "stddev": [0.0001, 50.0]}) + \
            models.Gaussian1D(2.0, 4101.73, 2.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4078, 4125], "stddev": [0.0001, 50.0]}) + \
            models.Gaussian1D(5.0, 4960.0, 6.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4937, 4983], "stddev": [0.0001, 23.8]}) + \
            models.Gaussian1D(20.0, 5008.0, 6.0, bounds = {"amplitude": [0.0, 50.0], "mean": [4985, 5031], "stddev": [0.0001, 23.8]})
    else:
        hbetan_param = init_value[1][6:9]
        hbetab_param = init_value[1][9:12]
        hother_param = init_value[1][12:18]
        o3_param = init_value[1][18:24]
        hbeta_complex_fit_func = \
                    models.Gaussian1D(hbetan_param[0], hbetan_param[1], hbetan_param[2], bounds = {"amplitude": [0.0, 50.0], "mean": [4830, 4880], "stddev": [0.0001, 10.1]}) + \
                    models.Gaussian1D(hbetab_param[0], hbetab_param[1], hbetab_param[2], bounds = {"amplitude": [0.0, 50.0], "mean": [4830, 4880], "stddev": [10.1, 500.0]}) + \
                    models.Gaussian1D(hother_param[0], hother_param[1], hother_param[2], bounds = {"amplitude": [0.0, 50.0], "mean": [4323, 4369], "stddev": [0.0001, 50.0]}) + \
                    models.Gaussian1D(hother_param[3], hother_param[4], hother_param[5], bounds = {"amplitude": [0.0, 50.0], "mean": [4078, 4125], "stddev": [0.0001, 50.0]}) + \
                    models.Gaussian1D(o3_param[0], o3_param[1], o3_param[2], bounds = {"amplitude": [0.0, 50.0], "mean": [4937, 4983], "stddev": [0.0001, 23.8]}) + \
                    models.Gaussian1D(o3_param[3], o3_param[4], o3_param[5], bounds = {"amplitude": [0.0, 50.0], "mean": [4985, 5031], "stddev": [0.0001, 23.8]})
    fitter = fitting.LevMarLSQFitter()
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            fit = fitter(hbeta_complex_fit_func,
                         wave,
                         flux,
                         weights=error**(-2),
                         maxiter=3000)
        except Exception as reason:
            if image_control:  # Control image output
                save_fig(fig1, img_directory, str(mjd) + "-failed")
                plt.close()
            raise SpectraException("Fit failed because of " + str(reason))
    expected = np.array(fit(wave))
    if image_control:  # Control image output
        plt.plot(wave, expected)
        save_fig(fig1, img_directory, str(mjd) + "-succeed")
        plt.close()
    rcs = 0
    for i in range(len(flux)):
        rcs = rcs + (flux[i] - expected[i])**2.0
    rcs = rcs / np.abs(len(flux) - 23)
    if rcs > 10.0:
        raise SpectraException("Reduced chi-square too large: " + str(rcs))
    return np.append(
        cont_fit.parameters[0:6], fit.parameters), cont_fit.parameters[
            6:9], rcs, cont_fitter.fit_info['nfev'], fitter.fit_info['nfev']
Example #13
def call_avg_plot_fullrange():

    # read in all arrays
    density_diambin_1_2, density_diambin_2_3, density_diambin_3_4, density_diambin_4_5, \
    density_diambin_5_6, density_diambin_6_7, density_diambin_7_8, density_diambin_8_9, \
    density_diambin_9_10, density_diambin_10_15, density_diambin_15_20, density_diambin_20_25, \
    density_diambin_25_30, density_diambin_30_35, slope_diambin_1_2, slope_diambin_2_3, \
    slope_diambin_3_4, slope_diambin_4_5, slope_diambin_5_6, slope_diambin_6_7, slope_diambin_7_8, \
    slope_diambin_8_9, slope_diambin_9_10, slope_diambin_10_15, slope_diambin_15_20, \
    slope_diambin_20_25, slope_diambin_25_30, slope_diambin_30_35 = read_no_overlap_arrays()

    density_diambin_1, density_diambin_2, density_diambin_3, density_diambin_4, \
    density_diambin_5, density_diambin_6, density_diambin_7, density_diambin_8, \
    density_diambin_9, density_diambin_10, density_diambin_15, density_diambin_20, \
    density_diambin_25, density_diambin_30, slope_diambin_1, slope_diambin_2, \
    slope_diambin_3, slope_diambin_4, slope_diambin_5, slope_diambin_6, slope_diambin_7, \
    slope_diambin_8, slope_diambin_9, slope_diambin_10, slope_diambin_15, slope_diambin_20, \
    slope_diambin_25, slope_diambin_30 = read_Nvalue_arrays()

    # get averages for these arrays
    # although there aren't any nans in the density arrays
    # there are some in the slope arrays.
    # no overlap
    density_diambin_1_2_avg, slope_diambin_1_2_avg, density_diambin_1_2_avgerror, slope_diambin_1_2_avgerror \
    = get_avg_finite_elements(density_diambin_1_2, slope_diambin_1_2)
    density_diambin_2_3_avg, slope_diambin_2_3_avg, density_diambin_2_3_avgerror, slope_diambin_2_3_avgerror \
    = get_avg_finite_elements(density_diambin_2_3, slope_diambin_2_3)
    density_diambin_3_4_avg, slope_diambin_3_4_avg, density_diambin_3_4_avgerror, slope_diambin_3_4_avgerror \
    = get_avg_finite_elements(density_diambin_3_4, slope_diambin_3_4)
    density_diambin_4_5_avg, slope_diambin_4_5_avg, density_diambin_4_5_avgerror, slope_diambin_4_5_avgerror \
    = get_avg_finite_elements(density_diambin_4_5, slope_diambin_4_5)
    density_diambin_5_6_avg, slope_diambin_5_6_avg, density_diambin_5_6_avgerror, slope_diambin_5_6_avgerror \
    = get_avg_finite_elements(density_diambin_5_6, slope_diambin_5_6)
    density_diambin_6_7_avg, slope_diambin_6_7_avg, density_diambin_6_7_avgerror, slope_diambin_6_7_avgerror \
    = get_avg_finite_elements(density_diambin_6_7, slope_diambin_6_7)
    density_diambin_7_8_avg, slope_diambin_7_8_avg, density_diambin_7_8_avgerror, slope_diambin_7_8_avgerror \
    = get_avg_finite_elements(density_diambin_7_8, slope_diambin_7_8)
    density_diambin_8_9_avg, slope_diambin_8_9_avg, density_diambin_8_9_avgerror, slope_diambin_8_9_avgerror \
    = get_avg_finite_elements(density_diambin_8_9, slope_diambin_8_9)
    density_diambin_9_10_avg, slope_diambin_9_10_avg, density_diambin_9_10_avgerror, slope_diambin_9_10_avgerror \
    = get_avg_finite_elements(density_diambin_9_10, slope_diambin_9_10)
    density_diambin_10_15_avg, slope_diambin_10_15_avg, density_diambin_10_15_avgerror, slope_diambin_10_15_avgerror \
    = get_avg_finite_elements(density_diambin_10_15, slope_diambin_10_15)
    density_diambin_15_20_avg, slope_diambin_15_20_avg, density_diambin_15_20_avgerror, slope_diambin_15_20_avgerror \
    = get_avg_finite_elements(density_diambin_15_20, slope_diambin_15_20)
    density_diambin_20_25_avg, slope_diambin_20_25_avg, density_diambin_20_25_avgerror, slope_diambin_20_25_avgerror \
    = get_avg_finite_elements(density_diambin_20_25, slope_diambin_20_25)
    density_diambin_25_30_avg, slope_diambin_25_30_avg, density_diambin_25_30_avgerror, slope_diambin_25_30_avgerror \
    = get_avg_finite_elements(density_diambin_25_30, slope_diambin_25_30)
    density_diambin_30_35_avg, slope_diambin_30_35_avg, density_diambin_30_35_avgerror, slope_diambin_30_35_avgerror \
    = get_avg_finite_elements(density_diambin_30_35, slope_diambin_30_35)

    # nvalue
    density_diambin_1_avg, slope_diambin_1_avg, density_diambin_1_avgerror, slope_diambin_1_avgerror \
    = get_avg_finite_elements(density_diambin_1, slope_diambin_1)
    density_diambin_2_avg, slope_diambin_2_avg, density_diambin_2_avgerror, slope_diambin_2_avgerror \
    = get_avg_finite_elements(density_diambin_2, slope_diambin_2)
    density_diambin_3_avg, slope_diambin_3_avg, density_diambin_3_avgerror, slope_diambin_3_avgerror \
    = get_avg_finite_elements(density_diambin_3, slope_diambin_3)
    density_diambin_4_avg, slope_diambin_4_avg, density_diambin_4_avgerror, slope_diambin_4_avgerror \
    = get_avg_finite_elements(density_diambin_4, slope_diambin_4)
    density_diambin_5_avg, slope_diambin_5_avg, density_diambin_5_avgerror, slope_diambin_5_avgerror \
    = get_avg_finite_elements(density_diambin_5, slope_diambin_5)
    density_diambin_6_avg, slope_diambin_6_avg, density_diambin_6_avgerror, slope_diambin_6_avgerror \
    = get_avg_finite_elements(density_diambin_6, slope_diambin_6)
    density_diambin_7_avg, slope_diambin_7_avg, density_diambin_7_avgerror, slope_diambin_7_avgerror \
    = get_avg_finite_elements(density_diambin_7, slope_diambin_7)
    density_diambin_8_avg, slope_diambin_8_avg, density_diambin_8_avgerror, slope_diambin_8_avgerror \
    = get_avg_finite_elements(density_diambin_8, slope_diambin_8)
    density_diambin_9_avg, slope_diambin_9_avg, density_diambin_9_avgerror, slope_diambin_9_avgerror \
    = get_avg_finite_elements(density_diambin_9, slope_diambin_9)
    density_diambin_10_avg, slope_diambin_10_avg, density_diambin_10_avgerror, slope_diambin_10_avgerror \
    = get_avg_finite_elements(density_diambin_10, slope_diambin_10)
    density_diambin_15_avg, slope_diambin_15_avg, density_diambin_15_avgerror, slope_diambin_15_avgerror \
    = get_avg_finite_elements(density_diambin_15, slope_diambin_15)
    density_diambin_20_avg, slope_diambin_20_avg, density_diambin_20_avgerror, slope_diambin_20_avgerror \
    = get_avg_finite_elements(density_diambin_20, slope_diambin_20)
    density_diambin_25_avg, slope_diambin_25_avg, density_diambin_25_avgerror, slope_diambin_25_avgerror \
    = get_avg_finite_elements(density_diambin_25, slope_diambin_25)
    density_diambin_30_avg, slope_diambin_30_avg, density_diambin_30_avgerror, slope_diambin_30_avgerror \
    = get_avg_finite_elements(density_diambin_30, slope_diambin_30)

    ### Lump all avg value arrays together ###
    all_density_averages_nooverlap = np.array([density_diambin_1_2_avg, density_diambin_2_3_avg, density_diambin_3_4_avg, \
    density_diambin_4_5_avg, density_diambin_5_6_avg, density_diambin_6_7_avg, density_diambin_7_8_avg, \
    density_diambin_8_9_avg, density_diambin_9_10_avg, density_diambin_10_15_avg, \
    density_diambin_15_20_avg, density_diambin_20_25_avg, density_diambin_25_30_avg, density_diambin_30_35_avg])

    all_slope_averages_nooverlap = np.array([slope_diambin_1_2_avg, slope_diambin_2_3_avg, slope_diambin_3_4_avg, \
    slope_diambin_4_5_avg, slope_diambin_5_6_avg, slope_diambin_6_7_avg, slope_diambin_7_8_avg, \
    slope_diambin_8_9_avg, slope_diambin_9_10_avg, slope_diambin_10_15_avg, \
    slope_diambin_15_20_avg, slope_diambin_20_25_avg, slope_diambin_25_30_avg, slope_diambin_30_35_avg])

    all_density_averages_nvalue = np.array([density_diambin_1_avg, density_diambin_2_avg, density_diambin_3_avg, \
    density_diambin_4_avg, density_diambin_5_avg, density_diambin_6_avg, density_diambin_7_avg, \
    density_diambin_8_avg, density_diambin_9_avg, density_diambin_10_avg, density_diambin_15_avg, \
    density_diambin_20_avg, density_diambin_25_avg, density_diambin_30_avg])

    all_slope_averages_nvalue = np.array([slope_diambin_1_avg, slope_diambin_2_avg, slope_diambin_3_avg, \
    slope_diambin_4_avg, slope_diambin_5_avg, slope_diambin_6_avg, slope_diambin_7_avg, \
    slope_diambin_8_avg, slope_diambin_9_avg, slope_diambin_10_avg, slope_diambin_15_avg, \
    slope_diambin_20_avg, slope_diambin_25_avg, slope_diambin_30_avg])

    ### Lump all error arrays together ###
    all_density_avgerrors_nooverlap = np.array([density_diambin_1_2_avgerror, density_diambin_2_3_avgerror, density_diambin_3_4_avgerror, \
    density_diambin_4_5_avgerror, density_diambin_5_6_avgerror, density_diambin_6_7_avgerror, density_diambin_7_8_avgerror, \
    density_diambin_8_9_avgerror, density_diambin_9_10_avgerror, density_diambin_10_15_avgerror, \
    density_diambin_15_20_avgerror, density_diambin_20_25_avgerror, density_diambin_25_30_avgerror, density_diambin_30_35_avgerror])

    all_slope_avgerrors_nooverlap = np.array([slope_diambin_1_2_avgerror, slope_diambin_2_3_avgerror, slope_diambin_3_4_avgerror, \
    slope_diambin_4_5_avgerror, slope_diambin_5_6_avgerror, slope_diambin_6_7_avgerror, slope_diambin_7_8_avgerror, \
    slope_diambin_8_9_avgerror, slope_diambin_9_10_avgerror, slope_diambin_10_15_avgerror, \
    slope_diambin_15_20_avgerror, slope_diambin_20_25_avgerror, slope_diambin_25_30_avgerror, slope_diambin_30_35_avgerror])

    all_density_avgerrors_nvalue = np.array([density_diambin_1_avgerror, density_diambin_2_avgerror, density_diambin_3_avgerror, \
    density_diambin_4_avgerror, density_diambin_5_avgerror, density_diambin_6_avgerror, density_diambin_7_avgerror, \
    density_diambin_8_avgerror, density_diambin_9_avgerror, density_diambin_10_avgerror, density_diambin_15_avgerror, \
    density_diambin_20_avgerror, density_diambin_25_avgerror, density_diambin_30_avgerror])

    all_slope_avgerrors_nvalue = np.array([slope_diambin_1_avgerror, slope_diambin_2_avgerror, slope_diambin_3_avgerror, \
    slope_diambin_4_avgerror, slope_diambin_5_avgerror, slope_diambin_6_avgerror, slope_diambin_7_avgerror, \
    slope_diambin_8_avgerror, slope_diambin_9_avgerror, slope_diambin_10_avgerror, slope_diambin_15_avgerror, \
    slope_diambin_20_avgerror, slope_diambin_25_avgerror, slope_diambin_30_avgerror])

    # plot
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$')
    ax.set_ylabel(r'$\mathrm{log(Density)}$')

    # add minor ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True, alpha=0.5)

    colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#543005','#fdbf6f',\
    '#ff7f00','#cab2d6','#bf812d','#6a3d9a','#b15928','#01665e']

    ax.scatter(all_slope_averages_nooverlap[0], all_density_averages_nooverlap[0], \
        s=50, marker='o', color=colors[0], label='1-2' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[1], all_density_averages_nooverlap[1], \
        s=50, marker='o', color=colors[1], label='2-3' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[2], all_density_averages_nooverlap[2], \
        s=50, marker='o', color=colors[2], label='3-4' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[3], all_density_averages_nooverlap[3], \
        s=50, marker='o', color=colors[3], label='4-5' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[4], all_density_averages_nooverlap[4], \
        s=50, marker='o', color=colors[4], label='5-6' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[5], all_density_averages_nooverlap[5], \
        s=50, marker='o', color=colors[5], label='6-7' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[6], all_density_averages_nooverlap[6], \
        s=50, marker='o', color=colors[6], label='7-8' + ' km', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nooverlap[7], all_density_averages_nooverlap[7], \
        s=50, marker='o', color=colors[7], label='8-9' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[8], all_density_averages_nooverlap[8], \
        s=50, marker='o', color=colors[8], label='9-10' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[9], all_density_averages_nooverlap[9], \
        s=50, marker='o', color=colors[9], label='10-15' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[10], all_density_averages_nooverlap[10],
        s=50, marker='o', color=colors[10], label='15-20' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[11], all_density_averages_nooverlap[11],
        s=20, marker='x', color=colors[11], label='20-25' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[12], all_density_averages_nooverlap[12],
        s=20, marker='x', color=colors[12], label='25-30' + ' km', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nooverlap[13], all_density_averages_nooverlap[13],
        s=20, marker='x', color=colors[13], label='30-35' + ' km', \
        edgecolors='none', zorder=4)

    # fitting
    init = models.PowerLaw1D(amplitude=1, x_0=1, alpha=1)
    fit = fitting.LevMarLSQFitter()
    f = fit(init, all_slope_averages_nooverlap[:11],
            all_density_averages_nooverlap[:11])
    print(f)
    x_plot_arr = np.linspace(3, 18, 1000)
    ax.plot(x_plot_arr, f(x_plot_arr), ls='-', color='skyblue', lw=2)

    # Find chi2 and put the value on the plot
    chi2 = np.sum(((all_density_averages_nooverlap[:11] -
                    f(all_slope_averages_nooverlap[:11])) /
                   all_density_avgerrors_nooverlap[:11])**2)

    # text on plot
    # equation
    ax.text(0.4, 0.86, r'$\mathrm{f(x) = A\left(\frac{x}{x_0}\right)^{-\alpha}}$', \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    # best fit parameters
    ax.text(0.315, 0.78, r'$\mathrm{Amplitude =\ }$' + str("{:.3}".format(f.parameters[0])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.417, 0.72, r'$\mathrm{x_0 =\ }$' +  str("{:.3}".format(f.parameters[1])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.428, 0.66, r'$\mathrm{\alpha =\ }$' +  str("{:.3}".format(f.parameters[2])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # chi2
    ax.text(0.416, 0.6, r'$\mathrm{\chi^2 =\ }$' + str("{:.3}".format(chi2)), verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    ax.set_xlim(3, 18)
    ax.set_ylim(-0.01, 0.2)

    ax.legend(loc=0)

    fig.savefig(slope_extdir + 'nooverlap_averages_plot.png',
                dpi=300,
                bbox_inches='tight')
    plt.clf()
    plt.cla()
    plt.close()

    # ----------------------------- Nvalue -------------------------- #
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$')
    ax.set_ylabel(r'$\mathrm{log(Density)}$')

    # add minor ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True, alpha=0.5)

    ax.scatter(all_slope_averages_nvalue[0], all_density_averages_nvalue[0], \
        s=50, marker='o', color=colors[0], label='N(1)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[1], all_density_averages_nvalue[1], \
        s=50, marker='o', color=colors[1], label='N(2)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[2], all_density_averages_nvalue[2], \
        s=50, marker='o', color=colors[2], label='N(3)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[3], all_density_averages_nvalue[3], \
        s=50, marker='o', color=colors[3], label='N(4)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[4], all_density_averages_nvalue[4], \
        s=50, marker='o', color=colors[4], label='N(5)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[5], all_density_averages_nvalue[5], \
        s=50, marker='o', color=colors[5], label='N(6)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[6], all_density_averages_nvalue[6], \
        s=50, marker='o', color=colors[6], label='N(7)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[7], all_density_averages_nvalue[7], \
        s=50, marker='o', color=colors[7], label='N(8)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[8], all_density_averages_nvalue[8], \
        s=50, marker='o', color=colors[8], label='N(9)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[9], all_density_averages_nvalue[9], \
        s=20, marker='x', color=colors[9], label='N(10)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[10], all_density_averages_nvalue[10],
        s=20, marker='x', color=colors[10], label='N(15)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[11], all_density_averages_nvalue[11],
        s=20, marker='x', color=colors[11], label='N(20)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[12], all_density_averages_nvalue[12],
        s=20, marker='x', color=colors[12], label='N(25)', \
        edgecolors='none', zorder=4)
    ax.scatter(all_slope_averages_nvalue[13], all_density_averages_nvalue[13],
        s=20, marker='x', color=colors[13], label='N(30)', \
        edgecolors='none', zorder=4)

    # fitting
    init = models.PowerLaw1D(amplitude=1, x_0=1, alpha=1)
    fit = fitting.LevMarLSQFitter()
    f = fit(init, all_slope_averages_nvalue[:9],
            all_density_averages_nvalue[:9])
    print(f)
    x_plot_arr = np.linspace(3, 18, 1000)
    ax.plot(x_plot_arr, f(x_plot_arr), ls='-', color='skyblue', lw=2)

    # Find chi2 and put the value on the plot
    chi2 = np.sum(
        ((all_density_averages_nvalue[:9] - f(all_slope_averages_nvalue[:9])) /
         all_density_avgerrors_nvalue[:9])**2)

    # text on plot
    # equation
    ax.text(0.4, 0.86, r'$\mathrm{f(x) = A\left(\frac{x}{x_0}\right)^{-\alpha}}$', \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    # best fit parameters
    ax.text(0.315, 0.78, r'$\mathrm{Amplitude =\ }$' + str("{:.3}".format(f.parameters[0])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.417, 0.72, r'$\mathrm{x_0 =\ }$' +  str("{:.3}".format(f.parameters[1])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.428, 0.66, r'$\mathrm{\alpha =\ }$' +  str("{:.3}".format(f.parameters[2])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # chi2
    ax.text(0.416, 0.6, r'$\mathrm{\chi^2 =\ }$' + str("{:.3}".format(chi2)), verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # Show residuals

    ax.set_xlim(9, 14)
    ax.set_ylim(-0.01, 0.1)

    ax.legend(loc=0)

    fig.savefig(slope_extdir + 'nvalue_averages_plot.png',
                dpi=300,
                bbox_inches='tight')
    plt.clf()
    plt.cla()
    plt.close()

    sys.exit(0)
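    # NOTE: the sys.exit(0) above makes the normalized-Nvalue block below unreachable as written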

    # ----------------------------- Nvalue normalized -------------------------- #
    all_diam_values = np.array([
        1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 12.5, 17.5, 22.5, 27.5,
        32.5
    ])
    norm_values = np.power(10, 4.9) * np.power(all_diam_values, -2)
    print(norm_values)

    all_density_averages_nvalue /= norm_values

    print(all_density_averages_nvalue)

    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$')
    ax.set_ylabel(r'$\mathrm{log(Density)}$')

    # add minor ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True, alpha=0.5)

    ax.scatter(all_slope_averages_nvalue,
               all_density_averages_nvalue,
               s=10,
               color='k',
               edgecolors='none')
    init = models.PowerLaw1D(amplitude=1, x_0=1, alpha=1)
    fit = fitting.LevMarLSQFitter()
    f = fit(init, all_slope_averages_nvalue[:9],
            all_density_averages_nvalue[:9])
    print(f)
    x_plot_arr = np.linspace(3, 18, 1000)
    ax.plot(x_plot_arr, f(x_plot_arr), ls='-', color='skyblue', lw=2)

    # Show residuals

    ax.set_xlim(9, 14)
    ax.set_ylim(0, 2e-5)

    fig.savefig(slope_extdir + 'nvalue_norm_averages_plot.png',
                dpi=300,
                bbox_inches='tight')
    plt.show()
    plt.clf()
    plt.cla()
    plt.close()

    return None
Example #14
    astmodels.Voigt1D(x_0=0.55, amplitude_L=10., fwhm_L=0.5, fwhm_G=0.9),
    astmodels.BlackBody(scale=10.0, temperature=6000. * u.K),
    astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
    astmodels.Plummer1D(mass=10.0, r_plum=5.0),
    astmodels.BrokenPowerLaw1D(amplitude=10,
                               x_break=0.5,
                               alpha_1=2.0,
                               alpha_2=3.5),
    astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.),
    astmodels.LogParabola1D(
        amplitude=10,
        x_0=0.5,
        alpha=2.,
        beta=3.,
    ),
    astmodels.PowerLaw1D(amplitude=10., x_0=0.5, alpha=2.0),
    astmodels.SmoothlyBrokenPowerLaw1D(amplitude=10.,
                                       x_break=5.0,
                                       alpha_1=2.0,
                                       alpha_2=3.0,
                                       delta=0.5),
    custom_and_analytical_inverse(),
    custom_inputs_outputs(),
]

if HAS_SCIPY:
    test_models.append(
        astmodels.Spline1D(
            np.array([-3., -3., -3., -3., -1., 0., 1., 3., 3., 3., 3.]),
            np.array([
                0.10412331, 0.07013616, -0.18799552, 1.35953147, -0.15282581,
Example #15
 'Lorentz1D':
 models.Lorentz1D(1.0, 1.0, 1.0),
 'MexicanHat1D':
 models.MexicanHat1D(1.0, 1.0, 1.0),
 'Trapezoid1D':
 models.Trapezoid1D(1.0, 1.0, 1.0, 1.0),
 'Moffat1D':
 models.Moffat1D(1.0, 1.0, 1.0, 1.0),
 'ExponentialCutoffPowerLaw1D':
 models.ExponentialCutoffPowerLaw1D(1.0, 1.0, 1.0, 1.0),
 'BrokenPowerLaw1D':
 models.BrokenPowerLaw1D(1.0, 1.0, 1.0, 1.0),
 'LogParabola1D':
 models.LogParabola1D(1.0, 1.0, 1.0, 1.0),
 'PowerLaw1D':
 models.PowerLaw1D(1.0, 1.0, 1.0),
 'Linear1D':
 models.Linear1D(1.0, 0.0),
 'Const1D':
 models.Const1D(0.0),
 'Redshift':
 models.Redshift(0.0),
 'Scale':
 models.Scale(1.0),
 'Shift':
 models.Shift(0.0),
 'Sine1D':
 models.Sine1D(1.0, 1.0),
 'Chebyshev1D':
 models.Chebyshev1D(1),
 'Legendre1D':
Example #16
def fit_slopes_intercepts(slopes, intercepts, stds, waves, norm):
    """
    Fit the slopes, intercepts and standard deviations vs. wavelength

    Parameters
    ----------
    slopes : np.ndarray
        Numpy array with the slopes of the linear relationship

    intercepts : np.ndarray
        Numpy array with the intercepts of the linear relationship

    stds : np.ndarray
        Numpy array with the standard deviations about the linear fit

    waves : np.ndarray
        Numpy array with all wavelengths

    norm : string
        Band or wavelength for the normalization

    Returns
    -------
    spline_wave : np.ndarray
        Numpy array with the anchor wavelengths

    spline_slope : np.ndarray
        Numpy array with the anchor slopes

    spline_std : np.ndarray
        Numpy array with the anchor standard deviations

    fit_slopes : tuple
        The interpolated spline for the slopes

    fit_intercepts : astropy model
        The fitted model for the intercepts

    fit_stds : tuple
        The interpolated spline for the standard deviations
    """
    # define a mask for the good data
    mask = ~np.isnan(slopes)
    short_wave_mask = waves < 4.1

    # fit the intercepts with a power law
    fit_lev = fitting.LevMarLSQFitter()
    powerlaw = models.PowerLaw1D(fixed={"x_0": True})
    fit_intercepts = fit_lev(powerlaw, waves[mask], intercepts[mask])

    # define the anchor points for the spline interpolation
    # divide the data into 25 bins with the same number of data points in every bin
    alloc, bin_edges = pd.qcut(waves[mask * short_wave_mask],
                               q=25,
                               retbins=True)
    # calculate the median wavelength, slope and standard deviation in every bin
    meds, edges, indices = stats.binned_statistic(
        waves[mask * short_wave_mask],
        (
            waves[mask * short_wave_mask],
            slopes[mask * short_wave_mask],
            stds[mask * short_wave_mask],
        ),
        statistic="median",
        bins=bin_edges,
    )

    # use the median values as the anchor points for the spline interpolation
    spline_wave = meds[0][~np.isnan(meds[0])]
    spline_slope = meds[1][~np.isnan(meds[1])]
    spline_std = meds[2][~np.isnan(meds[2])]

    # interpolate the slopes with a spline function
    fit_slopes = interpolate.splrep(spline_wave, spline_slope)

    # interpolate the standard deviations with a spline function
    fit_stds = interpolate.splrep(spline_wave, spline_std)

    # create tables with the fitting results at certain wavelengths
    table_waves = np.arange(0.8, 4.05, 0.05)
    table_inv_rv_dep(table_path, table_waves, fit_slopes, fit_intercepts,
                     fit_stds, norm)

    # create a table with the anchor points of the spline interpolation
    table_spline(table_path, spline_wave, spline_slope, spline_std, norm)

    return spline_wave, spline_slope, spline_std, fit_slopes, fit_intercepts, fit_stds
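
A downstream usage sketch (assumed, not from the source), taking the slopes, intercepts, stds and waves arrays as already defined by the surrounding pipeline: fit_slopes and fit_stds are splrep tuples evaluated with scipy.interpolate.splev, while fit_intercepts is an astropy model and is called directly.

import numpy as np
from scipy import interpolate

(spline_wave, spline_slope, spline_std,
 fit_slopes, fit_intercepts, fit_stds) = fit_slopes_intercepts(
    slopes, intercepts, stds, waves, norm="V")

eval_waves = np.array([1.0, 2.0, 3.0])                       # hypothetical wavelengths
slopes_at_waves = interpolate.splev(eval_waves, fit_slopes)  # spline-interpolated slopes
stds_at_waves = interpolate.splev(eval_waves, fit_stds)      # spline-interpolated standard deviations
intercepts_at_waves = fit_intercepts(eval_waves)             # fitted power-law intercepts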
Example #17
def call_avg_plot_smallrange():
    """
    Have not changed the code for the N value stuff using the finer grid
    """

    # read in all arrays
    density_diambin_1_1p25, density_diambin_1p25_1p5, density_diambin_1p5_1p75, density_diambin_1p75_2, \
    density_diambin_2_2p25, density_diambin_2p25_2p5, density_diambin_2p5_2p75, density_diambin_2p75_3, \
    density_diambin_3_3p25, density_diambin_3p25_3p5, density_diambin_3p5_3p75, density_diambin_3p75_4, \
    density_diambin_4_4p25, density_diambin_4p25_4p5, density_diambin_4p5_4p75, density_diambin_4p75_5, \
    density_diambin_5_6, density_diambin_6_7, density_diambin_7_8, density_diambin_8_9, \
    density_diambin_9_10, density_diambin_10_15, density_diambin_15_20, density_diambin_20_25, \
    density_diambin_25_30, density_diambin_30_35, \
    slope_diambin_1_1p25, slope_diambin_1p25_1p5, slope_diambin_1p5_1p75, slope_diambin_1p75_2, \
    slope_diambin_2_2p25, slope_diambin_2p25_2p5, slope_diambin_2p5_2p75, slope_diambin_2p75_3, \
    slope_diambin_3_3p25, slope_diambin_3p25_3p5, slope_diambin_3p5_3p75, slope_diambin_3p75_4, \
    slope_diambin_4_4p25, slope_diambin_4p25_4p5, slope_diambin_4p5_4p75, slope_diambin_4p75_5, \
    slope_diambin_5_6, slope_diambin_6_7, slope_diambin_7_8, \
    slope_diambin_8_9, slope_diambin_9_10, slope_diambin_10_15, slope_diambin_15_20, \
    slope_diambin_20_25, slope_diambin_25_30, slope_diambin_30_35 = read_no_overlap_arrays()

    density_diambin_1, density_diambin_2, density_diambin_3, density_diambin_4, \
    density_diambin_5, density_diambin_6, density_diambin_7, density_diambin_8, \
    density_diambin_9, density_diambin_10, density_diambin_15, density_diambin_20, \
    density_diambin_25, density_diambin_30, slope_diambin_1, slope_diambin_2, \
    slope_diambin_3, slope_diambin_4, slope_diambin_5, slope_diambin_6, slope_diambin_7, \
    slope_diambin_8, slope_diambin_9, slope_diambin_10, slope_diambin_15, slope_diambin_20, \
    slope_diambin_25, slope_diambin_30 = read_Nvalue_arrays()

    # get averages for these arrays
    density_diambin_1_1p25_avg, slope_diambin_1_1p25_avg, density_diambin_1_1p25_avgerror, slope_diambin_1_1p25_avgerror \
    = get_avg_finite_elements(density_diambin_1_1p25, slope_diambin_1_1p25)
    density_diambin_1p25_1p5_avg, slope_diambin_1p25_1p5_avg, density_diambin_1p25_1p5_avgerror, slope_diambin_1p25_1p5_avgerror \
    = get_avg_finite_elements(density_diambin_1p25_1p5, slope_diambin_1p25_1p5)
    density_diambin_1p5_1p75_avg, slope_diambin_1p5_1p75_avg, density_diambin_1p5_1p75_avgerror, slope_diambin_1p5_1p75_avgerror \
    = get_avg_finite_elements(density_diambin_1p5_1p75, slope_diambin_1p5_1p75)
    density_diambin_1p75_2_avg, slope_diambin_1p75_2_avg, density_diambin_1p75_2_avgerror, slope_diambin_1p75_2_avgerror \
    = get_avg_finite_elements(density_diambin_1p75_2, slope_diambin_1p75_2)

    density_diambin_2_2p25_avg, slope_diambin_2_2p25_avg, density_diambin_2_2p25_avgerror, slope_diambin_2_2p25_avgerror \
    = get_avg_finite_elements(density_diambin_2_2p25, slope_diambin_2_2p25)
    density_diambin_2p25_2p5_avg, slope_diambin_2p25_2p5_avg, density_diambin_2p25_2p5_avgerror, slope_diambin_2p25_2p5_avgerror \
    = get_avg_finite_elements(density_diambin_2p25_2p5, slope_diambin_2p25_2p5)
    density_diambin_2p5_2p75_avg, slope_diambin_2p5_2p75_avg, density_diambin_2p5_2p75_avgerror, slope_diambin_2p5_2p75_avgerror \
    = get_avg_finite_elements(density_diambin_2p5_2p75, slope_diambin_2p5_2p75)
    density_diambin_2p75_3_avg, slope_diambin_2p75_3_avg, density_diambin_2p75_3_avgerror, slope_diambin_2p75_3_avgerror \
    = get_avg_finite_elements(density_diambin_2p75_3, slope_diambin_2p75_3)

    density_diambin_3_3p25_avg, slope_diambin_3_3p25_avg, density_diambin_3_3p25_avgerror, slope_diambin_3_3p25_avgerror \
    = get_avg_finite_elements(density_diambin_3_3p25, slope_diambin_3_3p25)
    density_diambin_3p25_3p5_avg, slope_diambin_3p25_3p5_avg, density_diambin_3p25_3p5_avgerror, slope_diambin_3p25_3p5_avgerror \
    = get_avg_finite_elements(density_diambin_3p25_3p5, slope_diambin_3p25_3p5)
    density_diambin_3p5_3p75_avg, slope_diambin_3p5_3p75_avg, density_diambin_3p5_3p75_avgerror, slope_diambin_3p5_3p75_avgerror \
    = get_avg_finite_elements(density_diambin_3p5_3p75, slope_diambin_3p5_3p75)
    density_diambin_3p75_4_avg, slope_diambin_3p75_4_avg, density_diambin_3p75_4_avgerror, slope_diambin_3p75_4_avgerror \
    = get_avg_finite_elements(density_diambin_3p75_4, slope_diambin_3p75_4)

    density_diambin_4_4p25_avg, slope_diambin_4_4p25_avg, density_diambin_4_4p25_avgerror, slope_diambin_4_4p25_avgerror \
    = get_avg_finite_elements(density_diambin_4_4p25, slope_diambin_4_4p25)
    density_diambin_4p25_4p5_avg, slope_diambin_4p25_4p5_avg, density_diambin_4p25_4p5_avgerror, slope_diambin_4p25_4p5_avgerror \
    = get_avg_finite_elements(density_diambin_4p25_4p5, slope_diambin_4p25_4p5)
    density_diambin_4p5_4p75_avg, slope_diambin_4p5_4p75_avg, density_diambin_4p5_4p75_avgerror, slope_diambin_4p5_4p75_avgerror \
    = get_avg_finite_elements(density_diambin_4p5_4p75, slope_diambin_4p5_4p75)
    density_diambin_4p75_5_avg, slope_diambin_4p75_5_avg, density_diambin_4p75_5_avgerror, slope_diambin_4p75_5_avgerror \
    = get_avg_finite_elements(density_diambin_4p75_5, slope_diambin_4p75_5)

    # -------------- Unupdated N value stuff --------------- #
    density_diambin_1_avg, slope_diambin_1_avg, density_diambin_1_avgerror, slope_diambin_1_avgerror \
    = get_avg_finite_elements(density_diambin_1, slope_diambin_1)
    density_diambin_2_avg, slope_diambin_2_avg, density_diambin_2_avgerror, slope_diambin_2_avgerror \
    = get_avg_finite_elements(density_diambin_2, slope_diambin_2)
    density_diambin_3_avg, slope_diambin_3_avg, density_diambin_3_avgerror, slope_diambin_3_avgerror \
    = get_avg_finite_elements(density_diambin_3, slope_diambin_3)
    density_diambin_4_avg, slope_diambin_4_avg, density_diambin_4_avgerror, slope_diambin_4_avgerror \
    = get_avg_finite_elements(density_diambin_4, slope_diambin_4)

    # Put the averages and the errors in arrays
    all_density_averages_nooverlap = \
    np.array([density_diambin_1_1p25_avg, density_diambin_1p25_1p5_avg, density_diambin_1p5_1p75_avg, density_diambin_1p75_2_avg, \
              density_diambin_2_2p25_avg, density_diambin_2p25_2p5_avg, density_diambin_2p5_2p75_avg, density_diambin_2p75_3_avg, \
              density_diambin_3_3p25_avg, density_diambin_3p25_3p5_avg, density_diambin_3p5_3p75_avg, density_diambin_3p75_4_avg, \
              density_diambin_4_4p25_avg, density_diambin_4p25_4p5_avg, density_diambin_4p5_4p75_avg, density_diambin_4p75_5_avg])

    all_slope_averages_nooverlap = \
    np.array([slope_diambin_1_1p25_avg, slope_diambin_1p25_1p5_avg, slope_diambin_1p5_1p75_avg, slope_diambin_1p75_2_avg, \
              slope_diambin_2_2p25_avg, slope_diambin_2p25_2p5_avg, slope_diambin_2p5_2p75_avg, slope_diambin_2p75_3_avg, \
              slope_diambin_3_3p25_avg, slope_diambin_3p25_3p5_avg, slope_diambin_3p5_3p75_avg, slope_diambin_3p75_4_avg, \
              slope_diambin_4_4p25_avg, slope_diambin_4p25_4p5_avg, slope_diambin_4p5_4p75_avg, slope_diambin_4p75_5_avg])

    all_density_averages_nvalue = np.array([density_diambin_1_avg, density_diambin_2_avg, density_diambin_3_avg, \
    density_diambin_4_avg])

    all_slope_averages_nvalue = np.array([slope_diambin_1_avg, slope_diambin_2_avg, slope_diambin_3_avg, \
    slope_diambin_4_avg])

    all_density_avgerrors_nooverlap = \
    np.array([density_diambin_1_1p25_avgerror, density_diambin_1p25_1p5_avgerror, density_diambin_1p5_1p75_avgerror, density_diambin_1p75_2_avgerror, \
              density_diambin_2_2p25_avgerror, density_diambin_2p25_2p5_avgerror, density_diambin_2p5_2p75_avgerror, density_diambin_2p75_3_avgerror, \
              density_diambin_3_3p25_avgerror, density_diambin_3p25_3p5_avgerror, density_diambin_3p5_3p75_avgerror, density_diambin_3p75_4_avgerror, \
              density_diambin_4_4p25_avgerror, density_diambin_4p25_4p5_avgerror, density_diambin_4p5_4p75_avgerror, density_diambin_4p75_5_avgerror])

    all_slope_avgerrors_nooverlap = \
    np.array([slope_diambin_1_1p25_avgerror, slope_diambin_1p25_1p5_avgerror, slope_diambin_1p5_1p75_avgerror, slope_diambin_1p75_2_avgerror, \
              slope_diambin_2_2p25_avgerror, slope_diambin_2p25_2p5_avgerror, slope_diambin_2p5_2p75_avgerror, slope_diambin_2p75_3_avgerror, \
              slope_diambin_3_3p25_avgerror, slope_diambin_3p25_3p5_avgerror, slope_diambin_3p5_3p75_avgerror, slope_diambin_3p75_4_avgerror, \
              slope_diambin_4_4p25_avgerror, slope_diambin_4p25_4p5_avgerror, slope_diambin_4p5_4p75_avgerror, slope_diambin_4p75_5_avgerror])

    # plot
    # ------------------------------------------- No overlap ------------------------------------------- #
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$')
    ax.set_ylabel(r'$\mathrm{log(Density)}$')

    # add minor ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True, alpha=0.5)

    colors = ['#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00',\
    '#cab2d6','#6a3d9a','#ffff99','#b15928','#878787','#c51b7d','#35978f','#b2182b']
    label_list = ['1-1.25 km', '1.25-1.5 km', '1.5-1.75 km', '1.75-2 km',\
                  '2-2.25 km', '2.25-2.5 km', '2.5-2.75 km', '2.75-3 km',\
                  '3-3.25 km', '3.25-3.5 km', '3.5-3.75 km', '3.75-4 km',\
                  '4-4.25 km', '4.25-4.5 km', '4.5-4.75 km', '4.75-5 km']

    for i in range(len(all_density_averages_nooverlap)):
        ax.scatter(all_slope_averages_nooverlap[i], all_density_averages_nooverlap[i], \
            s=50, marker='o', color=colors[i], label=label_list[i], \
            edgecolors='none', zorder=5)

    # fitting
    init = models.PowerLaw1D(amplitude=1, x_0=1, alpha=1)
    fit = fitting.LevMarLSQFitter()
    f = fit(init, all_slope_averages_nooverlap, all_density_averages_nooverlap)
    print(f)
    x_plot_arr = np.linspace(3, 18, 1000)
    ax.plot(x_plot_arr, f(x_plot_arr), ls='-', color='skyblue', lw=2)

    # Find chi2 and put the value on the plot
    chi2 = np.sum(
        ((all_density_averages_nooverlap - f(all_slope_averages_nooverlap)) /
         all_density_avgerrors_nooverlap)**2)

    # text on plot
    # equation
    ax.text(0.4, 0.86, r'$\mathrm{f(x) = A\left(\frac{x}{x_0}\right)^{-\alpha}}$', \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    # best fit parameters
    ax.text(0.315, 0.78, r'$\mathrm{Amplitude =\ }$' + str("{:.3}".format(f.parameters[0])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.417, 0.72, r'$\mathrm{x_0 =\ }$' +  str("{:.3}".format(f.parameters[1])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.428, 0.66, r'$\mathrm{\alpha =\ }$' +  str("{:.3}".format(f.parameters[2])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # chi2
    ax.text(0.416, 0.6, r'$\mathrm{\chi^2 =\ }$' + str("{:.3}".format(chi2)), verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    ax.set_xlim(3, 18)
    ax.set_ylim(-0.01, 0.2)

    ax.legend(loc=0)

    fig.savefig(slope_extdir +
                'nooverlap_averages_plot_smallrange_finegrid.png',
                dpi=300,
                bbox_inches='tight')
    plt.clf()
    plt.cla()
    plt.close()

    sys.exit(0)
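    # NOTE: the sys.exit(0) above makes the N-value section below unreachable as written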

    # ------------------------------------------- N value ------------------------------------------- #
    fig = plt.figure()
    ax = fig.add_subplot(111)

    ax.set_xlabel(r'$\mathrm{Slope}$')
    ax.set_ylabel(r'$\mathrm{log(Density)}$')

    # add minor ticks and grid
    ax.minorticks_on()
    ax.tick_params('both', width=1, length=3, which='minor')
    ax.tick_params('both', width=1, length=4.7, which='major')
    ax.grid(True, alpha=0.5)

    ax.scatter(all_slope_averages_nvalue[0], all_density_averages_nvalue[0], \
        s=50, marker='o', color=colors[0], label='N(1)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[1], all_density_averages_nvalue[1], \
        s=50, marker='o', color=colors[1], label='N(2)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[2], all_density_averages_nvalue[2], \
        s=50, marker='o', color=colors[2], label='N(3)', \
        edgecolors='none', zorder=5)
    ax.scatter(all_slope_averages_nvalue[3], all_density_averages_nvalue[3], \
        s=50, marker='o', color=colors[3], label='N(4)', \
        edgecolors='none', zorder=5)

    # fitting
    init = models.PowerLaw1D(amplitude=1, x_0=1, alpha=1)
    fit = fitting.LevMarLSQFitter()
    f = fit(init, all_slope_averages_nvalue[:4],
            all_density_averages_nvalue[:4])
    print(f)
    x_plot_arr = np.linspace(3, 18, 1000)
    ax.plot(x_plot_arr, f(x_plot_arr), ls='-', color='skyblue', lw=2)

    # Find chi2 and put the value on the plot
    chi2 = np.sum(
        ((all_density_averages_nvalue[:4] - f(all_slope_averages_nvalue[:4])) /
         all_density_avgerrors_nvalue[:4])**2)

    # text on plot
    # equation
    ax.text(0.4, 0.86, r'$\mathrm{f(x) = A\left(\frac{x}{x_0}\right)^{-\alpha}}$', \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    # best fit parameters
    ax.text(0.315, 0.78, r'$\mathrm{Amplitude =\ }$' + str("{:.3}".format(f.parameters[0])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.417, 0.72, r'$\mathrm{x_0 =\ }$' +  str("{:.3}".format(f.parameters[1])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)
    ax.text(0.428, 0.66, r'$\mathrm{\alpha =\ }$' +  str("{:.3}".format(f.parameters[2])), \
        verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # chi2
    ax.text(0.416, 0.6, r'$\mathrm{\chi^2 =\ }$' + str("{:.3}".format(chi2)), verticalalignment='top', horizontalalignment='left', \
        transform=ax.transAxes, color='k', size=10)

    # Show residuals

    ax.set_xlim(9, 14)
    ax.set_ylim(-0.01, 0.1)

    ax.legend(loc=0)

    fig.savefig(slope_extdir + 'nvalue_averages_plot_smallrange.png',
                dpi=300,
                bbox_inches='tight')
    plt.clf()
    plt.cla()
    plt.close()

    return None
예제 #18
0
    def _selective_fit(self):
        """Selection depending on Plot Units and Function Model
          Predefine Input Data in x and y
          We equate three components to y1, y2, y3. The value of x is the same for all cases
          x - independent variable, nominally energy in keV
          y - Plot Unit"""
        # load chosen file in Select Input section
        fname = Fitting.fname
        if fname is None:  # if no file was chosen, prompt the user
            print('Please choose an input file')

        else:
            hdulist = fits.open(fname)
            header1 = hdulist[1].header
            header3 = hdulist[3].header
            data1 = hdulist[1].data
            data2 = hdulist[2].data
            Rate = data1.RATE
            Time = data1.TIME - 2
            Livetime = data1.LIVETIME
            Time_del = data1.TIMEDEL
            Channel = data1.CHANNEL
            Fitting.E_min = data2.E_MIN
            E_max = data2.E_MAX
            Area = header3[24]
            E_mean = np.mean(Fitting.E_min)
            """Define Spectrum Units: Rate, Counts, Flux"""

            # Define the range for Low and High energies
            n = len(Fitting.E_min)
            deltaE = np.zeros(shape=(n))
            for i in range(n):
                deltaE[i] = E_max[i] - Fitting.E_min[i]

            # Next, we determine the Plot Units components
            # Rate
            CountRate = np.zeros(shape=(n))
            for i in range(n):
                CountRate[i] = np.mean(Rate[:, i])

            # Counts
            Counts = np.zeros(shape=(n))
            for i in range(n):
                Counts[i] = np.mean(Rate[:, i] * Time_del[:])

            # Flux
            Flux = np.zeros(shape=(n))
            for i in range(n):
                Flux[i] = np.mean(Rate[:, i] / (Area * deltaE[i] - 2))
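            # Equivalent vectorised forms (a sketch, assuming Rate has shape
            # (n_time, n_channel) as implied by the loops above):
            #   CountRate = Rate.mean(axis=0)
            #   Counts    = (Rate * Time_del[:, None]).mean(axis=0)
            #   Flux      = Rate.mean(axis=0) / (Area * deltaE - 2)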

            # Set the conditions to Set Y axis
            if Fitting.setEVal is None:
                x = Fitting.E_min
                y1 = CountRate
                y2 = Counts
                y3 = Flux
            else:
                # Energy boundaries
                energy_min = int(Fitting.setEVal.split(' - ')[0])
                energy_max = int(Fitting.setEVal.split(' - ')[1])
                assert energy_max > energy_min
                # Energy value mask
                energy_mask = (Fitting.E_min >= energy_min) & (Fitting.E_min <=
                                                               energy_max)
                x = Fitting.E_min[energy_mask]
                y1 = CountRate[energy_mask]
                y2 = Counts[energy_mask]
                y3 = Flux[energy_mask]

            # def find_all_indexes(input_str, search_str):
            #     l1 = []
            #     length = len(input_str)
            #     index = 0
            #     while index < length:
            #         i = input_str.find(search_str, index)
            #         if i == -1:
            #             return l1
            #         l1.append(i)
            #         index = i + 1
            #     return l1
            # print(find_all_indexes(str(E_min), str(E_min[0:-1])))
            # indexesX = np.where((x <= x[-1]) & (x >= x[0]))
            # print(indexesX)
            # indexesY1 = np.where((y3 < y3[-1]) & (y3 > y3[0]))
            # print(indexesY1)
            # nX = int(input(self.e1.get()))
            # nY = int(input(self.e1.get()))
            # keyword_arrayX = []
            # keyword_arrayY = []
            # first_E_min = indexes[0]
            # last_E_min = indexes[-1]
            #
            # if first_E_min < arrayX[0] and last_E_min<arrayX[-1]:

            #################################################### Define Fitters ######################################################

            # The fitter creates a new model for x and y, with the best-fit parameter values
            fitg1 = fitting.LevMarLSQFitter()
            #print(fitg1)
            """ 
        Levenberg - Marquandt algorithm for non - linear least - squares optimization

        The algorithm works by minimizing the squared residuals, defined as:
            
                Residual^2 = (y - f(t))^2 ,
 
        where y is the measured dependent variable;

        f(t) is the calculated value

        The LM algorithm is an iterative process, guessing at the solution of the best minimum


         """

            #################################################### Fitting the data using astropy.modeling ###############################

            # Define a one-dimensional power law model with the default initial guess
            # (alternatively e.g. models.PowerLaw1D(amplitude=1, x_0=3, alpha=50, fixed={'alpha': True}))
            PowerLaw1D = models.PowerLaw1D()
            """
        PowerLaw1D(amplitude=1, x_0=1, alpha=1, **kwargs)

        One dimensional power law model.

        Parameters:	

            amplitude : float. Model amplitude at the reference point.

            x_0 : float. Reference point.

            alpha : float. Power law index.
        """

            # Define a one-dimensional broken power law model
            BrokenPowerLaw1D = models.BrokenPowerLaw1D(amplitude=1,
                                                       x_break=3,
                                                       alpha_1=400,
                                                       alpha_2=1.93,
                                                       fixed={
                                                           'alpha_1': True,
                                                           'alpha_2': True
                                                       })
            """
        BrokenPowerLaw1D(amplitude=1, x_break=1, alpha_1=1, alpha_2=1, **kwargs)


        One dimensional power law model with a break.

        Parameters:	

            amplitude : float. Model amplitude at the break point.

            x_break : float. Break point.

            alpha_1 : float. Power law index for x < x_break.

            alpha_2 : float. Power law index for x > x_break.
        """

            # Define a Gaussian model
            ginit = models.Gaussian1D(1000,
                                      6.7,
                                      0.1,
                                      fixed={
                                          'mean': True,
                                          'stddev': True
                                      })
            #(1000, 6.7, 0.1)
            """
        One dimensional Gaussian model

        Parameters:

            amplitude: Amplitude of the Gaussian.
            
            mean: Mean of the Gaussian.

            stddev: Standard deviation of the Gaussian.
       
        Other Parameters:

            fixed : optional. A dictionary {parameter_name: boolean} of parameters to not be varied during fitting. True means the parameter is held fixed. 
            Alternatively the fixed property of a parameter may be used.

    
            tied: optional. A dictionary {parameter_name: callable} of parameters which are linked to some other parameter.

            The dictionary values are callables providing the linking relationship. Alternatively the tied property of a parameter may be used.

    
            bounds: optional. A dictionary {parameter_name: value} of lower and upper bounds of parameters. 
            Keys are parameter names. Values are a list or a tuple of length 2 giving the desired range for the parameter.
            Alternatively, the min and max properties of a parameter may be used.

            eqcons: optional. A list of functions of length n such that eqcons[j](x0,*args) == 0.0 in a successfully optimized problem.

        
            ineqcons: optional. A list of functions of length n such that ieqcons[j](x0,*args) >= 0.0 in a successfully optimized problem.

        """
            p_init = models.Polynomial1D(2)  # Define 2nd order Polynomial function
            #p_init.parameters = [1,1,1]
            """
        1D Polynomial model.
        
        
        Parameters:

            degree: Degree of the series.

        
            domain: Optional.

            window: Optional. If None, it is set to [-1, 1]. Fitters will remap the domain to this window.


            **params: Keyword-value pairs, representing parameter_name: value.

        

        Other Parameters:

            fixed: optional. A dictionary {parameter_name: boolean} of parameters to not be varied during fitting. True means the parameter is held fixed. 
            Alternatively the fixed property of a parameter may be used.

            tied: optional. A dictionary {parameter_name: callable} of parameters which are linked to some other parameter.
            The dictionary values are callables providing the linking relationship.
            Alternatively the tied property of a parameter may be used.
   
            bounds: optional. A dictionary {parameter_name: value} of lower and upper bounds of parameters. Keys are parameter names. 
            Values are a list or a tuple of length 2 giving the desired range for the parameter. 
            Alternatively, the min and max properties of a parameter may be used.

            eqcons: optional.  A list of functions of length n such that eqcons[j](x0,*args) == 0.0 in a successfully optimized problem.

       
            ineqcons: optional. A list of functions of length n such that ieqcons[j](x0,*args) >= 0.0 in a successfully optimized problem.
        """

            Model = ginit + p_init
            """ The Model(function) returns the sum of a Gaussian and 2nd order Polynomial """

            # Define a 5th-degree polynomial (6 coefficients)
            Poly = models.Polynomial1D(5,
                                       window=[-10, 10],
                                       fixed={
                                           'c3': True,
                                           'c4': True
                                       })
            Poly.parameters = [1, 1, 1, 1, 1, 50]

            # Define Exponential function
            @custom_model
            def func_exponential(x, t1=1., t2=1.):
                return (np.exp(t1 - x / t2))

            exp = func_exponential(t1=1., t2=1.)
            """
        Purpose: Exponential function

        Category: spectral fitting

        Inputs:
        t0 - Normalization
        t1 - Pseudo temperature

        Outputs:
        result of function, exponential
        """

            # Define Single Power Law Times an Exponential
            @custom_model
            def func_exponential_powerlaw(x,
                                          p0=1.,
                                          p1=1.,
                                          p2=1.,
                                          e3=1.,
                                          e4=1.):
                return ((p0 * (x / p2)**p1) * (np.exp(e3 - x / e4)))

            exp_powerlaw = func_exponential_powerlaw(p0=1.,
                                                     p1=3.,
                                                     p2=50.,
                                                     e3=1.,
                                                     e4=1.,
                                                     fixed={'p2': True})
            """
        Purpose: single power - law times an exponential

        Category: spectral fitting

        Inputs:
        p - first 3 parameters describe the single power - law, e - describes the exponential
 
        p0 = noramlization at epivot for power - law
        p1 = negative power - law index
        p2 = epivot (keV) for power - law

        e3 = normalization for exponential
        e4 = pseudo temperature for exponential

        Outputs:
        result of function, a power - law times an exponential
        """

            ######################### Define the functions for Rate ###############################

            # If the user selects Rate in Plot Units and PowerLaw1D in Choose Fit Function Model, plot:
            if (self.var.get() == 'Rate') & (self.lbox.curselection()[0] == 0):
                gPLRate = fitg1(PowerLaw1D, x, y1, weights=1.0 / y1)
                print(gPLRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         gPLRate(x),
                         drawstyle='steps-post',
                         color='red',
                         label="PowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100,
                         ymin=0.1)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.legend(loc=2)
                plt.title('Rate Fitting using 1D Power Law Model')
                plt.show()
                # print('RATE & PowerLaw1D')

            # If the user selects Rate in Plot Units and BrokenPowerLaw1D in Choose Fit Function Model, plot:
            elif (self.var.get() == 'Rate') & (self.lbox.curselection()[0]
                                               == 1):
                gBPLRate = fitg1(BrokenPowerLaw1D, x, y1, weights=1.0 / y1)
                print(gBPLRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         gBPLRate(x),
                         drawstyle='steps-post',
                         color='red',
                         label="BrokenPowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.legend(loc=2)
                plt.title('Rate Fitting using 1D Broken Power Law Model')
                plt.show()
                # print('RATE & BrokenPowerLaw1D')

            # If the user selects Rate in Plot Units and Gaussian in Choose Fit Function Model:
            elif (self.var.get() == 'Rate') & (self.lbox.curselection()[0]
                                               == 2):
                gaussianRate = fitg1(Model, x, y1, weights=1.0 / y1)
                print(gaussianRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         gaussianRate(x),
                         drawstyle='steps-pre',
                         label='Gaussian')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.title('Rate Fitting using Gaussian Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Rate in Plot Units and Polynomial in Choose Fit Function Model:
            elif (self.var.get() == 'Rate') & (self.lbox.curselection()[0]
                                               == 3):
                PolyRate = fitg1(Poly, x, y1, weights=1.0 / y1)
                print(PolyRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         PolyRate(x),
                         drawstyle='steps-pre',
                         label='Polynomial')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.title('Rate Fitting using Polynomial Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Rate in Plot Units and Exponential in Choose Fit Function Model:
            elif (self.var.get() == 'Rate') & (self.lbox.curselection()[0]
                                               == 4):
                expRate = fitg1(exp, x, y1)
                print(expRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         expRate(x),
                         drawstyle='steps-pre',
                         label='Exponential')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.title('Rate Fitting using Exponential Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Rate in Plot Units and Exponential Power Law in Choose Fit Function Model:
            elif (self.var.get() == 'Rate') & (self.lbox.curselection()[0]
                                               == 5):
                ExpPLRate = fitg1(exp_powerlaw, x, y1, weights=1.0 / y1)
                print(ExpPLRate)
                plt.figure()
                plt.plot(x, y1, drawstyle='steps-post', label="Rate")
                plt.plot(x,
                         ExpPLRate(x),
                         drawstyle='steps-post',
                         color='red',
                         label="ExpPowerLaw")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=100, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Rate(Counts/s)')
                plt.legend(loc=2)
                plt.title('Rate Fitting using Exponential Power Law Model')
                plt.show()

            ######################### Define the functions for Counts ###############################

            # If the user selects Counts in Plot Units and PowerLaw1D in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 0):
                gPLCounts = fitg1(PowerLaw1D, x, y2, weights=1.0 / y2)
                print(gPLCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         gPLCounts(x),
                         drawstyle='steps-post',
                         color='red',
                         label="PowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000,
                         ymin=0.1)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.legend(loc=2)
                plt.title('Counts Fitting using 1D Power Law Model')
                plt.show()
                # print('COUNTS & PowerLaw1D')

            # If the user selects Counts in Plot Units and BrokenPowerLaw1D in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 1):
                gBPLCounts = fitg1(BrokenPowerLaw1D, x, y2, weights=1.0 / y2)
                print(gBPLCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         gBPLCounts(x),
                         drawstyle='steps-post',
                         color='red',
                         label="BrokenPowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.legend(loc=2)
                plt.title('Counts Fitting using 1D Broken Power Law Model')
                plt.show()
                # print('COUNTS & BrokenPowerLaw1D')

            # If the user selects Counts in Plot Units and Gaussian in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 2):
                gaussianCounts = fitg1(Model, x, y2, weights=1.0 / y2)
                print(gaussianCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         gaussianCounts(x),
                         drawstyle='steps-pre',
                         label='Gaussian')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.title('Counts Fitting using Gaussian Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Counts in Plot Units and Polynomial in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 3):
                PolyCounts = fitg1(Poly, x, y2, weights=1.0 / y2)
                print(PolyCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         PolyCounts(x),
                         drawstyle='steps-pre',
                         label='Polynomial')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.title('Counts Fitting using Polynomial Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Counts in Plot Units and Exponential in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 4):
                expCounts = fitg1(exp, x, y2, weights=1.0 / y2)
                print(expCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         expCounts(x),
                         drawstyle='steps-pre',
                         label='Exponential')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.title('Counts Fitting using Exponential Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Counts in Plot Units and Exponential Power Law in Choose Fit Function Model:
            elif (self.var.get() == 'Counts') & (self.lbox.curselection()[0]
                                                 == 5):
                ExpPLCounts = fitg1(exp_powerlaw, x, y2, weights=1.0 / y2)
                print(ExpPLCounts)
                plt.figure()
                plt.plot(x, y2, drawstyle='steps-post', label="Counts")
                plt.plot(x,
                         ExpPLCounts(x),
                         drawstyle='steps-post',
                         color='red',
                         label="ExpPowerLaw")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1000, ymin=0.1)
                plt.xlabel('Energy(keV)')
                plt.ylabel('Counts(Counts)')
                plt.legend(loc=2)
                plt.title('Counts Fitting using Exponential Power Law Model')
                plt.show()

            ######################### Define the functions for Flux ###############################

            # If the user selects Flux in Plot Units and PowerLaw1D in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 0):
                gPLFlux = fitg1(PowerLaw1D, x, y3, weights=1.0 / y3)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         gPLFlux(x),
                         drawstyle='steps-post',
                         color='red',
                         label="PowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.legend(loc=2)
                plt.title('Flux Fitting using 1D Power Law Model')
                plt.show()
                # print('FLUX & PowerLaw1D')

            # If the user selects Flux in Plot Units and BrokenPowerLaw1D in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 1):

                # Apply Levenberg - Marquandt algorithm
                gBPLFlux = fitg1(BrokenPowerLaw1D, x, y3, weights=1.0 / y3)
                print(gBPLFlux)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         gBPLFlux(x),
                         drawstyle='steps-post',
                         color='red',
                         label="BrokenPowerLaw1D")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.legend(loc=2)
                plt.title('Flux Fitting using 1D Broken Power Law Model')
                plt.show()
                # print('FLUX & BrokenPowerLaw1D')

            # If the user selects Flux in Plot Units and Gaussian in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 2):
                gaussianFlux = fitg1(Model, x, y3, weights=1.0 / y3)
                print(gaussianFlux)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         gaussianFlux(x),
                         drawstyle='steps-pre',
                         label='Gaussian')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.title('Flux Fitting using Gaussian Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Flux in Plot Units and Polynomial in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 3):
                PolyFlux = fitg1(Poly, x, y3, weights=1.0 / y3)
                print(PolyFlux)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         PolyFlux(x),
                         drawstyle='steps-pre',
                         label='Polynomial')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.title('Flux Fitting using Polynomial Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Flux in Plot Units and Exponential in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 4):
                expFlux = fitg1(exp, x, y3)
                print(expFlux)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         expFlux(x),
                         drawstyle='steps-pre',
                         label='Exponential')
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.title('Flux Fitting using Exponential Model')
                plt.legend(loc=2)
                plt.show()

            # If the user selects Flux in Plot Units and Exponential Power Law in Choose Fit Function Model:
            elif (self.var.get() == 'Flux') & (self.lbox.curselection()[0]
                                               == 5):
                ExpPLFlux = fitg1(exp_powerlaw, x, y3, weights=1.0 / y3)
                print(ExpPLFlux)
                plt.figure()
                plt.plot(x, y3, drawstyle='steps-post', label="Flux")
                plt.plot(x,
                         ExpPLFlux(x),
                         drawstyle='steps-post',
                         color='red',
                         label="ExpPowerLaw")
                plt.yscale('log')
                plt.xscale('log')
                plt.ylim(ymax=1,
                         ymin=0.0001)  #FIXME: find a solution for general case
                plt.xlabel('Energy(keV)')
                plt.ylabel('Flux(Counts/s cm(-2) keV(-1))')
                plt.legend(loc=2)
                plt.title('Flux Fitting using Exponential Power Law Model')
                plt.show()
예제 #19
0
import os

from stingray.modeling import PSDParEst

import astropy.units as u
from astropy.time import Time
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from spectral_models import LogLorentz1D
from spectral_model_parameter_estimators import InitialParameterEstimatePlC

# Output
directory = os.path.expanduser('~/Data/ts/project_data/test_dask2_output')
if not os.path.exists(directory):
    os.makedirs(directory)

# Power law component
power_law = models.PowerLaw1D()
power_law.amplitude.min = 0.0
power_law.amplitude.max = None
power_law.alpha.min = 0.0
power_law.alpha.max = 4.0

# fix x_0 of power law component
power_law.x_0.fixed = True

# Constant component
constant = models.Const1D()
constant.amplitude.min = 0.0
constant.amplitude.max = None

# Lorentz component
#log_lorentz = LogLorentz1D()
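# A composite PSD model could then be assembled as, e.g. (illustrative, not part
# of the original snippet):
#   psd_model = power_law + constant
# in which case the bounds set above reappear on the suffixed parameters
# (amplitude_0, alpha_0, amplitude_1).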
예제 #20
0
    def setup_class(cls):
        m = 1
        nfreq = 100000
        freq = np.linspace(1, 1000, nfreq)

        np.random.seed(100)  # set the seed for the random number generator
        noise = np.random.exponential(size=nfreq)

        cls.model = models.PowerLaw1D() + models.Const1D()
        cls.model.x_0_0.fixed = True

        cls.alpha_0 = 2.0
        cls.amplitude_0 = 100.0
        cls.amplitude_1 = 2.0

        cls.model.alpha_0 = cls.alpha_0
        cls.model.amplitude_0 = cls.amplitude_0
        cls.model.amplitude_1 = cls.amplitude_1

        p = cls.model(freq)
        power = noise * p

        ps = Powerspectrum()
        ps.freq = freq
        ps.power = power
        ps.m = m
        ps.df = freq[1] - freq[0]
        ps.norm = "leahy"

        cls.ps = ps
        cls.a_mean, cls.a_var = 2.0, 1.0
        cls.a2_mean, cls.a2_var = 100.0, 10.0

        p_amplitude_1 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)

        p_alpha_0 = lambda alpha: \
            scipy.stats.uniform(0.0, 5.0).pdf(alpha)

        p_amplitude_0 = lambda amplitude: \
            scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(
                amplitude)

        cls.priors = {
            "amplitude_1": p_amplitude_1,
            "amplitude_0": p_amplitude_0,
            "alpha_0": p_alpha_0
        }

        cls.lpost = PSDPosterior(cls.ps, cls.model)
        cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)

        cls.fitmethod = "BFGS"
        cls.max_post = True
        cls.t0 = [cls.amplitude_0, cls.alpha_0, cls.amplitude_1]
        cls.neg = True
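        # Minimising the negative log-posterior (neg=True) of lpost starting from
        # t0 gives the maximum-a-posteriori (MAP) estimate wrapped in the dummy
        # results object below.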
        cls.opt = scipy.optimize.minimize(cls.lpost,
                                          cls.t0,
                                          method=cls.fitmethod,
                                          args=cls.neg,
                                          tol=1.e-5)

        cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
                                                      cls.opt,
                                                      neg=True)
예제 #21
0
    def estimate_galactic_extinction(self, ax=None, r_v: float = 3.1, **kwargs):
        import extinction
        if ax is None:
            fig, ax = plt.subplots()
        if "marker" not in kwargs:
            kwargs["marker"] = "x"

        self.retrieve_extinction_table()
        lambda_eff_tbl = self.irsa_extinction["LamEff"].to(
            units.Angstrom)
        power_law = models.PowerLaw1D()
        fitter = fitting.LevMarLSQFitter()
        fitted = fitter(power_law, lambda_eff_tbl, self.irsa_extinction["A_SandF"].value)

        tbl = self.photometry_to_table(fmts=["ascii.ecsv", "ascii.csv"])

        x = np.linspace(0, 80000, 1000) * units.Angstrom
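        # a_v below: total V-band extinction from the standard relation
        # A_V = R_V * E(B-V); self.ebv_sandf is presumably the Schlafly &
        # Finkbeiner E(B-V) value.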

        a_v = (r_v * self.ebv_sandf).value

        tbl["ext_gal_sandf"] = extinction.fitzpatrick99(tbl["lambda_eff"], a_v, r_v) * units.mag
        tbl["ext_gal_pl"] = fitted(tbl["lambda_eff"]) * units.mag
        tbl["ext_gal_interp"] = np.interp(
            tbl["lambda_eff"],
            lambda_eff_tbl,
            self.irsa_extinction["A_SandF"].value
        ) * units.mag

        ax.plot(
            x, extinction.fitzpatrick99(x, a_v, r_v),
            label="S\&F + F99 extinction law",
            c="red"
        )
        ax.plot(
            x, fitted(x),
            label=f"power law fit to IRSA",
            # , \\alpha={fitted.alpha.value}; $x_0$={fitted.x_0.value}; A={fitted.amplitude.value}",
            c="blue"
        )
        ax.scatter(
            lambda_eff_tbl, self.irsa_extinction["A_SandF"].value,
            label="from IRSA",
            c="green",
            **kwargs)
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_pl"].value,
            label="power law interpolation of IRSA",
            c="blue",
            **kwargs
        )
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_interp"].value,
            label="numpy interpolation from IRSA",
            c="violet",
            **kwargs
        )
        ax.scatter(
            tbl["lambda_eff"], tbl["ext_gal_sandf"].value,
            label="S\&F + F99 extinction law",
            c="red",
            **kwargs
        )
        ax.set_ylim(0, 0.6)
        ax.legend()
        plt.savefig(os.path.join(self.data_path, f"{self.name_filesys}_irsa_extinction.pdf"))
        plt.close()
        self.extinction_power_law = {
            "amplitude": fitted.amplitude.value * fitted.amplitude.unit,
            "x_0": fitted.x_0.value,
            "alpha": fitted.alpha.value
        }

        for row in tbl:
            instrument = row["instrument"]
            band = row["band"]
            epoch_name = row["epoch_name"]

            # if row["lambda_eff"] > max(lambda_eff_tbl) or row["lambda_eff"] < min(lambda_eff_tbl):
            #     key = "ext_gal_pl"
            #     self.photometry[instrument][band]["ext_gal_type"] = "power_law_fit"
            # else:
            #     key = "ext_gal_interp"
            #     self.photometry[instrument][band]["ext_gal_type"] = "interpolated"
            key = "ext_gal_sandf"
            self.photometry[instrument][band][epoch_name]["ext_gal_type"] = "s_and_f"
            self.photometry[instrument][band][epoch_name]["ext_gal"] = row[key]
            self.photometry[instrument][band][epoch_name]["mag_ext_corrected"] = row["mag"] - row[key]
            if "mag_sep" in row.colnames:
                self.photometry[instrument][band][epoch_name]["mag_sep_ext_corrected"] = row["mag_sep"] - row[key]

        # tbl_2 = self.photometry_to_table()
        # tbl_2.update(tbl)
        # tbl_2.write(self.build_photometry_table_path().replace("photometry", "photemetry_extended"))
        self.update_output_file()
        return ax