Beispiel #1
0
 def simulation(imc):
     """Run one Monte-Carlo realisation of the perturbed (numax, dnu) sample.

     Draws Gaussian multiplicative noise for numax and dnu (scales taken from
     the module-level `numax_perturb[inumax]` / `dnu_perturb[idnu]`), measures
     the distance of each perturbed point to the edge, fits the distance
     histogram, and returns the sharpness of that fit.

     Parameters
     ----------
     imc : int
         Index of this realisation (unused inside the function).

     Returns
     -------
     (xdata, ydata, obj, sharpness_pdv)
     """
     # Per-star multiplicative factors: 1 + N(0,1) * perturbation scale.
     noise_numax = np.array([random.gauss(0, 1) for _ in range(Ndata)])
     noise_dnu = np.array([random.gauss(0, 1) for _ in range(Ndata)])
     xdata = numax * (noise_numax * numax_perturb[inumax] + 1)
     ydata = dnu * (noise_dnu * dnu_perturb[idnu] + 1)

     # Distance of each perturbed point to the edge, then histogram fit using
     # the observed binning so realisations are comparable.
     dist = distance_to_edge(xdata, ydata, xedge_pdv, yedge_pdv, tck_pdv,
                             diagram=diagram, distance=distance)
     obj = distfit(dist, hist_model, bins=obj_obs.bins)
     obj.fit()
     return xdata, ydata, obj, hist_model.sharpness(obj.para_fit)
Beispiel #2
0
def model(theta):  #, obj_obs, xpdv, ypdv):
    """Forward model of the observed distance histogram (tied to model6).

    theta[0] is an offset added to the distances, theta[1] a fractional
    Gaussian perturbation applied to the model points.

    Returns
    -------
    (histy, weight, xdata, ydata) where `histy` is the model histogram
    rescaled to match the observed counts inside the weighted region.
    """
    # Boolean mask of histogram bins inside [x0 - 2*sigma, x0 + sigma],
    # taken from the observed fit parameters.  NOTE(review): the window is
    # asymmetric around x0 — presumably intentional; confirm against model6.
    sigma, x0 = obj_obs.para_fit[0], obj_obs.para_fit[1]
    weight = (obj_obs.histx <= x0 + sigma) & (obj_obs.histx >= x0 - 2 * sigma)

    npoints = xpdv.shape[0]

    # Fractional scatter for x: zero when no x uncertainties are supplied,
    # otherwise Gaussian draws scaled by theta[1].
    if e_xobs is None:
        fx = np.zeros(npoints)
    else:
        fx = np.array([random.gauss(0, 1) for _ in range(npoints)]) * theta[1]

    # Same for y.
    if e_yobs is None:
        fy = np.zeros(npoints)
    else:
        fy = np.array([random.gauss(0, 1) for _ in range(npoints)]) * theta[1]

    # Disturb the model points with the artificial scatter.
    xdata = xpdv + xpdv * (fx)
    ydata = ypdv + ypdv * (fy)

    hdist, xdata, ydata = distance_to_edge(xdata, ydata,
                                           xedge_pdv, yedge_pdv,
                                           tck_pdv, tp_pdv,
                                           diagram=diagram,
                                           distance=distance)
    # Shift the distances by the fitted offset and re-histogram with the
    # observed binning.
    hdist = hdist + theta[0]
    obj = distfit(hdist, hist_model, bins=obj_obs.bins)

    # Normalize so the counts in the weighted region match the observations.
    histy = obj.histy / np.sum(obj.histy[weight]) * np.sum(
        obj_obs.histy[weight])
    return histy, weight, xdata, ydata
Beispiel #3
0
                                                              1], tnu_samples_obs[:,
                                                                                  2]
    idx = (xobs**0.75 / yobs >= ycut) & (yobs >= np.min(yedge_obs)
                                         )  #& (yobs<=3.5)
    idx = idx & (feh_obs >= feh_limits[ifeh]) & (feh_obs <
                                                 feh_limits[ifeh + 1])
    xobs, yobs, feh_obs = xobs[idx], yobs[idx], feh_obs[idx]

    hdist_obs = distance_to_edge(xobs,
                                 yobs,
                                 xedge_obs,
                                 yedge_obs,
                                 tck_obs,
                                 diagram=diagram,
                                 distance=distance)
    obj_obs = distfit(hdist_obs, hist_model)
    obj_obs = distfit(hdist_obs, hist_model, bins=obj_obs.bins)
    obj_obs.fit()
    sharpness_obs = hist_model.sharpness(obj_obs.para_fit)
    Nobs = hdist_obs.shape[0]

    # set up models
    xedge_pdv, yedge_pdv = tnu_edges_pdv[:, 0], tnu_edges_pdv[:, 1]
    idx = (xedge_pdv**0.75 / yedge_pdv >= ycut)  #& (yedge<=3.5)
    xedge_pdv, yedge_pdv = xedge_pdv[idx], yedge_pdv[idx]

    numax, dnu, feh_pdv = tnu_samples_pdv[:,
                                          0], tnu_samples_pdv[:,
                                                              1], tnu_samples_pdv[:,
                                                                                  2]
    idx = (numax**0.75 / dnu >= ycut) & (dnu >= np.min(yedge_pdv)
Beispiel #4
0
    # edge
    xedge_obs, yedge_obs = mr_edges_obs[:, 0], mr_edges_obs[:, 1]

    filepath = rootpath + "sample/obs/sharpness/"
    if not os.path.exists(filepath): os.mkdir(filepath)

    # calculate observational distance
    dist = distance_to_edge(xobs,
                            yobs,
                            xedge_obs,
                            yedge_obs,
                            tck_obs,
                            diagram=diagram,
                            distance=distance)
    # dist = dist[(dist>-0.2) ]#& (dist<4)]
    obj_obs = distfit(dist, hist_model)

    hist_model.set_priors(obj_obs.histx, obj_obs.histy, dist)
    prior_guess = hist_model.prior_guess
    para_guess = hist_model.para_guess

    def lnprior(theta):
        # Flat (top-hat) prior: 0 when every theta[ip] lies inside its
        # [lower, upper] bounds, -inf as soon as one falls outside.
        for ip, bounds in enumerate(prior_guess):
            if not (bounds[0] <= theta[ip] <= bounds[1]):
                return -np.inf
        return 0.

    def lnlikelihood(theta):
        # return np.sum(np.log(hist_model.ymodel(theta, dist)))
        # sig = obj_obs.histy
        sig = 1
Beispiel #5
0
def fit(filepath, distance, diagram, hist_model, edges, tck, xobs, yobs):
    """Fit the distance-to-edge distribution of the observed sample.

    The distance of every (xobs, yobs) point to the diagram edge is
    histogrammed and `hist_model` is fitted to that histogram, either by
    maximum likelihood (default, `ifmle=True`) or by MCMC with emcee.
    Diagnostic plots and, for MCMC, a parameter summary are written into
    `filepath`.

    Parameters
    ----------
    filepath : str
        Output directory; created if missing.
    distance, diagram :
        Passed through to `distance_to_edge`.
    hist_model :
        Model object providing set_priors / ymodel / para_name etc.
    edges : array, shape (N, 2)
        Edge coordinates; column 0 is x, column 1 is y.
    tck :
        Spline representation of the edge, passed to `distance_to_edge`.
    xobs, yobs : arrays
        Observed coordinates.
    """
    # os.mkdir fails when intermediate directories are missing; makedirs
    # with exist_ok covers both cases.
    os.makedirs(filepath, exist_ok=True)

    # edges
    xedge, yedge = edges[:, 0], edges[:, 1]

    # calculate observational distance
    dist = distance_to_edge(xobs, yobs, xedge, yedge, tck,
                            diagram=diagram, distance=distance)
    obj_obs = distfit(dist, hist_model, density=True)

    # initial guesses and flat-prior bounds derived from the histogram
    hist_model.set_priors(obj_obs.histx, obj_obs.histy, dist)
    prior_guess = hist_model.prior_guess
    para_guess = hist_model.para_guess

    def lnprior(theta):
        # flat prior: 0 inside every bound, -inf outside
        for ip in range(len(prior_guess)):
            if not (prior_guess[ip][0] <= theta[ip] <= prior_guess[ip][1]):
                return -np.inf
        return 0.

    def lnlikelihood(theta):
        return np.sum(np.log(hist_model.ymodel(theta, dist)))

    def lnpost(theta):
        lp = lnprior(theta)
        if not np.isfinite(lp):
            return -np.inf
        return lnlikelihood(theta)

    def minus_lnpost(theta):
        # negated posterior for scipy.optimize.minimize
        lp = lnprior(theta)
        if not np.isfinite(lp):
            return np.inf
        return -lnlikelihood(theta)

    # ndim follows the model instead of being hard-coded to 3
    ndim = len(para_guess)
    nwalkers, nburn, nsteps = 500, 1000, 1000

    ifmle = True
    if ifmle:
        # maximum-likelihood estimate
        res = minimize(minus_lnpost, para_guess)
        para_fit = res.x
    else:
        # run mcmc with ensemble sampler
        print("enabling Ensemble sampler.")
        pos0 = [para_guess + 1.0e-6 * np.random.randn(ndim)
                for j in range(nwalkers)]
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)

        # burn-in
        print("start burning in. nburn:", nburn)
        for j, state in enumerate(sampler.sample(pos0, iterations=nburn,
                                                 thin=10)):
            display_bar(j, nburn)
        sys.stdout.write("\n")
        # NOTE: the unpacked log-probabilities must not be named `lnpost` —
        # the original code rebound the posterior function here.
        pos, lnprob0, rstate = state
        sampler.reset()

        # actual iteration
        print("start iterating. nsteps:", nsteps)
        for j, state in enumerate(sampler.sample(pos, iterations=nsteps)):
            display_bar(j, nsteps)
        sys.stdout.write("\n")

        # flatten the chains into (nwalkers*nsteps, ndim)
        samples = sampler.chain[:, :, :].reshape((-1, ndim))

        # 16/50/84 percentiles -> median and asymmetric 1-sigma errors
        summary = np.array(list(map(
            lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
            zip(*np.percentile(samples, [16, 50, 84], axis=0)))))
        para_fit = summary[:, 0]

        # last column repeats the adopted value (the median); the header
        # below calls it "maximum" — kept for file-format compatibility.
        summary = np.concatenate([summary, para_fit.reshape(ndim, 1)], axis=1)

        # save samples if the switch is toggled on
        if False: np.save(filepath + "samples.npy", samples)

        # plot triangle and save
        para_names = hist_model.para_name
        fig = corner.corner(samples, labels=para_names,
                            quantiles=(0.16, 0.5, 0.84))
        fig.savefig(filepath + "triangle.png")
        plt.close()

        # plot traces and save
        fig = plot_mcmc_traces(ndim, samples, para_names)
        plt.savefig(filepath + 'traces.png')
        plt.close()

        # save estimation result
        np.savetxt(filepath + "summary.txt", summary, delimiter=",",
                   fmt=("%0.8f", "%0.8f", "%0.8f", "%0.8f"),
                   header="50th quantile, 16th quantile sigma, 84th quantile sigma, maximum")

    # evaluate the fitted model on a fine grid (shared by both branches)
    xfit = np.linspace(obj_obs.histx.min(), obj_obs.histx.max(), 500)
    yfit = hist_model.ymodel(para_fit, xfit)

    # plot fitting results and save
    fig = plt.figure(figsize=(12, 6))
    axes = fig.subplots(nrows=1, ncols=1)
    axes.hist(dist, density=True, bins=250)
    obj_obs.plot_hist(ax=axes, histkwargs={"color": "red", "label": "Kepler"})
    axes.plot(xfit, yfit, "k--", label="Kepler fit")

    axes.grid(True)
    axes.set_xlim(obj_obs.histx.min(), obj_obs.histx.max())
    axes.set_ylim(0., obj_obs.histy.max() * 1.5)
    axes.legend()
    plt.savefig(filepath + "fitmedian.png")
    plt.close()