Example #1
def plot_posteriors(outdir, N_gauss):
    """
    Plots posteriors using pair_posterior code
    """
    tab = load_results(outdir, N_gauss)
    weights = tab["weights"]
    tab.remove_columns(("weights", "logLike"))
    fileUtil.mkdir(outdir + "/plots/")

    # Number of gaussians determines which pair_posterior code is used
    pair_posterior(tab, weights, N_gauss, outfile=outdir + "/plots/posteriors.png", title=outdir)

    return
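A minimal usage sketch, assuming a MultiNest run for a 3-component fit has already been written to an output directory (the directory name and component count below are hypothetical):

# Hypothetical call: load_results() reads the saved fit from 'mnest_run1'
# and the posterior plot is written to 'mnest_run1/plots/posteriors.png'.
plot_posteriors('mnest_run1', N_gauss=3)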
Example #2
def run(catalogfile, vel_err, mag_err, N_gauss, outdir, rotate=True):
    """
    PyMultiNest run to determine cluster membership, using PM catalog and
    applying vel_err and mag_err cuts. Output is put in newly-created outdir
    directory (must be a string).

    Parameters:
    catalogfile --> String containing the name of a FITS catalog.
    vel_err --> The maximum allowed velocity error for stars to be included.
    mag_err --> The maximum allowed magnitude error for stars to be included.
    N_gauss --> The number of bivariate Gaussians to fit, where N_gauss <= 4.
    outdir --> The output directory name.
    
    Keywords:
    rotate=True --> rotate star velocities into the RA/Dec frame, as opposed
    to X,Y pixel coordinates.
    """
    # Load data for full field, extract velocities (already converted to mas)
    d = loadData(catalogfile, vel_err, mag_err, rotate=rotate)

    star_Vx = d["fit_vx"]
    star_Vy = d["fit_vy"]
    star_Sigx = d["fit_vxe"]
    star_Sigy = d["fit_vye"]

    N_stars = len(d)

    def print_param(pname, val, logp, headerFirst=False):
        rowHead = "{0:6s}  "
        colHead = " val_{0} (  logp_{0} )"
        colVal = "{0:6.3f} ({1:9.2e})"

        if headerFirst:
            outhdr = "  ".join([colHead.format(k) for k in range(N_gauss)])
            print(rowHead.format("") + outhdr)

        outstr = "  ".join([colVal.format(val[k], logp[k]) for k in range(N_gauss)])
        print(rowHead.format(pname) + outstr)

        return

    def priors(cube, ndim, nparams):
        return

    def likelihood(cube, ndim, nparams):
        """
        Define the likelihood function (from Clarkson+12, Hosek+15)
        """
        # start the timer
        t0 = time.time()

        ####################
        # Set up model params
        ####################
        # Number of parameters per Gaussian:
        N_per_gauss = 6

        # Make arrays for the parameters of each Gaussian
        pi = np.arange(N_gauss, dtype=float)
        vx = np.arange(N_gauss, dtype=float)
        vy = np.arange(N_gauss, dtype=float)
        sigA = np.arange(N_gauss, dtype=float)
        sigB = np.arange(N_gauss, dtype=float)
        theta = np.arange(N_gauss, dtype=float)

        # Make arrays for the prior probability of each parameter
        logp_pi = np.arange(N_gauss, dtype=float)
        logp_vx = np.arange(N_gauss, dtype=float)
        logp_vy = np.arange(N_gauss, dtype=float)
        logp_sigA = np.arange(N_gauss, dtype=float)
        logp_sigB = np.arange(N_gauss, dtype=float)
        logp_theta = np.arange(N_gauss, dtype=float)

        # Set the fraction of stars in each Gaussian
        for kk in range(N_gauss):
            pi[kk], logp_pi[kk] = random_pi(cube[kk * N_per_gauss + 0])

        # Make sure that sum(pi) = 1.
        pi /= pi.sum()

        # Sort the field pi values such that they are always ranked from
        # smallest to largest.
        sidx = pi[1:].argsort()
        pi[1:] = pi[1:][sidx]
        logp_pi[1:] = logp_pi[1:][sidx]

        # Re-set the cube values. Note this is AFTER sorting.
        for kk in range(N_gauss):
            cube[kk * N_per_gauss + 0] = pi[kk]

        # Set the other Gaussian parameters.
        for kk in range(N_gauss):
            # Treat the cluster gaussian (the first, most compact one)
            # with a special prior function.
            if kk == 0:
                rand_vx = random_clust_vx
                rand_vy = random_clust_vy
                rand_sigA = random_clust_sigA
                rand_sigB = random_clust_sigB
                rand_theta = random_clust_theta
            else:
                rand_vx = random_v
                rand_vy = random_v
                rand_sigA = random_sig
                rand_sigB = random_sig
                rand_theta = random_theta

            # Velocity center
            vx[kk], logp_vx[kk] = rand_vx(cube[kk * N_per_gauss + 1])
            cube[kk * N_per_gauss + 1] = vx[kk]

            vy[kk], logp_vy[kk] = rand_vy(cube[kk * N_per_gauss + 2])
            cube[kk * N_per_gauss + 2] = vy[kk]

            # Major axis
            sigA[kk], logp_sigA[kk] = rand_sigA(cube[kk * N_per_gauss + 3])
            cube[kk * N_per_gauss + 3] = sigA[kk]

            # Minor axis
            sigB[kk], logp_sigB[kk] = rand_sigB(cube[kk * N_per_gauss + 4])
            cube[kk * N_per_gauss + 4] = sigB[kk]

            # Angle of major axis (in radians)
            theta[kk], logp_theta[kk] = rand_theta(cube[kk * N_per_gauss + 5])
            cube[kk * N_per_gauss + 5] = theta[kk]

            # Only consider Gaussians where sigA >= sigB.
            if sigB[kk] > sigA[kk]:
                return -np.inf

            # Check that all our prior probabilities are valid, otherwise abort
            # before expensive calculation.
            if (
                (logp_pi[kk] == -np.inf)
                or (logp_vx[kk] == -np.inf)
                or (logp_vy[kk] == -np.inf)
                or (logp_sigA[kk] == -np.inf)
                or (logp_sigB[kk] == -np.inf)
                or (logp_theta[kk] == -np.inf)
            ):
                return -np.inf

        ################################
        # Calculate the likelihood function (see the LaTeX rendering
        # after this example):
        #  Likelihood =
        #    \Prod(i=1 -> N_stars) \Sum(k=1 -> N_gauss)
        #        \pi_k * (2 \pi |\Sigma_k,i|)^{-1/2} *
        #        exp[ -1/2 * (\mu_i - \mu_k)^T \Sigma_k,i^{-1} (\mu_i - \mu_k) ]
        ################################
        # Keep track of the probability for each star, each gaussian
        # component. We will add over components and multiply over stars.
        prob_gauss = np.zeros((N_gauss, N_stars), dtype=float)

        # L_{i,k}  Loop through the different gaussian components.
        for kk in range(N_gauss):
            # N_stars long array
            prob_gauss[kk, :] = prob_ellipse(
                star_Vx, star_Vy, star_Sigx, star_Sigy, pi[kk], vx[kk], vy[kk], sigA[kk], sigB[kk], theta[kk]
            )

        # For each star, the total likelihood is the sum
        # of each component (before log).
        L_star = prob_gauss.sum(axis=0)  # This array should be N-stars long
        logL_star = np.log10(L_star)

        # Final likelihood
        logL = logL_star.sum()
        logL_tmp = logL

        # Add in log(prior probabilities) as well
        for kk in range(N_gauss):
            logL += logp_pi[kk]
            logL += logp_vx[kk]
            logL += logp_vy[kk]
            logL += logp_sigA[kk]
            logL += logp_sigB[kk]
            logL += logp_theta[kk]

        # Some printing
        print "*** logL = {0:9.2e}   w/priors = {1:9.2e}".format(logL_tmp, logL)

        print_param("pi", pi, logp_pi, headerFirst=True)
        print_param("vx", vx, logp_vx)
        print_param("vy", vy, logp_vy)
        print_param("sigA", sigA, logp_sigA)
        print_param("sigB", sigB, logp_sigB)
        print_param("theta", theta, logp_theta)

        t1 = time.time()

        total = t1 - t0

        print "TIME SPENT: " + str(total)
        # pdb.set_trace()
        return logL

    #########################################
    # End Likelihoods
    # Begin running multinest
    #########################################
    # Make new directory to hold output
    fileUtil.mkdir(outdir)
    outroot = outdir + "/mnest_"

    num_dims = 2 * 3 * N_gauss
    num_params = num_dims
    ev_tol = 0.3
    samp_eff = 0.8
    n_live_points = 300

    # Create param file
    _run = open(outroot + "params.run", "w")
    _run.write("Catalog: %s\n" % catalogfile)
    _run.write("Vel Err Cut: %.2f\n" % vel_err)
    _run.write("Mag Err Cut: %.2f\n" % mag_err)
    _run.write("Rotate: %s\n" % str(rotate))
    _run.write("Num Gauss: %d\n" % N_gauss)
    _run.write("Num Dimensions: %d\n" % num_dims)
    _run.write("Num Params: %d\n" % num_params)
    _run.write("Evidence Tolerance: %.1f\n" % ev_tol)
    _run.write("Sampling Efficiency: %.1f\n" % samp_eff)
    _run.write("Num Clustering Params: %d\n" % num_dims)
    _run.write("Num Live Points: %d\n" % n_live_points)
    _run.close()

    # Run multinest
    pymultinest.run(
        likelihood,
        priors,
        num_dims,
        n_params=num_params,
        outputfiles_basename=outroot,
        verbose=True,
        resume=False,
        evidence_tolerance=ev_tol,
        sampling_efficiency=samp_eff,
        n_live_points=n_live_points,
        multimodal=True,
        n_clustering_params=num_dims,
        importance_nested_sampling=False,
    )

    return
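Rendered in LaTeX, the mixture likelihood evaluated by likelihood() above (per the Clarkson+12 / Hosek+15 references in its docstring) is, writing Sigma_{k,i} for the k-th component's covariance, built from sigA, sigB, theta and, as assumed from the prob_ellipse call, broadened by star i's velocity errors:

\mathcal{L} = \prod_{i=1}^{N_\mathrm{stars}} \sum_{k=1}^{N_\mathrm{gauss}}
    \pi_k \, \bigl( 2\pi \, \lvert \Sigma_{k,i} \rvert \bigr)^{-1/2}
    \exp\!\Bigl[ -\tfrac{1}{2} \, (\mu_i - \mu_k)^{T} \, \Sigma_{k,i}^{-1} \, (\mu_i - \mu_k) \Bigr]

prob_ellipse is assumed to return the per-star summand for one component, so prob_gauss.sum(axis=0) gives each star's mixture density, and the code takes logs and sums over stars before adding the log-priors.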
Example #3
def mass_posterior(tab, outdir, outfile, bins=50, xlim=None):

    # Make a histogram of the mass using the weights. This creates the
    # marginalized 1D posteriors.
    fontsize1 = 18
    fontsize2 = 14

    massMax = np.ceil(tab['Mass'].max())

    py.figure(1)
    py.clf()
    bins = np.arange(0, massMax, massMax / bins)
    n, foo, patch = py.hist(tab['Mass'],
                            density=True,
                            histtype='step',
                            weights=tab['weights'],
                            bins=bins,
                            linewidth=1.5)
    bin_centers = (bins[:-1] + bins[1:]) / 2
    xtitle = r'Lens Mass (M$_{\odot}$)'
    py.xlabel(xtitle, fontsize=fontsize1, labelpad=10)
    if xlim is None:
        py.xlim(0, np.ceil(tab['Mass'].max()))
    else:
        py.xlim(xlim[0], xlim[1])
    py.ylabel('Relative probability', fontsize=fontsize1, labelpad=10)
    py.xticks(fontsize=fontsize2)
    py.yticks(fontsize=fontsize2)

    ##########
    # Calculate 3-sigma boundaries for mass limits.
    ##########
    sig1_hi = 0.682689
    sig1_lo = 1.0 - sig1_hi
    sig_med = 0.5
    sig2_hi = 0.9545
    sig2_lo = 1.0 - sig2_hi
    sig3_hi = 0.9973
    sig3_lo = 1.0 - sig3_hi

    quantiles = [sig3_lo, sig2_lo, sig1_lo, sig_med, sig1_hi, sig2_hi, sig3_hi]

    mass_quants = weighted_quantile(tab['Mass'],
                                    quantiles,
                                    sample_weight=tab['weights'])

    for qq in range(len(quantiles)):
        print('Mass at {0:.1f}% quantile:  M = {1:5.2f}'.format(
            quantiles[qq] * 100, mass_quants[qq]))

    ax = py.axis()
    py.axvline(mass_quants[0], color='red', linestyle='--')
    py.text(mass_quants[0] + 0.05,
            0.9 * ax[3],
            'M>{0:5.2f} with 99.7% confidence'.format(mass_quants[0]),
            fontsize=12)
    py.axvline(mass_quants[-1], color='green', linestyle='--')
    py.text(mass_quants[-1] + 0.05,
            0.8 * ax[3],
            'M<{0:5.2f} with 99.7% confidence'.format(mass_quants[-1]),
            fontsize=12)

    ##########
    # Save figure
    ##########
    outfile = outdir + outfile
    fileUtil.mkdir(outdir)
    print('writing plot to file ' + outfile)

    py.savefig(outfile)

    return
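weighted_quantile is an external helper that is not shown in these examples. A minimal NumPy sketch of the behavior assumed here, interpolating the weighted empirical CDF of the posterior samples (a sketch only, not the project's actual implementation):

import numpy as np

def weighted_quantile(values, quantiles, sample_weight=None):
    # Sketch: quantiles of `values` under per-sample weights, via linear
    # interpolation of the weighted empirical CDF.
    values = np.asarray(values, dtype=float)
    quantiles = np.atleast_1d(quantiles).astype(float)
    if sample_weight is None:
        sample_weight = np.ones_like(values)
    sample_weight = np.asarray(sample_weight, dtype=float)
    order = np.argsort(values)
    values, sample_weight = values[order], sample_weight[order]
    cdf = np.cumsum(sample_weight) - 0.5 * sample_weight
    cdf /= sample_weight.sum()
    return np.interp(quantiles, cdf, values)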
Example #4
def plot_OB120169(
        root_dir='/Users/jlu/work/microlens/OB120169/analysis_2016_09_14/',
        analysis_dir='analysis_ob120169_2016_09_14_a4_m22_w4_MC100/',
        points_dir='points_d/',
        mnest_dir='multiNest/up/',
        mnest_root='up'):

    target = 'OB120169_R'
    display_name = 'OB120169'

    # Plot astrometric observations
    pointsFile = root_dir + analysis_dir + points_dir + target + '.points'
    pointsTab = Table.read(pointsFile, format='ascii')
    tobs = pointsTab['col1']
    xpix = pointsTab['col2']
    ypix = pointsTab['col3']
    xerr = pointsTab['col4']
    yerr = pointsTab['col5']

    pixScale = 9.952
    thetaSx_data = xpix * pixScale
    thetaSy_data = ypix * pixScale
    xerr_data = xerr * pixScale
    yerr_data = yerr * pixScale

    # Overplot Best-Fit Model
    tab = load_mnest_results_OB120169(root_dir=root_dir + analysis_dir,
                                      mnest_dir=mnest_dir,
                                      mnest_root=mnest_root)
    params = tab.keys()
    pcnt = len(params)
    Masslim = 0.0

    # Clean table and only keep valid results.
    ind = np.where((tab['Mass'] >= Masslim))[0]
    tab = tab[ind]

    best = np.argmax(tab['logLike'])
    maxLike = tab['logLike'][best]
    print('Best-Fit Solution:')
    print('  Index = {0:d}'.format(best))
    print('  Log(L) = {0:6.2f}'.format(maxLike))
    print('  Params:')
    for i in range(pcnt):
        print('     {0:15s}  {1:10.3f}'.format(params[i], tab[params[i]][best]))

    t0 = tab['t0'][best]
    tE = tab['tE'][best]
    thetaS0x = tab['thetaS0x'][best]
    thetaS0y = tab['thetaS0y'][best]
    muSx = tab['muSx'][best]
    muSy = tab['muSy'][best]
    muRelx = tab['muRelx'][best]
    muRely = tab['muRely'][best]
    beta = tab['beta'][best]
    piEN = tab['piEN'][best]
    piEE = tab['piEE'][best]
    print('     muLx = {0:5.2f} mas/yr'.format(muSx - muRelx))
    print('     muLy = {0:5.2f} mas/yr'.format(muSy - muRely))

    ##########
    # Get astrometry for best-fit model. Do this on a fine time grid
    # and also at the points of the observations.
    ##########
    tmod = np.arange(t0 - 20.0, t0 + 20.0, 0.01)
    model = MCMC_LensModel.LensModel_Trial1(tmod, t0, tE, [thetaS0x, thetaS0y],
                                            [muSx, muSy], [muRelx, muRely],
                                            beta, [piEN, piEE])
    model_tobs = MCMC_LensModel.LensModel_Trial1(tobs, t0, tE,
                                                 [thetaS0x, thetaS0y],
                                                 [muSx, muSy],
                                                 [muRelx, muRely], beta,
                                                 [piEN, piEE])

    thetaS_model = model[0]
    thetaE_amp = model[1]
    M = model[2]
    shift = model[3]
    thetaS_nolens = model[4]

    thetaS_model_tobs = model_tobs[0]
    thetaE_amp_tobs = model_tobs[1]
    M_tobs = model_tobs[2]
    shift_tobs = model_tobs[3]
    thetaS_nolens_tobs = model_tobs[4]

    thetaSx_model = thetaS_model[:, 0]
    thetaSy_model = thetaS_model[:, 1]
    thetaS_nolensx = thetaS_nolens[:, 0]
    thetaS_nolensy = thetaS_nolens[:, 1]
    shiftx = shift[:, 0]
    shifty = shift[:, 1]

    thetaSx_model_tobs = thetaS_model_tobs[:, 0]
    thetaSy_model_tobs = thetaS_model_tobs[:, 1]

    ##########
    # Calculate Chi-Squared Stat for comparison to velocity-only fit.
    ##########
    # Find the model points closest to the data.
    dx = thetaSx_data - thetaSx_model_tobs
    dy = thetaSy_data - thetaSy_model_tobs
    chi2_x = ((dx / xerr_data)**2).sum()
    chi2_y = ((dy / yerr_data)**2).sum()
    N_dof = (2 * len(dx)) - 9
    chi2_red = (chi2_x + chi2_y) / N_dof
    print('Chi-Squared of Lens Model Fit:')
    print('        X Chi^2 = {0:5.2f}'.format(chi2_x))
    print('        Y Chi^2 = {0:5.2f}'.format(chi2_y))
    print('  Reduced Chi^2 = {0:5.2f}'.format(chi2_red))

    ##########
    # Plotting
    ##########
    fontsize1 = 18
    fontsize2 = 14

    py.clf()
    fig1 = py.figure(2, figsize=(10, 8))
    py.subplots_adjust(left=0.1, top=0.95, hspace=0.3, wspace=0.3)

    paxes = py.subplot(2, 2, 1)
    originX = np.mean(thetaSx_model)
    originY = np.mean(thetaSy_model)
    py.plot(tmod - t0, thetaSx_model - originX, 'r-')
    py.plot(tmod - t0, thetaS_nolensx - originX, 'r--')
    py.errorbar(tobs - t0, thetaSx_data - originX, yerr=xerr_data, fmt='k.')
    py.ylabel(r'$\Delta\alpha$ (mas)', fontsize=fontsize1)
    py.xlabel(r't - t$_{\mathrm{o}}$ (yr)', fontsize=fontsize1)
    py.ylim(-4, 12)
    py.xlim(-0.5, 6.0)
    xticks = np.arange(0, 4.1, 1, dtype=int)
    py.xticks(xticks, fontsize=fontsize2)
    py.yticks(fontsize=fontsize2)

    paxes = py.subplot(2, 2, 2)
    py.plot(tmod - t0, thetaSy_model - originY, 'b-')
    py.plot(tmod - t0, thetaS_nolensy - originY, 'b--')
    py.errorbar(tobs - t0, thetaSy_data - originY, yerr=yerr_data, fmt='k.')
    py.ylabel(r'$\Delta\delta$ (mas)', fontsize=fontsize1)
    py.xlabel(r't - t$_{\mathrm{o}}$ (yr)', fontsize=fontsize1)
    py.ylim(-2, 6)
    py.xlim(-0.5, 6.0)
    py.xticks(xticks, fontsize=fontsize2)
    py.yticks(fontsize=fontsize2)

    paxes = py.subplot(2, 2, 3)
    n = len(tmod)
    py.plot(thetaS_nolensx - originX, thetaS_nolensy - originY, 'g--')
    py.plot(thetaSx_model - originX, thetaSy_model - originY, 'g-')
    py.errorbar(thetaSx_data - originX,
                thetaSy_data - originY,
                xerr=xerr_data,
                yerr=yerr_data,
                fmt='k.')
    py.ylabel(r'$\Delta\delta$ (mas)', fontsize=fontsize1)
    py.xlabel(r'$\Delta\alpha$ (mas)', fontsize=fontsize1)
    py.axis('equal')
    py.ylim(-5, 7)
    py.xlim(-4, 8)
    py.xticks(fontsize=fontsize2)
    py.yticks(fontsize=fontsize2)
    py.xlim(py.xlim()[::-1])

    # Make a histogram of the mass using the weights. This creates the
    # marginalized 1D posteriors.
    paxes = py.subplot(2, 2, 4)
    bins = np.arange(0, 30, 0.25)
    n, bins, patch = py.hist(tab['Mass'],
                             density=True,
                             histtype='step',
                             weights=tab['weights'],
                             bins=bins,
                             linewidth=1.5)
    bin_centers = (bins[:-1] + bins[1:]) / 2
    xtitle = r'Lens Mass (M$_{\odot}$)'
    py.xlabel(xtitle, fontsize=fontsize1, labelpad=10)
    py.xlim(0, 20)
    py.ylim(0, 0.2)
    py.ylabel('Relative probability', fontsize=fontsize1, labelpad=10)
    py.xticks(fontsize=fontsize2)
    py.yticks(fontsize=fontsize2)

    outdir = root_dir + analysis_dir + mnest_dir + 'plots/'
    outfile = outdir + 'plot_OB120169_data_vs_model.png'
    fileUtil.mkdir(outdir)
    print('writing plot to file ' + outfile)

    py.savefig(outfile)

    return
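For reference, the goodness-of-fit block above computes, in LaTeX form (N_obs is the number of astrometric epochs, and the code's N_dof subtracts 9 model parameters):

\chi^2_x = \sum_{i} \left( \frac{\Delta x_i}{\sigma_{x,i}} \right)^2 , \qquad
\chi^2_y = \sum_{i} \left( \frac{\Delta y_i}{\sigma_{y,i}} \right)^2 , \qquad
\chi^2_\mathrm{red} = \frac{\chi^2_x + \chi^2_y}{2\,N_\mathrm{obs} - 9}

where \Delta x_i and \Delta y_i are the data-minus-model residuals at the observed epochs.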
Example #5
def plot_lgs_atm_inst():
    """
    Plot PSFs and OTFs assuming a stationary AO and instrumental structure
    function but variable anisoplanatism.
    """
    # Make an IDL batch file to produce the perfect Keck PSF and OTF.
    work_dir = '/Users/jlu/work/ao/ao_opt/papers/lgs_theory/'
    plot_dir = work_dir + 'plot_lgs_atm_inst/'
    fileUtil.mkdir(plot_dir)

    _idl = open(plot_dir + 'plot_lgs_atm_inst.idl', 'w')
    _idl.write('print, "test"\n')
    _idl.write('.r lu_airopa_lgs.pro\n')
    _idl.write('plot_lgs_atm_inst\n')
    _idl.write('exit')
    _idl.close()

    cmd = 'airopa_idl < ' + plot_dir + 'plot_lgs_atm_inst.idl >& '+ plot_dir + 'plot_lgs_atm_inst.log'

    print('### Running IDL:')
    print(cmd)
    # subprocess.call(cmd, shell=True)
    pdb.set_trace()

    psf_on = fits.getdata(plot_dir + 'lgs_psf_0512_0512.fits')
    otf_on = fits.getdata(plot_dir + 'lgs_otf_0512_0512.fits')
    psf_off_atm = fits.getdata(plot_dir + 'lgs_psf_atm_0867_0867.fits')
    otf_off_atm = fits.getdata(plot_dir + 'lgs_otf_atm_0867_0867.fits')
    psf_off_both = fits.getdata(plot_dir + 'lgs_psf_both_0867_0867.fits')
    otf_off_both = fits.getdata(plot_dir + 'lgs_otf_both_0867_0867.fits')
    
    # Colormap Normalization
    norm = colors.PowerNorm(gamma=1./4.)
    extent_rng = (psf_on.shape[0] - (psf_on.shape[0]/2.0)) * 0.00996
    extent = [extent_rng, -extent_rng, -extent_rng, extent_rng]

    lim = 0.75
    
    plt.close(1)
    plt.figure(1, figsize=(15, 10))
    plt.clf()
    plt.subplots_adjust(left=0.2, bottom=0.0, right=1.0, hspace=0, wspace=0)
    
    plt.subplot(231)
    plt.imshow(psf_on, cmap="gray", norm=norm, extent=extent)
    plt.setp(plt.gca().get_yticklabels(), visible=False)
    plt.setp(plt.gca().get_xticklabels(), visible=False)
    plt.gca().get_xaxis().set_visible(False)
    plt.ylabel('PSF')
    plt.title('On Axis')
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)

    plt.subplot(234)
    plt.imshow(otf_on, cmap="plasma", extent=extent)
    plt.ylabel('OTF')
    plt.setp(plt.gca().get_yticklabels(), visible=False)
    plt.setp(plt.gca().get_xticklabels(), visible=False)
    plt.gca().get_xaxis().set_visible(False)
    # plt.gca().get_yaxis().set_visible(False)
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)
    plt.gca().set_yticks([])
    plt.gca().set_xticks([])

    plt.subplot(232)
    plt.imshow(psf_off_atm, cmap="gray", norm=norm, extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.title('Off Axis: Atm')
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)

    plt.subplot(235)
    plt.imshow(otf_off_atm, cmap="plasma", extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)

    plt.subplot(233)
    plt.imshow(psf_off_both, cmap="gray", norm=norm, extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.title('Off Axis: Atm + Inst')
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)

    plt.subplot(236)
    plt.imshow(otf_off_both, cmap="plasma", extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.xlim(lim, -lim)
    plt.ylim(-lim, lim)
    
    plt.tight_layout()

    fig_dir = paper_dir + 'figures/lgs_atm_inst/'
    fileUtil.mkdir(fig_dir)
    
    fig_out = fig_dir + 'lgs_atm_inst.png'
    plt.savefig(fig_out)

    return
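The subprocess call above is left commented out, and the '>&' redirection in cmd is csh-style syntax that /bin/sh (what shell=True uses) does not guarantee to honor. A sketch of running the batch job with Python handling the redirection itself, assuming airopa_idl is an executable on the PATH (as the original cmd implies):

import subprocess

# Feed the IDL batch file on stdin and capture stdout+stderr in the log,
# sidestepping shell-specific redirection syntax.
with open(plot_dir + 'plot_lgs_atm_inst.idl') as batch, \
     open(plot_dir + 'plot_lgs_atm_inst.log', 'w') as log:
    subprocess.call(['airopa_idl'], stdin=batch, stdout=log,
                    stderr=subprocess.STDOUT)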
Example #6
def plot_ngs_perfect():
    """
    Plot the perfect Keck NGS PSF and OTF.
    """
    # Make an IDL batch file to produce the perfect Keck PSF and OTF.
    work_dir = '/Users/jlu/work/ao/ao_opt/papers/lgs_theory/'
    plot_dir = work_dir + 'plot_ngs_perfect/'
    fileUtil.mkdir(plot_dir)

    _idl = open(plot_dir + 'plot_ngs_perfect.idl', 'w')
    _idl.write('print, "test"\n')
    _idl.write('.r lu_airopa_lgs.pro\n')
    _idl.write('plot_ngs_perfect\n')
    _idl.write('exit')
    _idl.close()

    cmd = 'airopa_idl < ' + plot_dir + 'plot_ngs_perfect.idl >& '+ plot_dir + 'plot_ngs_perfect.log'

    print('### Running IDL:')
    print(cmd)
    # subprocess.call(cmd, shell=True)
    pdb.set_trace()

    psf_on = fits.getdata(plot_dir + 'ngs_perfect_psf.fits')
    otf_on = fits.getdata(plot_dir + 'ngs_perfect_otf.fits')

    psf_off = fits.getdata(plot_dir + 'ngs_psf_0867_0867.fits')
    otf_off = fits.getdata(plot_dir + 'ngs_otf_0867_0867.fits')
    
    # Colormap Normalization
    norm = colors.PowerNorm(gamma=1./4.)
    extent_rng = (psf_on.shape[0] - (psf_on.shape[0]/2.0)) * 0.00996
    extent = [extent_rng, -extent_rng, -extent_rng, extent_rng]
    
    plt.close(1)
    plt.figure(1, figsize=(10, 10))
    plt.clf()
    plt.subplots_adjust(left=0.15, bottom=0.0, right=1.0, hspace=0, wspace=0)
    
    plt.subplot(221)
    plt.imshow(psf_on, cmap="gray", norm=norm, extent=extent)
    plt.setp(plt.gca().get_yticklabels(), visible=False)
    plt.gca().get_xaxis().set_visible(False)
    plt.title('PSF')
    plt.ylabel('On Axis')

    plt.subplot(222)
    plt.imshow(otf_on, cmap="plasma", extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    plt.title('OTF')

    plt.subplot(223)
    plt.imshow(psf_off, cmap="gray", norm=norm, extent=extent)
    plt.setp(plt.gca().get_yticklabels(), visible=False)
    plt.gca().get_xaxis().set_visible(False)
    plt.ylabel('Off Axis')

    plt.subplot(224)
    plt.imshow(otf_off, cmap="plasma", extent=extent)
    plt.gca().get_xaxis().set_visible(False)
    plt.gca().get_yaxis().set_visible(False)
    
    plt.tight_layout()

    fig_dir = paper_dir + 'figures/ngs_perfect/'
    fileUtil.mkdir(fig_dir)
    
    fig_out = fig_dir + 'ngs_perfect.png'
    plt.savefig(fig_out)
    
    return