Example #1

import os

import numpy as np
import galsim

import g3metrics

# CTEST, NOISE_SIGMA, NGRID and NIMS are assumed to be set earlier in the full
# script; their values are not shown in this excerpt.
MTEST = [1.e-3, 3.e-3, 1.e-2, 3.e-2, 1.e-1]
NREPEAT = 1000

#GALSIM_DIR = os.path.join("/Path", "To", "Your", "Repo")
GALSIM_DIR = os.path.join("/Users", "browe", "great3", "galsim")

OUTFILE = os.path.join(
    'results', 'normalizationv3_G10_QuadPS_N' + str(NREPEAT) + '_noise_sigma' +
    str(NOISE_SIGMA) + '.pkl')
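# With NREPEAT = 1000 and, for illustration only, NOISE_SIGMA = 0.05, OUTFILE
# would evaluate to 'results/normalizationv3_G10_QuadPS_N1000_noise_sigma0.05.pkl'.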

if __name__ == "__main__":

    reference_ps = g3metrics.read_ps(galsim_dir=GALSIM_DIR)

    # Make the truth catalogues (a list of 2D, NGRIDxNGRID numpy arrays), reusing the reference_ps
    # each time for simplicity
    g1true_list, g2true_list = g3metrics.make_var_truth_catalogs(
        1, NIMS, [reference_ps], ngrid=NGRID, grid_units=galsim.degrees)
    # Define some empty storage arrays
    qG10unnorm = np.empty((len(CTEST), len(MTEST), NREPEAT))
    qQuadPSunnorm = np.empty((len(CTEST), len(MTEST), NREPEAT))

    # TEMP: Make all the PS realizations (of the truth) the same to see if this alters noise props
    g1true_list = [g1true_list[0]] * len(g1true_list)
    g2true_list = [g2true_list[0]] * len(g2true_list)

    # Then generate submissions, and truth submissions
    for c, i in zip(CTEST, range(len(CTEST))):

        print("Calculating PS metrics with c_i = " + str(c))
        for m, j in zip(MTEST, range(len(MTEST))):

            print("Calculating PS metrics with m_i = " + str(m))
            # The per-realization G10 and QuadPS metric calculations that fill
            # qG10unnorm[i, j, :] and qQuadPSunnorm[i, j, :] follow here; they
            # are not included in this excerpt.
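
The OUTFILE path above ends in .pkl, which suggests the filled metric arrays are pickled once the loops complete. The helper below is a minimal sketch of that final step, not code from the original script: the function name, the dictionary keys, and the use of the standard pickle module are all assumptions.

import pickle

def save_unnormalized_metrics(outfile, ctest, mtest, q_g10, q_quadps):
    # Bundle the bias grids and the unnormalized metric arrays so a later
    # analysis script can reload them without rerunning the NREPEAT realizations.
    results = {
        "CTEST": ctest,
        "MTEST": mtest,
        "qG10unnorm": q_g10,
        "qQuadPSunnorm": q_quadps,
    }
    with open(outfile, "wb") as fout:
        pickle.dump(results, fout)

# Hypothetical usage at the end of the __main__ block:
# save_unnormalized_metrics(OUTFILE, CTEST, MTEST, qG10unnorm, qQuadPSunnorm)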

Example #2

# This excerpt assumes the same imports and GALSIM_DIR definition as Example #1.
# SCALEPS, NFIELDS, NIMS, NGRID, DX_GRID, NOISE_SIGMA, NMONTE, CVALS, MVALS,
# NBINS_TEST, NBINS_ANGULAR, MIN_SEP and MAX_SEP are assumed to be set earlier
# in the full script; their values are not shown in this excerpt.

if __name__ == "__main__":

    # Load up the reference PS
    reference_ps = g3metrics.read_ps(galsim_dir=GALSIM_DIR, scale=SCALEPS)
    # Define some empty storage arrays
    qG3S_AMD_unnorm = np.empty((len(CVALS), len(MVALS), NMONTE))
    qG3S_QMD_unnorm = np.empty((len(CVALS), len(MVALS), NMONTE))

    # Then loop
    for krepeat in range(NMONTE):

        # Make the truth catalogues (a list of 2D, NGRIDxNGRID numpy arrays), reusing the reference
        # ps each time for simplicity
        print("Generating truth tables for Monte Carlo realization = " + str(krepeat + 1) + "/" +
              str(NMONTE))
        g1true_list, g2true_list = g3metrics.make_var_truth_catalogs(
            NFIELDS, NIMS, [reference_ps] * NFIELDS, ngrid=NGRID, dx_grid=DX_GRID,
            grid_units=galsim.degrees)

        # Then generate submissions, and truth submissions
        for c, i in zip(CVALS, range(NBINS_TEST)):
            print("Calculating CF metrics with c_i = " + str(c))
            for m, j in zip(MVALS, range(NBINS_TEST)):
                print("Calculating CF metrics with m_i = " + str(m))
                # Make a fake submission
                theta, mapEsubs, mapBsubs, maperrsubs, mapEtrues, mapBtrues = \
                    g3metrics.make_submission_var_shear_CF(
                        c1=c, c2=c, m1=m, m2=m, g1true_list=g1true_list,
                        g2true_list=g2true_list, noise_sigma=NOISE_SIGMA, dx_grid=DX_GRID,
                        nbins=NBINS_ANGULAR, min_sep=MIN_SEP, max_sep=MAX_SEP)
                # Calculate the metrics: the g3metrics.metricG3S_AMD call that
                # fills qG3S_AMD_unnorm[i, j, krepeat] is truncated in this
                # excerpt.
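
With qG3S_AMD_unnorm and qG3S_QMD_unnorm filled, a normalization study needs per-(c, m) summaries over the NMONTE realizations. The sketch below shows one way to collapse the Monte Carlo axis with numpy; the function name and the choice of mean and standard deviation as summaries are illustrative assumptions, not taken from the original script.

import numpy as np

def summarize_over_monte_carlo(q_unnorm):
    # q_unnorm has shape (len(CVALS), len(MVALS), NMONTE); collapse the last
    # axis to get the typical metric value and its scatter in each (c, m) cell.
    q_mean = np.mean(q_unnorm, axis=-1)
    q_std = np.std(q_unnorm, axis=-1)
    return q_mean, q_std

# Hypothetical usage once the Monte Carlo loop has finished:
# amd_mean, amd_std = summarize_over_monte_carlo(qG3S_AMD_unnorm)
# qmd_mean, qmd_std = summarize_over_monte_carlo(qG3S_QMD_unnorm)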