Example No. 1
import fitsio
import numpy as np
import kmeans_radec
from fitsio import FITS


def incomplete_shear_preprocess(zmin, zmax):

    shear = fitsio.read("source_v2.fits",
                        columns=[
                            "ra_gal", "dec_gal", "observed_redshift_gal",
                            "gamma1", "gamma2"
                        ])
    ra, dec = shear["ra_gal"], shear["dec_gal"]
    z = shear["observed_redshift_gal"]
    g1, g2 = shear["gamma1"], shear["gamma2"]

    dz = 0.05 * (1 + z)  # redshift uncertainties
    z = np.random.normal(z, dz)  # perturbed redshifts

    # keep only sources whose perturbed redshift lies above zmax + 0.1
    mask = (z > zmax + 0.1)
    z = z[mask]
    ra, dec = ra[mask], dec[mask]
    g1, g2 = g1[mask], g2[mask]

    print("Length of the catalog after applying the cut:", len(ra))
    coord = np.vstack([ra, dec]).T
    centers = np.loadtxt("flagship_jk_centers_v2.txt")
    NJK = centers.shape[0]
    print("Segmentation begins!")
    gal_labels_jk = kmeans_radec.find_nearest(coord, centers)
    print("Done with assigning jackknife labels to galaxies")

    gals = {
        "RA": ra,
        "DEC": dec,
        "gamma1": g1,
        "gamma2": g2,
        "redshift": z,
        "JK_LABEL": gal_labels_jk
    }

    fits = FITS('data/incomplete_shear_zmax_' + str(round(zmax, 1)) + '.fits',
                'rw')
    fits.write(gals,
               names=["RA", "DEC", "gamma1", "gamma2", "redshift", "JK_LABEL"])
    fits.close()

    for jk in range(len(centers)):

        # leave-one-out: drop the galaxies assigned to jackknife region jk
        gal_jk_mask = gals["JK_LABEL"] != jk
        gal_jk = {
            "RA": ra[gal_jk_mask],
            "DEC": dec[gal_jk_mask],
            "gamma1": g1[gal_jk_mask],
            "gamma2": g2[gal_jk_mask],
            "redshift": z[gal_jk_mask]
        }

        fits = FITS(
            'data/incomplete_shear_zmax_' + str(round(zmax, 1)) + '_jk_' +
            str(jk) + '.fits', 'rw')
        fits.write(gal_jk, names=["RA", "DEC", "gamma1", "gamma2", "redshift"])
        fits.close()

    return None
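
A minimal driver for the function above might look like the following sketch; the redshift limits are arbitrary, and it assumes "source_v2.fits", "flagship_jk_centers_v2.txt", and a data/ output directory are available in the working directory.

import os

os.makedirs("data", exist_ok=True)     # the function writes its outputs into data/
incomplete_shear_preprocess(0.9, 1.1)  # full catalog plus one leave-one-out file per jackknife region
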
Example No. 2
 def read_maps(self, fname):
     f = FITS(fname, 'r')
     p = f[1].read()  # noqa
     m = f[2].read()
     m = np.array([m[n] for n in f[2].get_colnames()])
     f.close()
     return m
Example No. 3
def mcalcat_write(cat, path):
    from os import remove
    from os.path import exists
    if exists(path):
        remove(path)
    fits = FITS(path, 'rw')
    for c in cat:
        fits.write(c.reset_index().to_records(index=False))
    fits.close()
Example No. 4
def mcalcat_read(path):
    cat = []
    fits = FITS(path, 'r')
    for c in fits[1:]:
        cat.append(
            DataFrame.from_records(
                c.read().byteswap().newbyteorder()).set_index(
                    'coadd_objects_id'))
    fits.close()
    return cat
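
A hedged round-trip sketch for mcalcat_write and mcalcat_read; the file name and the e1/e2 columns are illustrative, but a 'coadd_objects_id' column is needed because mcalcat_read re-indexes on it.

import numpy as np
from pandas import DataFrame

df = DataFrame({
    "coadd_objects_id": np.arange(5, dtype=np.int64),
    "e1": np.random.normal(0.0, 0.1, 5),
    "e2": np.random.normal(0.0, 0.1, 5),
}).set_index("coadd_objects_id")

mcalcat_write([df], "mcal_test.fits")  # one table extension per DataFrame
cat = mcalcat_read("mcal_test.fits")   # back to a list of indexed DataFrames
print(cat[0].head())
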
Example No. 5
def mcalcat_addzbin(d, zbin):
    fits = FITS(zbin)
    zbin = fits[1].read(columns=[
        'coadd_objects_id', 'zbin_mcal', 'zbin_mcal_1p', 'zbin_mcal_1m',
        'zbin_mcal_2p', 'zbin_mcal_2m'
    ])
    fits.close()
    zbin = zbin.byteswap().newbyteorder()
    zbin = DataFrame.from_records(zbin).set_index('coadd_objects_id')
    zbin = zbin.rename(columns={"zbin_mcal": "zbin"})
    cat = concat([d, zbin], axis=1)

    return cat[cat['zbin'] > -1].dropna()
Example No. 6
 def get_filename_from_fits(self, ffile, ftype):
     fname = None
     try:
         f = FITS(ffile)
         h = f[0].read_header()
         expnum = h['EXPNUM']
         fname = 'DECam_%08i' % expnum
         fname += '.' + ftype
         f.close()
     except Exception as e:
         self.logger.info('ERROR ' + str(e))
     finally:
         return fname
Example No. 7
def mcalcat_load(cat):
    fields = [
        'coadd_objects_id', 'ra', 'dec', 'psf_e1', 'psf_e2', 'e1', 'e2', 'R11',
        'R22', 'flags_select'
    ]
    fits = FITS(cat)
    cat = fits[1].read(columns=fields)
    fits.close()
    cat = cat.byteswap().newbyteorder()
    cat = DataFrame.from_records(cat).set_index('coadd_objects_id')
    # Select usable objects
    cat = cat[cat.flags_select == 0]
    # Weights
    cat['w'] = ones(cat.shape[0])
    return cat.drop(['flags_select'], axis=1)
Example No. 8
def random_preprocess():

    randoms = fitsio.read('flagship_randoms_v2.fits')
    ra, dec, jk_label = randoms["RA"], randoms["DEC"], randoms["JK_LABEL"]
    centers = np.loadtxt("flagship_jk_centers_v2.txt")

    for jk in range(len(centers)):

        jk_mask = jk_label != jk
        random_jk = {"RA": ra[jk_mask], "DEC": dec[jk_mask]}

        fits = FITS('data/random_jk_' + str(jk) + '.fits', 'rw')
        fits.write(random_jk, names=["RA", "DEC"])
        fits.close()

    return None
Example No. 9
def incomplete_gal_preprocess(zmin, zmax):

    lens = fitsio.read("lens.fits",
                       columns=["ra_gal", "dec_gal", "observed_redshift_gal"])
    ra, dec, z = lens["ra_gal"], lens["dec_gal"], lens["observed_redshift_gal"]
    mask = (z > zmin) & (z < zmax)
    ra, dec, z = ra[mask], dec[mask], z[mask]

    random_choice = np.random.choice(np.arange(len(ra)),
                                     int(0.45 * len(ra)),
                                     replace=False)
    ra, dec, z = ra[random_choice], dec[random_choice], z[random_choice]

    print("Length of the catalog after applying the cut:", len(ra))
    coord = np.vstack([ra, dec]).T
    centers = np.loadtxt("flagship_jk_centers_v2.txt")
    NJK = centers.shape[0]
    print("Segmentation begins!")
    gal_labels_jk = kmeans_radec.find_nearest(coord, centers)
    print("Done with assigning jackknife labels to galaxies")

    gals = {"RA": ra, "DEC": dec, "redshift": z, "JK_LABEL": gal_labels_jk}

    fits = FITS('data/incomplete_gal_zmax_' + str(round(zmax, 1)) + '.fits',
                'rw')
    fits.write(gals, names=["RA", "DEC", "redshift", "JK_LABEL"])
    fits.close()

    for jk in range(len(centers)):

        gal_jk_mask = gals["JK_LABEL"] != jk
        gal_jk = {
            "RA": ra[gal_jk_mask],
            "DEC": dec[gal_jk_mask],
            "redshift": z[gal_jk_mask]
        }

        fits = FITS(
            'data/incomplete_gal_zmax_' + str(round(zmax, 1)) + '_jk_' +
            str(jk) + '.fits', 'rw')
        fits.write(gal_jk, names=["RA", "DEC", "redshift"])
        fits.close()

    return None
Example No. 10
 def write_maps(self, maps, fname):
     f = FITS(fname, 'rw', clobber=True)
     f.write([self.goodpix], names=['pixels'])
     names = ['map_%d' % i for i in range(len(maps))]
     f.write(list(maps), names=names)
     f.close()
Example No. 11
 def read_maps(self, fname):
     f = FITS(fname, 'r')
     p = f[0].read()  # noqa
     m = f[1].read()
     f.close()
     return m
Example No. 12
 def write_maps(self, maps, fname):
     f = FITS(fname, 'rw', clobber=True)
     f.write(self.goodpix)
     f.write(maps)
     f.close()
Example No. 13
def write_catalog(fname,
                  x,
                  y,
                  z,
                  w,
                  type="DATA",
                  format="fits",
                  coord="PSEUDO_EQUATORIAL"):
    if format == "txt":
        np.savetxt(fname, np.transpose([x, y, z, w]))

    elif format == "fits":
        # current DM supported
        DM_VERSION = "2.5.0"

        # Extracted from AsciiToFits.py written by Daniele Tavagnacco

        if coord == "PSEUDO_EQUATORIAL":
            columns = [['SOURCE_ID', -1, 'f8'], ['RA', x, 'f8'],
                       ['DEC', y, 'f8'], ['REDSHIFT', z, 'f8'],
                       ['WEIGHT', -1, 'f8'], ['DENSITY', w, 'f8']]
        elif coord == "CARTESIAN":
            columns = [['SOURCE_ID', -1, 'f8'], ['COMOV_X', x, 'f8'],
                       ['COMOV_Y', y, 'f8'], ['COMOV_Z', z, 'f8'],
                       ['WEIGHT', -1, 'f8'], ['DENSITY', w, 'f8']]

        header_keywords = {
            "TELESCOP": "EUCLID",
            "INSTRUME": "LE3GC-MOCKS",
            "FILENAME": fname,
            "CAT_ID": "MOCK",
            "COORD": coord,
            "ANGLE": "DEGREES"
        }

        # fname_nodir = fname[[pos for pos, char in enumerate(fname) if char == '/'][-1]+1:]
        # zshell = fname[fname.find('zshell'):-5]
        # header_keywords = {'TELESCOP' : 'EUCLID  ',
        #                    'INSTRUME' : 'LE3IDSELID',
        #                    'FILENAME' : fname_nodir,
        #                    'CAT_TYPE' : type,
        #                    'CAT_NAME' : 'MOCK-LE3GC',
        #                    'CAT_NOBJ' : x.size,
        #                    'COORD   ' : 'EQUATORIAL',
        #                    'ANGLE   ' : 'DEG     ',
        #                    'COMPLETE' : 1.0,
        #                    'PURITY  ' : 1.0,
        #                    'SELECT  ' : zshell}

        extension = "CATALOG"

        xmlKeys = {
            "pf": "PK_LE3_GC_WindowMultipoles",
            "instr": "LE3_GC_MOCKS",
            "id": "MOCK",
            "coord": coord
        }

        print("Preparing FITS structure")
        types = []
        # keep just wanted columns ...bad but..
        for c in columns:
            types.append((c[0], c[2]))

        hdr = FITSHDR()
        print("+ Add keywords")
        for k in header_keywords:
            hdr[k] = header_keywords[k]

        keep_table = {}
        for c in columns:
            # if required but not provided (placeholder -1), fill with ones
            if np.isscalar(c[1]) and c[1] == -1:
                # add a tmp column with the correct name and position, but filled with ones
                print(str("+  -Col '%s' filled" % c[0]))
                keep_table[c[0]] = np.ones_like(columns[1][1],
                                                dtype=np.float64)
            else:
                # keep column with  requested position in the input file
                print(str("+  -Col '%s' from '%s'" % (c[0], c[1])))
                keep_table[c[0]] = c[1].astype(np.float64)

        fullsize = len(keep_table) * len(c[1]) * 8 / 1024 / 1024.
        print(str("+   ~%.2f MB in memory" % fullsize))

        # now write some data
        print(str("+ Write FITS: %s" % fname))
        fits = FITS(fname, 'rw', clobber=True)
        fits.write_table(data=keep_table, header=hdr, extname=extension)
        fits.close()

        print("+ Preparing XML product")

        with open(fname.replace(input.cat4le3_format, "xml"), "w+") as f:

            f.write(
                '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'''
            )
            if (type == "RANDOM"):
                f.write(
                    '''<p1:DpdLE3gcInputRandCat xmlns:p1="http://euclid.esa.org/schema/dpd/le3/gc/inp/catrandin">\n'''
                )
            else:
                f.write(
                    '''<p1:DpdLE3gcInputDataCat xmlns:p1="http://euclid.esa.org/schema/dpd/le3/gc/inp/catdatain">\n'''
                )
            f.write('''  <Header>\n''')
            f.write(
                str('''    <ProductId>%s</ProductId>\n''' %
                    header_keywords["FILENAME"].split('.')[0]))
            f.write(
                '''    <ProductType>dpdLE3gcInputRandCat</ProductType>\n''')
            f.write('''    <SoftwareName>LE3_GC_test</SoftwareName>\n''')
            f.write('''    <SoftwareRelease>1.0</SoftwareRelease>\n''')
            f.write(
                '''    <ManualValidationStatus>UNKNOWN</ManualValidationStatus>\n'''
            )
            f.write('''    <PipelineRun>LE3_GC_Test_Inputs</PipelineRun>\n''')
            f.write('''    <ExitStatusCode>OK</ExitStatusCode>\n''')
            f.write(
                str('''    <DataModelVersion>%s</DataModelVersion>\n''' %
                    DM_VERSION))
            f.write(
                str('''    <MinDataModelVersion>%s</MinDataModelVersion>\n''' %
                    DM_VERSION))
            f.write('''    <ScientificCustodian>LE3</ScientificCustodian>\n''')
            f.write('''    <AccessRights>\n''')
            f.write(
                '''      <EuclidConsortiumRead>true</EuclidConsortiumRead>\n'''
            )
            f.write(
                '''      <EuclidConsortiumWrite>true</EuclidConsortiumWrite>\n'''
            )
            f.write(
                '''      <ScientificGroupRead>true</ScientificGroupRead>\n''')
            f.write(
                '''      <ScientificGroupWrite>true</ScientificGroupWrite>\n'''
            )
            f.write('''    </AccessRights>\n''')
            f.write('''    <Curator>\n''')
            f.write('''      <Name>SDC-IT</Name>\n''')
            f.write('''    </Curator>\n''')
            f.write(str('''    <Creator>%s</Creator>\n''' % xmlKeys["pf"]))
            f.write(
                '''    <CreationDate>2019-10-31T12:12:12Z</CreationDate>\n''')
            f.write('''  </Header>\n''')
            f.write('''  <Data>\n''')
            f.write(
                str('''  <Instrument>%s</Instrument>\n''' % xmlKeys["instr"]))
            f.write(str('''  <Catalog_ID>%s</Catalog_ID>\n''' % xmlKeys["id"]))
            f.write(str('''  <CoordType>%s</CoordType>\n''' %
                        xmlKeys["coord"]))
            f.write('''  <Catalog format="le3.gc.cat.test" version="0.2">\n''')
            f.write('''    <DataContainer filestatus="PROPOSED">\n''')
            f.write(str('''      <FileName>%s.fits</FileName>\n''' % fname))
            f.write('''    </DataContainer>\n''')
            f.write('''  </Catalog>\n''')
            f.write('''  </Data>\n''')
            if (type == "RANDOM"):
                f.write('''</p1:DpdLE3gcInputRandCat>\n''')
            else:
                f.write('''</p1:DpdLE3gcInputDataCat>\n''')
            f.close()

        print("files %s and %s written" %
              (fname, fname.replace(input.cat4le3_format, "xml")))

    else:

        print("ERROR: unrecognized format in write_catalog")
        sys.exit(-1)
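
As a quick usage sketch, the "txt" branch of write_catalog can be exercised without the module-level `input` configuration that the FITS branch relies on; the array names and sizes below are purely illustrative.

import numpy as np

n = 1000
ra = np.random.uniform(0.0, 10.0, n)
dec = np.random.uniform(-5.0, 5.0, n)
redshift = np.random.uniform(0.9, 1.1, n)
weight = np.ones(n)

write_catalog("mock_catalog.txt", ra, dec, redshift, weight, format="txt")
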
Example No. 14
               ['cl_rand', cl_rand, 'f8']]

    header_keywords = {}

    types = []
    # keep just wanted columns ...bad but..
    for c in columns:
        types.append((c[0], c[2]))

    hdr = FITSHDR()
    print("+ Add keywords")
    for k in header_keywords:
        hdr[k] = header_keywords[k]

    keep_table = {}
    for c in columns:
        # keep column with  requested position in the input file
        print(str("+  -Col '%s'" % (c[0])))
        keep_table[c[0]] = c[1]  #.astype(np.float64)

    fullsize = len(keep_table) * len(c[1]) * 8 / 1024 / 1024.
    print(str("+   ~%.2f MB in memory" % fullsize))

    fname = input.cls_fname(zmin, zmax, run=input.pinocchio_first_run)
    print("## Writing FITS: %s" % fname)
    fitsfile = FITS(fname, 'rw')
    fitsfile.write_table(data=keep_table, header=hdr)
    fitsfile.close()

print("# DONE!")
Example No. 15
def summary_result(aim, mock_n, reff, mock_g1, mock_g2, dx, dy, lens_g1,
                   lens_g2, init_flux, KSB, ID):
    if aim == 'mock_image' or aim == 'HST_decon':
        gain = 1.0
        zero_point = 1.0
        expt = 1.0
        read_noise = 1.0
    elif aim == 'Eu_conv' or aim == 'Euclidiz':
        gain = config.Eu_gain
        zero_point = config.Eu_zp
        expt = config.Eu_expt
        read_noise = config.Eu_rn
    elif aim == 'HST_conv':
        gain = config.HST_gain
        zero_point = config.HST_zp
        expt = config.HST_expt
        read_noise = config.HST_rn
    else:
        raise ValueError("unknown aim: %s" % aim)

    # One-row structured numpy array to hold the results of this run
    dict = np.zeros(1,
                    dtype=[('aim', 'S32'), ('truth_pxscale', 'f8'),
                           ('truth_size', 'f8'), ('mock_idx', 'f8'),
                           ('mock_flux_mag', 'f8'), ('mock_SNR', 'f8'),
                           ('mock_mu', 'f8'), ('mock_n', 'f8'),
                           ('mock_reff', 'f8'), ('mock_g1', 'f8'),
                           ('mock_g2', 'f8'), ('mock_flux', 'f8'),
                           ('dx', 'f8'), ('dy', 'f8'), ('lens_g1', 'f8'),
                           ('lens_g2', 'f8'), ('Eu_rn', 'f8'),
                           ('Eu_gain', 'f8'), ('Eu_expt', 'f8'),
                           ('HST_rn', 'f8'), ('HST_gain', 'f8'),
                           ('HST_expt', 'f8'), ('init_n', 'f8'),
                           ('init_reff', 'f8'), ('init_flux', 'f8'),
                           ('init_flux_mag', 'f8'), ('g1_obs', 'f8'),
                           ('g2_obs', 'f8'),
                           ('g1_cor', 'f8'), ('g2_cor', 'f8'),
                           ('shape_cor_err', 'f8'), ('m_sigma', 'f8'),
                           ('m_amp', 'f8'), ('m_rho4', 'f8'), ('m_n', 'f8'),
                           ('KSB_status', 'f8')])

    dict['aim'] = aim
    dict['truth_pxscale'] = config.truth_pxscale
    dict['truth_size'] = config.truth_size
    dict['mock_idx'] = inp.idx
    dict['mock_flux_mag'] = inp.mock_mag
    dict['mock_SNR'] = inp.Eu_snr
    dict['mock_mu'] = config.mock_mu
    dict['mock_n'] = '%.2f' % mock_n
    dict['mock_reff'] = '%.2f' % reff
    dict['mock_g1'] = '%.4f' % mock_g1
    dict['mock_g2'] = '%.4f' % mock_g2
    dict['mock_flux'] = '%.1f' % config.mock_flux
    dict['dx'] = dx
    dict['dy'] = dy
    dict['lens_g1'] = '%.5f' % lens_g1
    dict['lens_g2'] = '%.5f' % lens_g2
    dict['Eu_rn'] = config.Eu_rn
    dict['Eu_gain'] = config.Eu_gain
    dict['Eu_expt'] = config.Eu_expt
    dict['HST_rn'] = config.HST_rn
    dict['HST_gain'] = config.HST_gain
    dict['HST_expt'] = config.HST_expt
    dict['init_n'] = config.init_n
    dict['init_reff'] = config.init_reff
    # the factor of 1000 matches the flux rescaling used in the model fitting
    # to avoid fitting failures
    dict['init_flux'] = init_flux / 1000
    dict['init_flux_mag'] = '%.2f' % (zero_point - 2.5 * np.log10(
        (init_flux / 1000) * gain / expt))
    #dict['bestfit_flux']='%.5f'%(fit_mod.flux[0]/1000)  # 10000 is the same value you can find in the model fitting to avoid the fitting failure
    #dict['bestfit_flux_mag']='%.2f'%(zero_point - 2.5*np.log10((fit_mod.flux[0]/1000)*gain/expt))
    #dict['bestfit_reff_pix']='%.5f'%fit_mod.r_eff[0]
    #dict['bestfit_reff_arcsec']='%.5f'%(config.truth_pxscale*fit_mod.r_eff[0])
    #dict['bestfit_n']='%.5f'%fit_mod.n[0]
    #dict['bestfit_x0']='%.5f'%fit_mod.x_0[0]
    #dict['bestfit_y0']='%.5f'%fit_mod.y_0[0]
    #dict['bestfit_g1']='%.5f'%fit_mod.g1[0]
    #dict['bestfit_g2']='%.5f'%fit_mod.g2[0]
    dict['g1_obs'] = '%.5f' % KSB.observed_shape.g1
    dict['g2_obs'] = '%.5f' % KSB.observed_shape.g2
    dict['g1_cor'] = '%.5f' % KSB.corrected_g1
    dict['g2_cor'] = '%.5f' % KSB.corrected_g2
    dict['shape_cor_err'] = '%.5f' % KSB.corrected_shape_err
    dict['m_sigma'] = '%.5f' % KSB.moments_sigma
    dict['m_amp'] = '%.5f' % KSB.moments_amp
    dict['m_rho4'] = '%.5f' % KSB.moments_rho4
    dict['m_n'] = float(KSB.moments_n_iter)
    #dict['chi2']=chi2
    #dict['red_chi2']=red_chi2
    #dict['SNR']=SNR
    #dict['nfev']='%.5f'%fitter.fit_info['nfev']
    #dict['fvec']=fitter.fit_info['fvec']
    #dict['ierr']='%.5f'%fitter.fit_info['ierr']
    dict['KSB_status'] = float(KSB.moments_status)

    # Create a FITS table containing a single row of results
    table = make_table_file()
    table.write(config.result_filename + '_' + str(aim) + '_' + 'ID' +
                str(ID) + config.fits)
    f = FITS(
        config.result_filename + '_' + str(aim) + '_' + 'ID' + str(ID) +
        config.fits, 'rw')
    f[-1].append(dict)
    f.close()

    return