Example #1
 def read_maps(self, fname):
     f = FITS(fname, 'r')
     p = f[1].read()  # noqa
     m = f[2].read()
     m = np.array([m[n] for n in f[2].get_colnames()])
     f.close()
     return m
Example #2
def mcalcat_read(path):
    cat = []
    fits = FITS(path, 'r')
    for c in fits[1:]:
        cat.append(
            DataFrame.from_records(
                c.read().byteswap().newbyteorder()).set_index(
                    'coadd_objects_id'))
    fits.close()
    return cat
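The .byteswap().newbyteorder() chain above (and in the next example) is needed because FITS stores tables big-endian, while pandas requires the platform's native byte order. A minimal sketch of the same conversion, assuming a hypothetical catalog.fits with a coadd_objects_id column; note that ndarray.newbyteorder was removed in NumPy 2.0, where rec.byteswap().view(rec.dtype.newbyteorder()) is the equivalent spelling:

from fitsio import FITS
from pandas import DataFrame

with FITS('catalog.fits') as fits:      # hypothetical file name
    rec = fits[1].read()                # big-endian structured array
# swap the bytes in memory, then relabel the dtype as native-endian
native = rec.byteswap().newbyteorder()
df = DataFrame.from_records(native).set_index('coadd_objects_id')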
Example #3
def mcalcat_addzbin(d, zbin):
    fits = FITS(zbin)
    zbin = fits[1].read(columns=[
        'coadd_objects_id', 'zbin_mcal', 'zbin_mcal_1p', 'zbin_mcal_1m',
        'zbin_mcal_2p', 'zbin_mcal_2m'
    ])
    fits.close()
    zbin = zbin.byteswap().newbyteorder()
    zbin = DataFrame.from_records(zbin).set_index('coadd_objects_id')
    zbin = zbin.rename(columns={"zbin_mcal": "zbin"})
    cat = concat([d, zbin], axis=1)

    return cat[cat['zbin'] > -1].dropna()
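The concat/dropna step above implements an index-aligned join: objects missing from either table become NaN rows and are dropped, and zbin == -1 marks sources not assigned to any tomographic bin. A toy sketch of the same selection, with made-up values:

from pandas import DataFrame, concat

d = DataFrame({'e1': [0.1, 0.2, 0.3]}, index=[10, 11, 12])
zbin = DataFrame({'zbin': [2, -1, 0]}, index=[10, 11, 13])
cat = concat([d, zbin], axis=1)        # align on the shared index
cat = cat[cat['zbin'] > -1].dropna()   # keep matched, binned objects only
print(cat)                             # only object 10 survives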
Example #4
 def get_filename_from_fits(self, ffile, ftype):
     fname = None
     try:
         f = FITS(ffile)
         h = f[0].read_header()
         expnum = h['EXPNUM']
         fname = 'DECam_%08i' % expnum
         fname += '.' + ftype
         f.close()
     except Exception as e:
         self.logger.error(str(e))
     finally:
         # Note: returning from finally suppresses any in-flight exception,
         # so this returns None whenever the header could not be read.
         return fname
Example #5
def mcalcat_load(cat):
    fields = [
        'coadd_objects_id', 'ra', 'dec', 'psf_e1', 'psf_e2', 'e1', 'e2', 'R11',
        'R22', 'flags_select'
    ]
    fits = FITS(cat)
    cat = fits[1].read(columns=fields)
    fits.close()
    cat = cat.byteswap().newbyteorder()
    cat = DataFrame.from_records(cat).set_index('coadd_objects_id')
    # Select usable objects
    cat = cat[cat.flags_select == 0]
    # Weights
    cat['w'] = ones(cat.shape[0])
    return cat.drop(['flags_select'], axis=1)
Example #6
def write_fit(data, filename, count=1, clobber=False):
    import fitsio
    from fitsio import FITS, FITSHDR
    import os.path
    from astropy.io import fits
    # On the first call (count == 1): if the file already exists, write
    # nothing unless clobber is set to True
    if count == 1:
        # If the file does not exist, create it and write the data
        if not os.path.isfile(filename):
            hdu = fits.PrimaryHDU()
            hdul = fits.HDUList([hdu])
            corrhdu = fits.BinTableHDU(data, name='')
            hdul.insert(1, corrhdu)
            hdul.writeto(filename, overwrite=clobber)
            print("Creating file: ", filename)
        # If the file exists, overwrite it only when clobber is set
        else:
            if clobber:
                hdu = fits.PrimaryHDU()
                hdul = fits.HDUList([hdu])
                corrhdu = fits.BinTableHDU(data, name='')
                hdul.insert(1, corrhdu)
                hdul.writeto(filename, overwrite=clobber)
                print("Clobering file: ", filename)
            else:
                raise Exception(
                    'You tried to overwrite an existing file, without setting clobber'
                )
    else:
        fits = FITS(filename, 'rw')
        fits[-1].append(data)
        print("Apending File: ", filename)
Example #7
def write_fit(data, file_name):
    import fitsio
    from fitsio import FITS, FITSHDR
    import os.path
    if not os.path.isfile(file_name):
        fitsio.write(file_name, data, clobber=False)
    else:
        fits = FITS(file_name, 'rw')
        fits[-1].append(data)
Example #8
    def load(cls, fname):
        fits_inventory = DESFITSInventory(fname)
        hdus_present = sorted(fits_inventory.raws)

        images = cls()
        with FITS(fname) as fits:
            images.images = [
                DESDataImage.load_from_open(fits, ext) for ext in hdus_present
            ]
        return images
Example #9
 def read_table(filename, hdu=1, subset=None):
     """ 
         Read the first HDU table from a fits file
     """
     file = FITS(filename, upper=True)
     if subset is not None:
         column, start, end = subset
         return numpy.array(file[hdu][column][start:end], copy=True)
     else:
         return numpy.array(file[hdu].read(), copy=True)
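The subset branch above reads a slice of a single column without loading the whole table, which fitsio supports through its fits[hdu][column][start:end] indexing. A usage sketch with hypothetical names:

ra = read_table('catalog.fits', hdu=1, subset=('RA', 0, 1000))  # rows 0-999 of one column
full = read_table('catalog.fits')                               # the whole first table HDU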
Example #10
def write_fit(data, file_name):
    import fitsio
    from fitsio import FITS, FITSHDR
    import os.path
    import pandas
    if not os.path.isfile(file_name):
        fitsio.write(file_name, data, clobber=False)
    else:
        fits = FITS(file_name, 'rw')
        # appending data to the last HDU
        fits[-1].append(data)
Example #11
    def read_image(filename, hdu=0):
        """
            Read the zeroth image HDU from a fits file

        """
        file = FITS(filename, upper=True)
        #    A copy of the data is made before the file object is
        #    dereferenced, so that no references back into the FITS
        #    buffer survive and the file gets closed in a timely
        #    fashion.
        return numpy.array(file[hdu].read(), copy=True)
Example #12
def mcalcat_write(cat, path):
    from os import remove
    from os.path import exists
    if exists(path):
        remove(path)
    fits = FITS(path, 'rw')
    for c in cat:
        fits.write(c.reset_index().to_records(index=False))
    fits.close()
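The reset_index().to_records(index=False) idiom above turns an indexed DataFrame back into a NumPy structured array, the form fitsio knows how to write, with the index restored as an ordinary column. A minimal sketch with made-up data:

from pandas import DataFrame

df = DataFrame({'coadd_objects_id': [1, 2], 'e1': [0.1, -0.2]})
df = df.set_index('coadd_objects_id')
rec = df.reset_index().to_records(index=False)  # structured array; the index is a column again
print(rec.dtype.names)                          # ('coadd_objects_id', 'e1')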
Example #13
def write_fit(data, file_name):
    import fitsio
    from fitsio import FITS, FITSHDR
    import os.path
    if not os.path.isfile(file_name):
        fitsio.write(file_name, data, clobber=False)
    else:
        try:
            fits = FITS(file_name, 'rw')
            fits[-1].append(data)
        except OSError as e:
            print("Ignoring OSError from writing:", data['expnum'])
            print(e)
Example #14
def write_fit(data, names, filename):
    import fitsio
    from fitsio import FITS, FITSHDR
    import os.path
    if not os.path.isfile(filename):
        fits = FITS(filename, 'rw')
        fits.write(data, names=names, clobber=False)
        print("Writing file: ", filename)
    else:
        fits = FITS(filename, 'rw')
        fits[-1].append(data)
        print("Apending File: ", filename)
Example #15
def random_preprocess():

    randoms = fitsio.read('flagship_randoms_v2.fits')
    ra, dec, jk_label = randoms["RA"], randoms["DEC"], randoms["JK_LABEL"]
    centers = np.loadtxt("flagship_jk_centers_v2.txt")

    for jk in range(len(centers)):

        jk_mask = jk_label != jk
        random_jk = {"RA": ra[jk_mask], "DEC": dec[jk_mask]}

        fits = FITS('data/random_jk_' + str(jk) + '.fits', 'rw')
        fits.write(random_jk, names=["RA", "DEC"])
        fits.close()

    return None
Example #16
 def read_maps(self, fname):
     f = FITS(fname, 'r')
     p = f[0].read()  # noqa
     m = f[1].read()
     f.close()
     return m
Example #17
 def write_maps(self, maps, fname):
     f = FITS(fname, 'rw', clobber=True)
     f.write(self.goodpix)
     f.write(maps)
     f.close()
Example #18
def write_catalog(fname,
                  x,
                  y,
                  z,
                  w,
                  type="DATA",
                  format="fits",
                  coord="PSEUDO_EQUATORIAL"):
    if format == "txt":
        np.savetxt(fname, np.transpose([x, y, z, w]))

    elif format == "fits":
        # currently supported Data Model (DM) version
        DM_VERSION = "2.5.0"

        # Extracted from AsciiToFits.py written by Daniele Tavagnacco

        if coord == "PSEUDO_EQUATORIAL":
            columns = [['SOURCE_ID', -1, 'f8'], ['RA', x, 'f8'],
                       ['DEC', y, 'f8'], ['REDSHIFT', z, 'f8'],
                       ['WEIGHT', -1, 'f8'], ['DENSITY', w, 'f8']]
        elif coord == "CARTESIAN":
            columns = [['SOURCE_ID', -1, 'f8'], ['COMOV_X', x, 'f8'],
                       ['COMOV_Y', y, 'f8'], ['COMOV_Z', z, 'f8'],
                       ['WEIGHT', -1, 'f8'], ['DENSITY', w, 'f8']]

        header_keywords = {
            "TELESCOP": "EUCLID",
            "INSTRUME": "LE3GC-MOCKS",
            "FILENAME": fname,
            "CAT_ID": "MOCK",
            "COORD": coord,
            "ANGLE": "DEGREES"
        }

        # fname_nodir = fname[[pos for pos, char in enumerate(fname) if char == '/'][-1]+1:]
        # zshell = fname[fname.find('zshell'):-5]
        # header_keywords = {'TELESCOP' : 'EUCLID  ',
        #                    'INSTRUME' : 'LE3IDSELID',
        #                    'FILENAME' : fname_nodir,
        #                    'CAT_TYPE' : type,
        #                    'CAT_NAME' : 'MOCK-LE3GC',
        #                    'CAT_NOBJ' : x.size,
        #                    'COORD   ' : 'EQUATORIAL',
        #                    'ANGLE   ' : 'DEG     ',
        #                    'COMPLETE' : 1.0,
        #                    'PURITY  ' : 1.0,
        #                    'SELECT  ' : zshell}

        extension = "CATALOG"

        xmlKeys = {
            "pf": "PK_LE3_GC_WindowMultipoles",
            "instr": "LE3_GC_MOCKS",
            "id": "MOCK",
            "coord": coord
        }

        print("Preparing FITS structure")
        types = []
        # keep just the wanted columns ... crude, but it works
        for c in columns:
            types.append((c[0], c[2]))

        hdr = FITSHDR()
        print("+ Add keywords")
        for k in header_keywords:
            hdr[k] = header_keywords[k]

        keep_table = {}
        for c in columns:
            # if a column is required but not provided (flagged with -1), fill it with ones.
            # Note: the original test "c[1] is -1" relied on small-int caching; checking
            # for a provided array is more robust.
            if not isinstance(c[1], np.ndarray):
                # add a placeholder column with the correct name, holding only ones
                print(str("+  -Col '%s' filled" % c[0]))
                keep_table[c[0]] = np.ones_like(columns[1][1],
                                                dtype=np.float64)
            else:
                # keep the provided column, cast to float64
                print(str("+  -Col '%s' from '%s'" % (c[0], c[1])))
                keep_table[c[0]] = c[1].astype(np.float64)

        fullsize = len(keep_table) * len(c[1]) * 8 / 1024 / 1024.
        print(str("+   ~%.2f MB in memory" % fullsize))

        # now write some data
        print(str("+ Write FITS: %s" % fname))
        fits = FITS(fname, 'rw', clobber=True)
        fits.write_table(data=keep_table, header=hdr, extname=extension)
        fits.close()

        print("+ Preparing XML product")

        with open(fname.replace(input.cat4le3_format, "xml"), "w+") as f:

            f.write(
                '''<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'''
            )
            if (type == "RANDOM"):
                f.write(
                    '''<p1:DpdLE3gcInputRandCat xmlns:p1="http://euclid.esa.org/schema/dpd/le3/gc/inp/catrandin">\n'''
                )
            else:
                f.write(
                    '''<p1:DpdLE3gcInputDataCat xmlns:p1="http://euclid.esa.org/schema/dpd/le3/gc/inp/catdatain">\n'''
                )
            f.write('''  <Header>\n''')
            f.write(
                str('''    <ProductId>%s</ProductId>\n''' %
                    header_keywords["FILENAME"].split('.')[0]))
            f.write(
                '''    <ProductType>dpdLE3gcInputRandCat</ProductType>\n''')
            f.write('''    <SoftwareName>LE3_GC_test</SoftwareName>\n''')
            f.write('''    <SoftwareRelease>1.0</SoftwareRelease>\n''')
            f.write(
                '''    <ManualValidationStatus>UNKNOWN</ManualValidationStatus>\n'''
            )
            f.write('''    <PipelineRun>LE3_GC_Test_Inputs</PipelineRun>\n''')
            f.write('''    <ExitStatusCode>OK</ExitStatusCode>\n''')
            f.write(
                str('''    <DataModelVersion>%s</DataModelVersion>\n''' %
                    DM_VERSION))
            f.write(
                str('''    <MinDataModelVersion>%s</MinDataModelVersion>\n''' %
                    DM_VERSION))
            f.write('''    <ScientificCustodian>LE3</ScientificCustodian>\n''')
            f.write('''    <AccessRights>\n''')
            f.write(
                '''      <EuclidConsortiumRead>true</EuclidConsortiumRead>\n'''
            )
            f.write(
                '''      <EuclidConsortiumWrite>true</EuclidConsortiumWrite>\n'''
            )
            f.write(
                '''      <ScientificGroupRead>true</ScientificGroupRead>\n''')
            f.write(
                '''      <ScientificGroupWrite>true</ScientificGroupWrite>\n'''
            )
            f.write('''    </AccessRights>\n''')
            f.write('''    <Curator>\n''')
            f.write('''      <Name>SDC-IT</Name>\n''')
            f.write('''    </Curator>\n''')
            f.write(str('''    <Creator>%s</Creator>\n''' % xmlKeys["pf"]))
            f.write(
                '''    <CreationDate>2019-10-31T12:12:12Z</CreationDate>\n''')
            f.write('''  </Header>\n''')
            f.write('''  <Data>\n''')
            f.write(
                str('''  <Instrument>%s</Instrument>\n''' % xmlKeys["instr"]))
            f.write(str('''  <Catalog_ID>%s</Catalog_ID>\n''' % xmlKeys["id"]))
            f.write(str('''  <CoordType>%s</CoordType>\n''' %
                        xmlKeys["coord"]))
            f.write('''  <Catalog format="le3.gc.cat.test" version="0.2">\n''')
            f.write('''    <DataContainer filestatus="PROPOSED">\n''')
            f.write(str('''      <FileName>%s.fits</FileName>\n''' % fname))
            f.write('''    </DataContainer>\n''')
            f.write('''  </Catalog>\n''')
            f.write('''  </Data>\n''')
            if (type == "RANDOM"):
                f.write('''</p1:DpdLE3gcInputRandCat>\n''')
            else:
                f.write('''</p1:DpdLE3gcInputDataCat>\n''')

        print("files %s and %s written" %
              (fname, fname.replace(input.cat4le3_format, "xml")))

    else:

        print("ERROR: unrecognized format in write_catalog")
        sys.exit(-1)
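A hedged usage sketch for write_catalog, assuming the module-level input.cat4le3_format equals "fits" (the function derives the XML file name from it) and using random points as stand-ins for a real mock:

import numpy as np

n = 1000
ra = np.random.uniform(0.0, 360.0, n)    # degrees
dec = np.random.uniform(-10.0, 10.0, n)  # degrees
z = np.random.uniform(0.9, 1.1, n)       # redshift shell
w = np.ones(n)                           # uniform density/weights

write_catalog('mock_zshell.fits', ra, dec, z, w, type="DATA", format="fits")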
Example #19
               ['cl_rand', cl_rand, 'f8']]

    header_keywords = {}

    types = []
    # keep just the wanted columns ... crude, but it works
    for c in columns:
        types.append((c[0], c[2]))

    hdr = FITSHDR()
    print("+ Add keywords")
    for k in header_keywords:
        hdr[k] = header_keywords[k]

    keep_table = {}
    for c in columns:
        # keep the column provided in the input
        print(str("+  -Col '%s'" % (c[0])))
        keep_table[c[0]] = c[1]  #.astype(np.float64)

    fullsize = len(keep_table) * len(c[1]) * 8 / 1024 / 1024.
    print(str("+   ~%.2f MB in memory" % fullsize))

    fname = input.cls_fname(zmin, zmax, run=input.pinocchio_first_run)
    print("## Writing FITS: %s" % fname)
    fitsfile = FITS(fname, 'rw')
    fitsfile.write_table(data=keep_table, header=hdr)
    fitsfile.close()

print("# DONE!")
Example #20
 def read_metadata(filename, hdu=0):
     """ 
         Read the metadata of a HDU table
     """
     file = FITS(filename, upper=True)
     return dict(file[hdu].read_header())
Example #21
 def size_table(filename, hdu=1):
     """ 
         Read the first HDU table from a fits file
     """
     file = FITS(filename, upper=True)
     return file[hdu].get_nrows()
Example #22
File: run.py Project: DavidT3/XGA
def execute_cmd(x_script: str, out_file: str, src: str, run_type: str, timeout: float) \
        -> Tuple[Union[FITS, str], str, bool, list, list]:
    """
    This function is called for the local compute option. It will run the supplied XSPEC script, then
    parse the output for errors and check that the expected output file has been created.

    :param str x_script: The path to an XSPEC script to be run.
    :param str out_file: The expected path for the output file of that XSPEC script.
    :param str src: A string representation of the source object that this fit is associated with.
    :param str run_type: A flag that tells this function what type of run this is; e.g. fit or conv_factors.
    :param float timeout: The length of time (in seconds) which the XSPEC script is allowed to run for before being
        killed.
    :return: FITS object of the results, string repr of the source associated with this fit, boolean variable
        describing if this fit can be used, list of any errors found, list of any warnings found.
    :rtype: Tuple[Union[FITS, str], str, bool, list, list]
    """
    if XSPEC_VERSION is None:
        raise XSPECNotFoundError(
            "There is no XSPEC installation detectable on this machine.")

    # We assume the output will be usable to start with
    usable = True

    cmd = "xspec - {}".format(x_script)
    # I add exec to the beginning to make sure that the command inherits the same process ID as the shell, which
    #  allows the timeout to kill the XSPEC run rather than the shell process. Entirely thanks to slayton on
    #   https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
    xspec_proc = Popen("exec " + cmd, shell=True, stdout=PIPE, stderr=PIPE)

    # This makes sure the process is killed if it does timeout
    try:
        out, err = xspec_proc.communicate(timeout=timeout)
    except TimeoutExpired:
        xspec_proc.kill()
        out, err = xspec_proc.communicate()
        # Need to infer the name of the source to supply it in the warning
        source_name = x_script.split('/')[-1].split("_")[0]
        warnings.warn("An XSPEC fit for {} has timed out".format(source_name))
        usable = False

    out = out.decode("UTF-8").split("\n")
    err = err.decode("UTF-8").split("\n")

    err_out_lines = [
        line.split("***Error: ")[-1] for line in out if "***Error" in line
    ]
    warn_out_lines = [
        line.split("***Warning: ")[-1] for line in out if "***Warning" in line
    ]
    err_err_lines = [
        line.split("***Error: ")[-1] for line in err if "***Error" in line
    ]
    warn_err_lines = [
        line.split("***Warning: ")[-1] for line in err if "***Warning" in line
    ]

    # usable only stays True if no errors were found in stdout or stderr
    usable = usable and len(err_out_lines) == 0 and len(err_err_lines) == 0

    error = err_out_lines + err_err_lines
    warn = warn_out_lines + warn_err_lines
    if os.path.exists(out_file + "_info.csv") and run_type == "fit":
        # The original version of the xga_output.tcl script output everything as one nice neat fits file
        #  but life is full of extraordinary inconveniences and for some reason it didn't work if called from
        #  a Jupyter Notebook. So now I'm going to smoosh all the csv outputs into one fits.
        results = pd.read_csv(out_file + "_results.csv", header="infer")
        # This is the csv with the fit results in, creates new fits file and adds in
        fitsio.write(out_file + ".fits",
                     results.to_records(index=False),
                     extname="results",
                     clobber=True)
        del results

        # The information about individual spectra, exposure times, luminosities etc.
        spec_info = pd.read_csv(out_file + "_info.csv", header="infer")
        # Gets added into the existing file
        fitsio.write(out_file + ".fits",
                     spec_info.to_records(index=False),
                     extname="spec_info")
        del spec_info

        # This finds all of the matching spectrum plot csvs that were generated
        rel_path = "/".join(out_file.split('/')[0:-1])
        # This is mostly just used to find how many files there are
        spec_tabs = [
            rel_path + "/" + sp for sp in os.listdir(rel_path)
            if "{}_spec".format(out_file) in rel_path + "/" + sp
        ]
        for spec_i in range(1, len(spec_tabs) + 1):
            # Loop through and redefine names like this to ensure they're in the right order
            spec_plot = pd.read_csv(out_file + "_spec{}.csv".format(spec_i),
                                    header="infer")
            # Adds all the plot tables into the existing fits file in the right order
            fitsio.write(out_file + ".fits",
                         spec_plot.to_records(index=False),
                         extname="plot{}".format(spec_i))
            del spec_plot

        # This reads in the fits we just made
        with FITS(out_file + ".fits") as res_tables:
            tab_names = [tab.get_extname() for tab in res_tables]
            if "results" not in tab_names or "spec_info" not in tab_names:
                usable = False
        # I'm going to try returning the file path as that should be pickleable
        res_tables = out_file + ".fits"
    elif os.path.exists(out_file) and run_type == "conv_factors":
        res_tables = out_file
        usable = True
    else:
        res_tables = None
        usable = False

    return res_tables, src, usable, error, warn
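The "exec" trick at the top of execute_cmd is worth isolating: with shell=True, Popen normally wraps the command in a shell, so kill() would terminate the shell and leave the child running. Prefixing exec makes the command replace the shell process. A minimal self-contained sketch of that timeout pattern:

from subprocess import Popen, PIPE, TimeoutExpired

# "exec" makes sleep replace the shell, so proc.kill() reaches it directly
proc = Popen("exec sleep 60", shell=True, stdout=PIPE, stderr=PIPE)
try:
    out, err = proc.communicate(timeout=5)
except TimeoutExpired:
    proc.kill()                    # kills the command, not just a wrapper shell
    out, err = proc.communicate()  # collect whatever output was produced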
Example #23
def force_phot(filenames, ra_deg, dec_deg, fits_out, nsigma=5,
               keys2add=None, keys2add_dtypes=None, tel='ML1'):

    # initialize rows to be converted to fits table
    rows = []

    # convert keys2add and keys2add_dtypes to lists (both default to None)
    keys2add_list = keys2add.upper().split(',') if keys2add else []
    dtypes_list = keys2add_dtypes.split(',') if keys2add_dtypes else []

    # loop through [filenames]
    for nfile, filename in enumerate(filenames):

        # start row
        row = [filename]
        
        # read in header and data
        data, header = read_hdulist (filename, get_header=True)


        # add header keywords to output table
        for key in keys2add_list:
            if key in header:
                row.append (header[key])
            else:
                row.append (None)
                log.error ('key {} not in header of {}'
                           .format(key, filename))


        # convert input [ra_deg] and [dec_deg] to pixel coordinates
        xcoord, ycoord = WCS(header).all_world2pix(ra_deg, dec_deg, 1)

        # skip if coordinates not on the image
        ysize, xsize = data.shape
        dpix_edge = 10
        if (xcoord < dpix_edge or xcoord > xsize-dpix_edge or
            ycoord < dpix_edge or ycoord > ysize-dpix_edge):

            log.warning ('pixel coordinates (x,y)=({},{}) not on image for {}'
                         '; skipping it'.format(int(xcoord), int(ycoord),
                                                filename))
            continue


        # determine optimal flux
        # ----------------------

        # read PSFEx binary fits table
        base = filename.split('.fits')[0]
        psfex_bintable = '{}_psf.fits'.format(base)


        # background STD
        fits_bkg_std = '{}_bkg_std.fits'.format(base)
        if os.path.exists(fits_bkg_std):
            data_bkg_std = read_hdulist (fits_bkg_std, dtype='float32')
        else:
            # if it does not exist, create it from the background mesh
            fits_bkg_std_mini = '{}_bkg_std_mini.fits'.format(base)
            data_bkg_std_mini, header_mini = read_hdulist (
                fits_bkg_std_mini, get_header=True, dtype='float32')

            if 'BKG-SIZE' in header_mini:
                bkg_size = header_mini['BKG-SIZE']
            else:
                bkg_size = get_par(set_zogy.bkg_boxsize,tel)


            if True:
                # determine scalar bkg_std value from mini image at
                # xcoord, ycoord
                x_index_mini = int(int(xcoord-0.5)/bkg_size)
                y_index_mini = int(int(ycoord-0.5)/bkg_size)
                data_bkg_std = data_bkg_std_mini[y_index_mini, x_index_mini]

            else:
                # determine full bkg_std image from mini image

                # determine whether interpolation is allowed across different
                # channels in [mini2back] using function get_Xchan_bool
                chancorr = get_par(set_zogy.MLBG_chancorr,tel)
                interp_Xchan_std = get_Xchan_bool (tel, chancorr, 'new',
                                                   std=True)

                data_bkg_std = mini2back (data_bkg_std_mini, data.shape,
                                          order_interp=1, bkg_boxsize=bkg_size,
                                          interp_Xchan=interp_Xchan_std,
                                          timing=get_par(set_zogy.timing,tel))


        # data mask
        fits_mask = filename.replace('_red.fits', '_mask.fits')
        data_mask = read_hdulist (fits_mask, dtype='uint8')


        # object mask - needs to be created with source extractor; for
        # now, assume entire image is masked, leading to local
        # background not being used
        objmask = np.ones (data.shape, dtype=bool)


        # determine optimal fluxes at pixel coordinates
        flux_opt, fluxerr_opt = get_psfoptflux (
            psfex_bintable, data, data_bkg_std**2, data_mask,
            np.array([xcoord]), np.array([ycoord]),
            imtype='new', fwhm=header['PSF-FWHM'],
            D_objmask=objmask, set_zogy=set_zogy, tel=tel)

        # flux_opt and fluxerr_opt are 1-element arrays
        s2n = flux_opt[0] / fluxerr_opt[0]

        # convert fluxes to magnitudes by applying the zeropoint
        keys = ['EXPTIME', 'FILTER', 'DATE-OBS']
        exptime, filt, obsdate = [header[key] for key in keys]

        # get zeropoint from [header]
        if 'PC-ZP' in header:
            zp = header['PC-ZP']
        else:
            log.warning ('keyword PC-ZP not in header of {}; skipping it'
                         .format(filename))
            continue

        # determine object airmass, unless input image is a combined
        # image
        if 'R-V' in header or 'R-COMB-M' in header:
            airmass = 1.0
        else:
            lat = get_par(set_zogy.obs_lat,tel)
            lon = get_par(set_zogy.obs_lon,tel)
            height = get_par(set_zogy.obs_height,tel)
            airmass = get_airmass(ra_deg, dec_deg, obsdate, lat, lon, height)


        log.info ('airmass: {}'.format(airmass))
        ext_coeff = get_par(set_zogy.ext_coeff,tel)[filt]       
        mag_opt, magerr_opt = apply_zp (flux_opt, zp, airmass, exptime, filt,
                                        ext_coeff, fluxerr=fluxerr_opt)


        # read limiting magnitude at pixel coordinates; NaN if the limmag image is absent
        limmag = np.nan
        filename_limmag = filename.replace('_red.fits', '_red_limmag.fits')

        if os.path.isfile(filename_limmag):

            x_index = int(xcoord-0.5)
            y_index = int(ycoord-0.5)

            t0 = time.time()
            try:
                data_limmag = FITS(filename_limmag)[-1]
                limmag = data_limmag[y_index, x_index][0][0]
            except Exception:
                data_limmag = read_hdulist(filename_limmag)
                limmag = data_limmag[y_index, x_index]

            # convert limmag from 5-sigma to nsigma
            limmag += -2.5*np.log10(nsigma/5)



        row += [mag_opt[0], magerr_opt[0], s2n, limmag]

        # append row to rows
        rows.append(row)



    # initialize output table
    names = ['FILENAME']
    dtypes = ['U100']
    for ikey, key in enumerate(keys2add_list):
        names.append(key)
        dtype_str = dtypes_list[ikey]
        if dtype_str in ['float', 'int', 'bool']:
            dtype = eval(dtype_str)
        else:
            dtype = dtype_str

        dtypes.append(dtype)
            
    names += ['MAG_OPT', 'MAGERR_OPT', 'S2N', 'LIMMAG_{}SIGMA'.format(nsigma)]
    dtypes += [float, float, float, float]
    
    if len(rows) == 0:
        # rows without entries: create empty table
        table = Table(names=names, dtype=dtypes)
    else: 
        table = Table(rows=rows, names=names, dtype=dtypes)
        
        
    # save output fits table
    table.write(fits_out, overwrite=True)
Example #24
 def __init__(self, fname):
     self.hdr = []
     with FITS(fname) as fits:
         for hdu in fits:
             self.hdr.append(hdu.read_header())
Example #25
 def write_maps(self, maps, fname):
     f = FITS(fname, 'rw', clobber=True)
     f.write([self.goodpix], names=['pixels'])
     names = ['map_%d' % i for i in range(len(maps))]
     f.write(list(maps), names=names)
     f.close()
Example #26
def summary_result(aim, mock_n, reff, mock_g1, mock_g2, dx, dy, lens_g1,
                   lens_g2, init_flux, KSB, ID):
    if aim == 'mock_image' or aim == 'HST_decon':
        gain = 1.0
        zero_point = 1.0
        expt = 1.0
        read_noise = 1.0
    elif aim == 'Eu_conv' or aim == 'Euclidiz':
        gain = config.Eu_gain
        zero_point = config.Eu_zp
        expt = config.Eu_expt
        read_noise = config.Eu_rn
    elif aim == 'HST_conv':
        gain = config.HST_gain
        zero_point = config.HST_zp
        expt = config.HST_expt
        read_noise = config.HST_rn

    # One-row structured array (named dict here, shadowing the builtin) storing
    # a row of results for each run of the code
    dict = np.zeros(1,
                    dtype=[('aim', 'S32'), ('truth_pxscale', 'f8'),
                           ('truth_size', 'f8'), ('mock_idx', 'f8'),
                           ('mock_flux_mag', 'f8'), ('mock_SNR', 'f8'),
                           ('mock_mu', 'f8'), ('mock_n', 'f8'),
                           ('mock_reff', 'f8'), ('mock_g1', 'f8'),
                           ('mock_g2', 'f8'), ('mock_flux', 'f8'),
                           ('dx', 'f8'), ('dy', 'f8'), ('lens_g1', 'f8'),
                           ('lens_g2', 'f8'), ('Eu_rn', 'f8'),
                           ('Eu_gain', 'f8'), ('Eu_expt', 'f8'),
                           ('HST_rn', 'f8'), ('HST_gain', 'f8'),
                           ('HST_expt', 'f8'), ('init_n', 'f8'),
                           ('init_reff', 'f8'), ('init_flux', 'f8'),
                           ('init_flux_mag', 'f8'), ('g1_obs', 'f8'),
                           ('g2_obs', 'f8'),
                           ('g1_cor', 'f8'), ('g2_cor', 'f8'),
                           ('shape_cor_err', 'f8'), ('m_sigma', 'f8'),
                           ('m_amp', 'f8'), ('m_rho4', 'f8'), ('m_n', 'f8'),
                           ('KSB_status', 'f8')])

    dict['aim'] = aim
    dict['truth_pxscale'] = config.truth_pxscale
    dict['truth_size'] = config.truth_size
    dict['mock_idx'] = inp.idx
    dict['mock_flux_mag'] = inp.mock_mag
    dict['mock_SNR'] = inp.Eu_snr
    dict['mock_mu'] = config.mock_mu
    dict['mock_n'] = '%.2f' % mock_n
    dict['mock_reff'] = '%.2f' % reff
    dict['mock_g1'] = '%.4f' % mock_g1
    dict['mock_g2'] = '%.4f' % mock_g2
    dict['mock_flux'] = '%.1f' % config.mock_flux
    dict['dx'] = dx
    dict['dy'] = dy
    dict['lens_g1'] = '%.5f' % lens_g1
    dict['lens_g2'] = '%.5f' % lens_g2
    dict['Eu_rn'] = config.Eu_rn
    dict['Eu_gain'] = config.Eu_gain
    dict['Eu_expt'] = config.Eu_expt
    dict['HST_rn'] = config.HST_rn
    dict['HST_gain'] = config.HST_gain
    dict['HST_expt'] = config.HST_expt
    dict['init_n'] = config.init_n
    dict['init_reff'] = config.init_reff
    # 1000 is the same scale factor used in the model fitting to avoid the fitting failure
    dict['init_flux'] = init_flux / 1000
    dict['init_flux_mag'] = '%.2f' % (zero_point - 2.5 * np.log10(
        (init_flux / 1000) * gain / expt))
    #dict['bestfit_flux']='%.5f'%(fit_mod.flux[0]/1000)  # 10000 is the same value you can find in the model fitting to avoid the fitting failure
    #dict['bestfit_flux_mag']='%.2f'%(zero_point - 2.5*np.log10((fit_mod.flux[0]/1000)*gain/expt))
    #dict['bestfit_reff_pix']='%.5f'%fit_mod.r_eff[0]
    #dict['bestfit_reff_arcsec']='%.5f'%(config.truth_pxscale*fit_mod.r_eff[0])
    #dict['bestfit_n']='%.5f'%fit_mod.n[0]
    #dict['bestfit_x0']='%.5f'%fit_mod.x_0[0]
    #dict['bestfit_y0']='%.5f'%fit_mod.y_0[0]
    #dict['bestfit_g1']='%.5f'%fit_mod.g1[0]
    #dict['bestfit_g2']='%.5f'%fit_mod.g2[0]
    dict['g1_obs'] = '%.5f' % KSB.observed_shape.g1
    dict['g2_obs'] = '%.5f' % KSB.observed_shape.g2
    dict['g1_cor'] = '%.5f' % KSB.corrected_g1
    dict['g2_cor'] = '%.5f' % KSB.corrected_g2
    dict['shape_cor_err'] = '%.5f' % KSB.corrected_shape_err
    dict['m_sigma'] = '%.5f' % KSB.moments_sigma
    dict['m_amp'] = '%.5f' % KSB.moments_amp
    dict['m_rho4'] = '%.5f' % KSB.moments_rho4
    dict['m_n'] = float(KSB.moments_n_iter)
    #dict['chi2']=chi2
    #dict['red_chi2']=red_chi2
    #dict['SNR']=SNR
    #dict['nfev']='%.5f'%fitter.fit_info['nfev']
    #dict['fvec']=fitter.fit_info['fvec']
    #dict['ierr']='%.5f'%fitter.fit_info['ierr']
    dict['KSB_status'] = float(KSB.moments_status)

    # Create a FITS table containing one line of results
    table = make_table_file()
    table.write(config.result_filename + '_' + str(aim) + '_' + 'ID' +
                str(ID) + config.fits)
    f = FITS(
        config.result_filename + '_' + str(aim) + '_' + 'ID' + str(ID) +
        config.fits, 'rw')
    f[-1].append(dict)
    f.close()

    return
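The write-then-append pattern above, reduced to its essentials: writing a one-row structured array creates the table extension, and f[-1].append adds further rows to it. A stripped-down sketch with hypothetical fields and file name:

import numpy as np
from fitsio import FITS

row = np.zeros(1, dtype=[('aim', 'S32'), ('g1_obs', 'f8')])
row['aim'] = 'mock_image'
row['g1_obs'] = 0.01

with FITS('results.fits', 'rw', clobber=True) as f:  # hypothetical output file
    f.write(row)       # creates the binary-table extension
    f[-1].append(row)  # appends a second row to that table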
Example #27
def incomplete_shear_preprocess(zmin, zmax):

    shear = fitsio.read("source_v2.fits",
                        columns=[
                            "ra_gal", "dec_gal", "observed_redshift_gal",
                            "gamma1", "gamma2"
                        ])
    ra, dec, z = shear["ra_gal"], shear["dec_gal"], shear[
        "observed_redshift_gal"]
    g1, g2 = shear["gamma1"], shear["gamma2"]

    dz = 0.05 * (1 + z)  # redshift uncertainties
    z = np.random.normal(z, dz)  # perturbed redshifts

    mask = (z > zmax + 0.1)
    z = z[mask]
    ra, dec = ra[mask], dec[mask]
    g1, g2 = g1[mask], g2[mask]

    print("length of the catalog after applying the cut", len(ra))
    coord = np.vstack([ra, dec]).T
    centers = np.loadtxt("flagship_jk_centers_v2.txt")
    NJK = centers.shape[0]
    print("Segmentation begins!")
    gal_labels_jk = kmeans_radec.find_nearest(coord, centers)
    print("Done with assigning jacknife labels to galaxies")

    gals = {
        "RA": ra,
        "DEC": dec,
        "gamma1": g1,
        "gamma2": g2,
        "redshift": z,
        "JK_LABEL": gal_labels_jk
    }

    fits = FITS('data/incomplete_shear_zmax_' + str(round(zmax, 1)) + '.fits',
                'rw')
    fits.write(gals,
               names=["RA", "DEC", "gamma1", "gamma2", "redshift", "JK_LABEL"])
    fits.close()

    for jk in range(len(centers)):

        gal_jk_mask = gals["JK_LABEL"] != jk
        gal_jk = {
            "RA": ra[gal_jk_mask],
            "DEC": dec[gal_jk_mask],
            "gamma1": g1[gal_jk_mask],
            "gamma2": g2[gal_jk_mask],
            "redshift": z[gal_jk_mask]
        }

        fits = FITS(
            'data/incomplete_shear_zmax_' + str(round(zmax, 1)) + '_jk_' +
            str(jk) + '.fits', 'rw')
        fits.write(gal_jk, names=["RA", "DEC", "gamma1", "gamma2", "redshift"])
        fits.close()

    return None
Example #28
File: run.py Project: DavidT3/XGA
    def wrapper(*args, **kwargs):
        # The first argument of all of these XSPEC functions will be the source object (or a list of),
        # so rather than return them from the XSPEC model function I'll just access them like this.
        if isinstance(args[0], BaseSource):
            sources = [args[0]]
        elif isinstance(args[0], (list, BaseSample)):
            sources = args[0]
        else:
            raise TypeError(
                "Please pass a source object, or a list of source objects.")

        # This is the output from whatever function this is a decorator for
        # First return is a list of paths of XSPEC scripts to execute, second is the expected output paths,
        #  and 3rd is the number of cores to use.
        # run_type describes the type of XSPEC script being run, for instance a fit or a fakeit run to measure
        #  countrate to luminosity conversion constants
        script_list, paths, cores, run_type, src_inds, radii, timeout = xspec_func(
            *args, **kwargs)
        src_lookup = {
            repr(src): src_ind
            for src_ind, src in enumerate(sources)
        }
        rel_src_repr = [repr(sources[src_ind]) for src_ind in src_inds]

        # Make sure the timeout is converted to seconds, then just stored as a float
        timeout = timeout.to('second').value

        # This is what the returned information from the execute command gets stored in before being parceled out
        #  to source and spectrum objects
        results = {s: [] for s in src_lookup}
        if run_type == "fit":
            desc = "Running XSPEC Fits"
        elif run_type == "conv_factors":
            desc = "Running XSPEC Simulations"

        if len(script_list) > 0:
            # This mode runs the XSPEC locally in a multiprocessing pool.
            with tqdm(total=len(script_list),
                      desc=desc) as fit, Pool(cores) as pool:

                def callback(results_in):
                    """
                    Callback function for the apply_async pool method, gets called when a task finishes
                    and something is returned.
                    """
                    nonlocal fit  # The progress bar will need updating
                    nonlocal results  # The dictionary the command call results are added to
                    if results_in[0] is None:
                        fit.update(1)
                        return
                    else:
                        res_fits, rel_src, successful, err_list, warn_list = results_in
                        results[rel_src].append(
                            [res_fits, successful, err_list, warn_list])
                        fit.update(1)

                for s_ind, s in enumerate(script_list):
                    pth = paths[s_ind]
                    src = rel_src_repr[s_ind]
                    pool.apply_async(execute_cmd,
                                     args=(s, pth, src, run_type, timeout),
                                     callback=callback)
                pool.close()  # No more tasks can be added to the pool
                pool.join()  # Joins the pool; the code will only move on once the pool is empty

        elif len(script_list) == 0:
            warnings.warn("All XSPEC operations had already been run.")

        # Now we assign the fit results to source objects
        for src_repr in results:
            # Made this lookup list earlier, using string representations of source objects.
            # Finds the ind of the list of sources that we should add these results to
            ind = src_lookup[src_repr]
            s = sources[ind]

            # This flag tells this method if the current set of fits are part of an annular spectra or not
            ann_fit = False
            ann_results = {}
            ann_lums = {}
            ann_obs_order = {}

            for res_set in results[src_repr]:
                if len(res_set) != 0 and res_set[1] and run_type == "fit":
                    with FITS(res_set[0]) as res_table:
                        global_results = res_table["RESULTS"][0]
                        model = global_results["MODEL"].strip(" ")

                        # Just define this to check if this is an annular fit or not
                        first_key = res_table["SPEC_INFO"][0][
                            "SPEC_PATH"].strip(" ").split("/")[-1].split(
                                'ra')[-1]
                        first_key = first_key.split('_spec.fits')[0]
                        if "_ident" in first_key:
                            ann_fit = True

                        inst_lums = {}
                        obs_order = []
                        for line_ind, line in enumerate(
                                res_table["SPEC_INFO"]):
                            sp_info = line["SPEC_PATH"].strip(" ").split(
                                "/")[-1].split("_")
                            # Want to derive the spectra storage key from the file name, this strips off some
                            #  unnecessary info
                            sp_key = line["SPEC_PATH"].strip(" ").split(
                                "/")[-1].split('ra')[-1].split('_spec.fits')[0]

                            # If its not an AnnularSpectra fit then we can just fetch the spectrum from the source
                            #  the normal way
                            if not ann_fit:
                                # This adds ra back on, and removes any ident information if it is there
                                sp_key = 'ra' + sp_key
                                # Finds the appropriate matching spectrum object for the current table line
                                spec = s.get_products("spectrum",
                                                      sp_info[0],
                                                      sp_info[1],
                                                      extra_key=sp_key)[0]
                            else:
                                obs_order.append([sp_info[0], sp_info[1]])
                                ann_id = int(
                                    sp_key.split("_ident")[-1].split("_")[1])
                                sp_key = 'ra' + sp_key.split('_ident')[0]
                                first_part = sp_key.split('ri')[0]
                                second_part = "_" + "_".join(
                                    sp_key.split('ro')[-1].split("_")[1:])

                                ann_sp_key = first_part + "ar" + "_".join(
                                    radii[ind].value.astype(str)) + second_part
                                ann_specs = s.get_products(
                                    "combined_spectrum", extra_key=ann_sp_key)
                                if len(ann_specs) > 1:
                                    raise MultipleMatchError(
                                        "I have found multiple matches for that AnnularSpectra; "
                                        "this is the developer's fault, not yours."
                                    )
                                elif len(ann_specs) == 0:
                                    raise NoMatchFoundError(
                                        "Somehow I haven't found the AnnularSpectra that you "
                                        "fitted; this is the developer's fault, not yours."
                                    )
                                else:
                                    ann_spec = ann_specs[0]
                                    spec = ann_spec.get_spectra(
                                        ann_id, sp_info[0], sp_info[1])

                            # Adds information from this fit to the spectrum object.
                            spec.add_fit_data(
                                str(model), line,
                                res_table["PLOT" + str(line_ind + 1)])

                            # The add_fit_data method formats the luminosities nicely, so we grab them back out
                            #  to help grab the luminosity needed to pass to the source object 'add_fit_data' method
                            processed_lums = spec.get_luminosities(model)
                            if spec.instrument not in inst_lums:
                                inst_lums[spec.instrument] = processed_lums

                        # Ideally the luminosity reported in the source object will be a PN lum, but it's not
                        #  impossible that a PN value won't be available - it shouldn't matter much, as lums
                        #  across the cameras are consistent
                        if "pn" in inst_lums:
                            chosen_lums = inst_lums["pn"]
                        # mos2 generally better than mos1, as mos1 has CCD damage after a certain point in its life
                        elif "mos2" in inst_lums:
                            chosen_lums = inst_lums["mos2"]
                        else:
                            chosen_lums = inst_lums["mos1"]

                        if ann_fit:
                            ann_results[spec.annulus_ident] = global_results
                            ann_lums[spec.annulus_ident] = chosen_lums
                            ann_obs_order[spec.annulus_ident] = obs_order

                        elif not ann_fit:
                            # Push global fit results, luminosities etc. into the corresponding source object.
                            s.add_fit_data(model, global_results, chosen_lums,
                                           sp_key)

                elif len(res_set) != 0 and res_set[1] and run_type == "conv_factors":
                    res_table = pd.read_csv(res_set[0],
                                            dtype={
                                                "lo_en": str,
                                                "hi_en": str
                                            })
                    # Gets the model name from the file name of the output results table
                    model = res_set[0].split("_")[-3]

                    # We can infer the storage key from the name of the results table, just makes it easier to
                    #  grab the correct spectra
                    storage_key = res_set[0].split('/')[-1].split(
                        s.name)[-1][1:].split(model)[0][:-1]

                    # Grabs the ObsID+instrument combinations from the headers of the csv. Makes sure they are unique
                    #  by going to a set (because there will be two columns for each ObsID+Instrument, rate and Lx)
                    # First two columns are skipped because they are energy limits
                    combos = list(
                        set([c.split("_")[1] for c in res_table.columns[2:]]))
                    # Getting the spectra for each column, then assigning rates and lums
                    for comb in combos:
                        spec = s.get_products("spectrum",
                                              comb[:10],
                                              comb[10:],
                                              extra_key=storage_key)[0]
                        spec.add_conv_factors(
                            res_table["lo_en"].values,
                            res_table["hi_en"].values,
                            res_table["rate_{}".format(comb)].values,
                            res_table["Lx_{}".format(comb)].values, model)

                elif len(res_set) != 0 and not res_set[1]:
                    for err in res_set[2]:
                        raise XSPECFitError(err)

            if ann_fit:
                # We fetch the annular spectra object that we just fitted, searching by using the set ID of
                #  the last spectra that was opened in the loop
                ann_spec = s.get_annular_spectra(set_id=spec.set_ident)
                try:
                    ann_spec.add_fit_data(model, ann_results, ann_lums,
                                          ann_obs_order)

                    # The most likely reason for running XSPEC fits to a profile is to create a temp. profile,
                    #  so we check whether constant*tbabs*apec (the single_temp_apec function) has been run and,
                    #  if so, generate a Tx profile automatically
                    if model == "constant*tbabs*apec":
                        temp_prof = ann_spec.generate_profile(
                            model, 'kT', 'keV')
                        s.update_products(temp_prof)

                        # Normalisation profiles can be useful for many things, so we generate them too
                        norm_prof = ann_spec.generate_profile(
                            model, 'norm', 'cm^-5')
                        s.update_products(norm_prof)

                        if 'Abundanc' in ann_spec.get_results(
                                0, 'constant*tbabs*apec'):
                            met_prof = ann_spec.generate_profile(
                                model, 'Abundanc', '')
                            s.update_products(met_prof)

                    else:
                        raise NotImplementedError(
                            "How have you even managed to fit this model to a profile?! It's not"
                            " supported yet.")
                except ValueError:
                    warnings.warn(
                        "{src} annular spectra profile fit was not successful".
                        format(src=ann_spec.src_name))

        # If only one source was passed, turn it back into a source object rather than a source
        # object in a list.
        if len(sources) == 1:
            sources = sources[0]
        return sources
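The pool-with-callback machinery near the top of this wrapper is a reusable pattern: apply_async hands each script to a worker, while the callback, running in the parent process, collects results and advances the progress bar. A minimal sketch of just that pattern:

from multiprocessing import Pool
from tqdm import tqdm

def work(x):
    return x * x

if __name__ == '__main__':
    results = []
    with tqdm(total=4) as bar, Pool(2) as pool:
        def callback(res):
            results.append(res)  # runs in the parent; collects each finished task
            bar.update(1)        # advance the progress bar from the callback
        for x in range(4):
            pool.apply_async(work, args=(x,), callback=callback)
        pool.close()  # no more tasks can be added
        pool.join()   # wait until every task has finished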
Example #29
def gal_preprocess(zmin, zmax):

    lens = fitsio.read("lens.fits",
                       columns=["ra_gal", "dec_gal", "observed_redshift_gal"])
    ra, dec, z = lens["ra_gal"], lens["dec_gal"], lens["observed_redshift_gal"]
    mask = (z > zmin) & (z < zmax)
    ra, dec, z = ra[mask], dec[mask], z[mask]
    print("length of the catalog after applying the cut", len(ra))
    coord = np.vstack([ra, dec]).T
    centers = np.loadtxt("flagship_jk_centers_v2.txt")
    NJK = centers.shape[0]
    print("Segmentation begins!")
    gal_labels_jk = kmeans_radec.find_nearest(coord, centers)
    print("Done with assigning jacknife labels to galaxies")

    gals = {"RA": ra, "DEC": dec, "redshift": z, "JK_LABEL": gal_labels_jk}

    fits = FITS('data/gal_zmax_' + str(zmax) + '.fits', 'rw')
    fits.write(gals, names=["RA", "DEC", "redshift", "JK_LABEL"])
    fits.close()

    for jk in range(len(centers)):

        gal_jk_mask = gals["JK_LABEL"] != jk
        gal_jk = {
            "RA": ra[gal_jk_mask],
            "DEC": dec[gal_jk_mask],
            "redshift": z[gal_jk_mask]
        }

        fits = FITS('data/gal_zmax_' + str(zmax) + '_jk_' + str(jk) + '.fits',
                    'rw')
        fits.write(gal_jk, names=["RA", "DEC", "redshift"])
        fits.close()

    return None
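Both jackknife loops in Examples #27 and #29 follow the same leave-one-region-out scheme: the catalogue for region jk contains every galaxy except those labelled jk. The masking step in isolation, with made-up labels:

import numpy as np

jk_labels = np.array([0, 1, 2, 0, 1])  # hypothetical region assignments
for jk in range(3):
    keep = jk_labels != jk             # leave region jk out
    print(jk, np.flatnonzero(keep))    # indices written to catalogue jk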