Example #1
    def fromfits(cls, f):
        mbsc = cls()

        sc = np.squeeze(f['ARRAYDATA-MBFITS'].data.DATA)
        ts_mjd = f['DATAPAR-MBFITS'].data['MJD']
        ts_az  = f['DATAPAR-MBFITS'].data['AZIMUTH']
        ts_el  = f['DATAPAR-MBFITS'].data['ELEVATIO']
        ts_daz = f['DATAPAR-MBFITS'].data['LONGOFF'] * 3600.0 # arcsec
        ts_del = f['DATAPAR-MBFITS'].data['LATOFF'] * 3600.0 # arcsec
        alist = [ts_mjd, ts_az, ts_el, ts_daz, ts_del]
        names = ['MJD', 'AZ', 'EL', 'DAZ', 'DEL']
        dtypes = ['f8', 'f8', 'f8', 'f8', 'f8']
        record = np.rec.fromarrays(alist, zip(names, dtypes))

        hdu_data = fits.PrimaryHDU(sc)
        hdu_record = fits.BinTableHDU(record)

        hdu_data.header['ORIGIN'] = os.path.basename(f.filename())
        hdu_data.header['EXTNAME'] = 'DATA'
        hdu_record.header['EXTNAME'] = 'RECORD'

        mbsc.append(hdu_data)
        mbsc.append(hdu_record)

        return mbsc
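A short usage sketch for the classmethod above. The names are assumptions: the class (called MBScan here) is not shown in the snippet, and the file is taken to be an MBFITS file containing the ARRAYDATA-MBFITS and DATAPAR-MBFITS extensions the method reads.

from astropy.io import fits  # successor to pyfits; `import pyfits as fits` also works here

# Hypothetical: MBScan stands in for whatever class defines fromfits().
with fits.open('scan.mbfits') as f:
    mbsc = MBScan.fromfits(f)  # builds the DATA and RECORD HDUs from the MBFITS extensions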
Example #2
 def write(self, outfile):
     """
     Write the current object catalog to a FITS file.
     """
         
     hdu = pyfits.BinTableHDU(self.data)
     hdu.writeto(outfile, clobber=True)
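The pattern above, wrapping a record array in a BinTableHDU and writing it out, is worth a self-contained sketch. This one assumes astropy.io.fits (the maintained successor to pyfits), where clobber is spelled overwrite:

import numpy as np
from astropy.io import fits

# One row per object, one field per table column.
data = np.zeros(3, dtype=[('ID', 'i8'), ('RA', 'f8'), ('DEC', 'f8')])
data['ID'] = [1, 2, 3]
data['RA'] = [150.10, 150.12, 150.14]
data['DEC'] = [2.20, 2.21, 2.22]

hdu = fits.BinTableHDU(data, name='CATALOG')
hdu.writeto('catalog.fits', overwrite=True)  # equivalent of clobber=True in old pyfits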
Example #3
    def __init__(self, hdu=None):
        """
        An LDAC table can be instantiated either as an empty table
        or from an existing pyfits binary table HDU.
        """

        if hdu is None:
            self.hdu = pyfits.BinTableHDU()
            self.hdu.data = None

            # We make sure that the table has 'some' proper name:
            self.hdu.name = "DEFAULT"
        else:
            self.hdu = hdu
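Usage might look like the lines below; this is a sketch that borrows the class name LDACCat from Example #25 and invents the file and extension names.

import pyfits

empty = LDACCat()  # empty table, EXTNAME set to "DEFAULT"
existing = LDACCat(pyfits.open('catalog_ldac.fits')['LDAC_OBJECTS'])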
Example #4
File: lsd.py Project: adrn/PTF
def saveLightCurvesRadial(ra,
                          dec,
                          filename="",
                          radius=10,
                          overwrite=False,
                          skip=False):
    """ Given one set of coordinates, download all
        light curves around x arcminutes from the cluster center

        Parameters
        -----------
        filename : string
            The name of the output file
        ra : float, apwlib.geometry.RA
            A Right Ascension in decimal degrees
        dec : float, apwlib.geometry.Dec
            A Declination in decimal degrees
        radius : float, optional
            Radius in arcminutes to search for light curves around the given ra, dec
        overwrite : bool, optional
            If a pickle exists, do you want to overwrite it?
    """
    ra = c.parseDegrees(ra)
    dec = c.parseDegrees(dec)
    radiusDegrees = radius / 60.0

    logging.debug("{0},{1} with radius={2} deg".format(ra, dec, radiusDegrees))

    if filename.strip() == "":
        outputFilename = os.path.join(
            "data", "lightcurves",
            "{0}_{1}.fits".format(ra.degrees, dec.degrees))
    else:
        outputFilename = filename
    logging.debug("Output file: {0}".format(outputFilename))

    if os.path.exists(outputFilename) and not overwrite:
        raise IOError("{0} already exists!".format(outputFilename))
    elif os.path.exists(outputFilename) and overwrite:
        logging.debug("You've chosen to overwrite the file!")
        os.remove(outputFilename)
        logging.debug("File deleted.")

    lightCurves = getLightCurvesRadial(ra, dec, radiusDegrees)

    hdu = pf.BinTableHDU(lightCurves)
    hdu.writeto(outputFilename)

    return
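A hypothetical call, assuming the module's own imports (c for apwlib coordinate parsing, pf for pyfits, getLightCurvesRadial) are in place and the data/lightcurves directory exists:

# Fetch everything within 15 arcminutes of a made-up position and let the
# function build its default output path under data/lightcurves/.
saveLightCurvesRadial(322.493, 12.167, radius=15, overwrite=True)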
Example #5
def mwrfits(mydict, filename, clobber=False, ascii=False):
    """
    Write an dictionary as a binary table in a FITS file.

    Shape order is reverted to satisfy the FITS conventions
    (quick axis is first in FITS and second in python)

    Arguments
    ---------
    mydict: dict of ndarrays
    filename: string
      The name of the written fits file.
    clobber: bool
       Overwrites the output file if True.

    Returns
    --------
    Returns nothing.
    
    Exceptions
    ---------
    ValueError if mydict is not a dict of arrays
    """
    # check that dict contains only arrays
    for k, v in mydict.iteritems():
        if not isinstance(v, np.ndarray):
            raise ValueError("Expected a dict of arrays.")

    # convert dict of ndarray as an array of size 1
    mydtype = list()
    for k, v in mydict.iteritems():
        mydtype.append((k, str(v.shape) + v.dtype.str))
    arr = np.zeros(1, dtype=mydtype)
    for k, v in mydict.iteritems():
        arr[k] = v
    if ascii:
        raise NotImplementedError()  # hdu = pyfits.TableHDU(arr) # not working !
    else:
        hdu = pyfits.BinTableHDU(arr)
    # shape order is reversed to satisfy the FITS convention
    # (the quick axis is first in FITS and second in Python)
    for i, v in enumerate(mydict.values()):
        hdu.header.update('TDIM' + str(i + 1), str(v.shape[::-1]))
    hdu.writeto(filename, clobber=clobber)
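For example, writing two arrays in one call (a sketch, assuming the function above is importable and pyfits is installed):

import numpy as np

mwrfits({'flux': np.random.rand(16, 32),   # stored as a single-row binary table
         'ivar': np.ones((16, 32))},
        'cube.fits', clobber=True)          # TDIMn records the reversed (32, 16) shape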
Example #6
 def _get(self, key):
     '''Attempts to load the right index and populate a new value'''
     tmp = Cat(self.filename, setup=False)
     # Convert a single integer key into a slice
     if isinstance(key,int):
         if key < 0:
             key += len(self)
         key = slice(key,key+1)
     # tmp.fits = pyfits.new_table(self.data[key].columns)
     tmp.fits = pyfits.BinTableHDU(self.data[key],
                                   header=self.header,
                                   name=self.name)
     tmp.name = self.name
     tmp.data = tmp.fits.data
     tmp.columns = tmp.fits.data.columns
     tmp.header = self.header
     tmp.names = self.names
     
     return tmp
Example #7
File: IO.py Project: kvyh/kpy
def write_cube(spectra):
    """Create a FITS file with all spectra written."""

    recarr = convert_spectra_to_recarray(spectra)

    img, img2 = convert_spectra_to_img(spectra, CRVAL1, CRPIX1)

    f1 = pf.PrimaryHDU(img)
    f2 = pf.ImageHDU(np.zeros((10, 10)))
    t3 = pf.BinTableHDU(recarr)
    f4 = pf.ImageHDU(img2)
    f5 = pf.ImageHDU(np.zeros((10, 10)))

    f4.header['CRVAL1'] = CRVAL1
    f4.header['CRPIX1'] = -CRPIX1
    f4.header['CTYPE1'] = 'WAVE-LOG'
    f4.header['CUNIT1'] = 'NM'

    towrite = pf.HDUList([f1, f2, t3, f4, f5])
    towrite.writeto('test.fits', clobber=True)
Example #8
def precomputeCoordinates(infile, outfile):
    import numpy.lib.recfunctions as rec
    hdu = pyfits.open(infile)
    data = hdu[1].data
    columns = hdu[1].columns
    names = [n.lower() for n in hdu[1].data.names]

    if 'glon' not in names and 'glat' not in names:
        logger.info("Writing 'GLON' and 'GLAT' columns")
        glon, glat = ugali.utils.projector.celToGal(data['RA'], data['DEC'])
        out = rec.append_fields(data,['GLON','GLAT'],[glon,glat],
                                usemask=False,asrecarray=True)
    elif 'ra' not in names and 'dec' not in names:
        logger.info("Writing 'RA' and 'DEC' columns")
        ra, dec = ugali.utils.projector.galToCel(data['GLAT'], data['GLON'])
        out = rec.append_fields(data,['RA','DEC'],[ra,dec],
                                usemask=False,asrecarray=True)
    
    hdu_out = pyfits.BinTableHDU(out)
    hdu_out.writeto(outfile, clobber=True)
Example #9
def select_rows(tbhdu, indices):
    """
    Read rows(indexes) from given HDU (catalog)
    
    A new table with only the asked table indexes,
    'indices', is output.
    
    Input:
     - tbhdu     [BinTableHDU] : FITS table HDU
     - indices          [int,] : List of indexes to read from tbhdu
    
    Output:
     -> (new) BinTableHDU : sub-selection (rows) of tbhdu

    ---
    """
    
    data = tbhdu.data.take(indices)

    return pyfits.BinTableHDU(data)
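Usage might look like this (a sketch; the file name and row numbers are placeholders):

import pyfits

tbhdu = pyfits.open('catalog.fits')[1]
subset = select_rows(tbhdu, [0, 5, 42])  # keep only these three rows
subset.writeto('catalog_subset.fits', clobber=True)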
Example #10
    def _parsercp(rcpfile):
        with open(rcpfile) as f:
            d = json.load(f, object_pairs_hook=OrderedDict)

        pixel = np.array(d.keys(), int)
        g_pnt = np.array([d[key]['Gain_point'] for key in d], float)
        g_ext = np.array([d[key]['Gain_extended'] for key in d], float)
        p_daz = -np.array([d[key]['dAz'] for key in d], float)
        p_del = -np.array([d[key]['dEl'] for key in d], float)

        alist = [pixel, p_daz, p_del, g_pnt, g_ext]
        names = ['PIXEL', 'DAZ', 'DEL', 'GAIN_PNT', 'GAIN_EXT']
        dtypes = ['i8', 'f8', 'f8', 'f8', 'f8']
        rcp = np.rec.fromarrays(alist, zip(names, dtypes))

        hdu_rcp = fits.BinTableHDU(rcp)
        hdu_rcp.header['EXTNAME'] = 'RCP'
        hdu_rcp.header['ORIGIN'] = os.path.basename(rcpfile)

        return hdu_rcp
Example #11
def bootstrap_realization(realization, filename, photonList, photonData,
                          counts):
    """Create random samples of the photon list and write them to fits files"""

    np.random.seed()
    indices = np.random.randint(0, counts, size=counts)
    photons = np.sort(photonList[indices])
    bootstrapHDU = pyfits.BinTableHDU(photonData[1].data[photons],
                                      photonData[1].header)

    bootstrapHDU.header['NAXIS2'] = counts
    bootstrapList = pyfits.HDUList(
        [photonData[0], bootstrapHDU, photonData[2]])
    # Here we assume filename ends in .fits
    fn, ext = os.path.splitext(filename)
    outfile = '%s_bs_%d%s' % (fn, realization, ext)

    if os.path.isfile(outfile):
        os.remove(outfile)

    bootstrapList.writeto(outfile)
Example #12
    def writein(self, fitsname):
        f = pyfits.open(fitsname)
        newcol = pyfits.Column(name='EFFICIENCY_PARS',
                               format='6E',
                               array=self.final_results)
        newdata = pyfits.FITS_rec.from_columns([newcol])
        table_hdu = pyfits.BinTableHDU(newdata, name='EFFICIENCY_PARAMS')
        headkeys = []
        for key in [
                'EXTNAME', 'TELESCOP', 'INSTRUME', 'FILTER', 'HDUCLASS',
                'HDUCLAS1', 'HDUCLAS2', 'HDUVERS', 'CCLS0001', 'CDTP0001',
                'CCNM0001', 'CBD10001', 'CBD20001', 'CBD30001', 'CBD40001',
                'CBD50001', 'CBD60001', 'CBD70001', 'CBD80001', 'CBD90001',
                'CVSD0001', 'CVST0001', 'CDES0001'
        ]:
            headkeys.append([key, f[3].header[key]])
        f[3] = table_hdu
        for pars in headkeys:
            f[3].header[pars[0]] = pars[1]

        f.writeto(fitsname, clobber=True, checksum=True)
        f.close()
Example #13
    def write_fits(self):
        """
        Write the FITS log
        """

        import time
        import getpass

        formats = {}
        formats['bool'] = 'L'
        formats['int16'] = 'I'
        formats['int32'] = 'J'
        formats['int64'] = 'K'
        formats['float32'] = 'E'
        formats['float64'] = 'D'

        formats['>i8'] = 'K'
        formats['>f8'] = 'D'

        #### Make the table columns, translating numpy data types to "TFORM"
        coldefs = []
        TFORM = 'A' + str(np.array(self.images).dtype).split('S')[1]

        coldefs.append(
            pyfits.Column(name='images',
                          array=np.array(self.images),
                          format=TFORM))

        for column in self.params.keys():
            if column == 'comment':
                coldata = np.array(self.params['comment'])
            else:
                coldata = self.params[column]
            #
            dtype = str(coldata.dtype)
            #print column, dtype
            if dtype in formats.keys():
                TFORM = formats[dtype]
            else:
                if 'S' not in dtype:
                    print 'Unrecognized data type in: %s' % (dtype)
                    return False
                #
                TFORM = 'A' + dtype.split('S')[1]
            #
            #data = self.params[column]
            if '>' in dtype:
                cast_types = {'>i8': np.int64, '>f8': np.float64}
                coldata = np.cast[cast_types[dtype]](coldata)
            #
            coldefs.append(
                pyfits.Column(name=column, array=coldata, format=TFORM))

        #### Done, now make the binary table
        if pyfits.__version__ < '3.3':
            tbhdu = pyfits.new_table(coldefs)
        else:
            tbhdu = pyfits.BinTableHDU.from_columns(coldefs)

        linehdu = pyfits.ImageHDU(data=self.marked_lines, name='LINELIST')

        #### Primary HDU
        hdu = pyfits.PrimaryHDU()
        thdulist = pyfits.HDUList([hdu, tbhdu, linehdu])

        #### Add modification time of "infile" to FITS header
        infile_mod_time = time.strftime(
            "%m/%d/%Y %I:%M:%S %p",
            time.localtime())  # os.path.getmtime(self.filename)))

        if pyfits.__version__ < '3.3':
            thdulist[0].header.update('MODTIME', infile_mod_time)
            thdulist[0].header.update('USER', getpass.getuser())
        else:
            thdulist[0].header['MODTIME'] = infile_mod_time
            thdulist[0].header['USER'] = getpass.getuser()

        thdulist.writeto(self.logfile, clobber=True)

        print 'Log to file %s' % (self.logfile)
Example #14
    new.write(DirIni + '/' + nameOutput)
else:
    print 'Merged file already found.'

#Generate tables for each tomographic bin

#TomoBin=[0.101, 0.301, 0.501, 0.701, 0.901, 10]
TomoBin = [0.101, 0.301, 0.501, 0.701, 0.901, 1.201, 2.001]
print 'Splitting the merged catalogue into different tomographic bins'
for kk in range(0, len(TomoBin) - 1):
    t = pyfits.open(DirIni + '/' + nameOutput)
    tbdata = t[1].data

    ## Old 4-band photometry
    mask = (tbdata['ZB4_in'] >= TomoBin[kk]) & (tbdata['ZB4_in'] <
                                                TomoBin[kk + 1])
    newtbdata = tbdata[mask]
    hdu = pyfits.BinTableHDU(data=newtbdata)
    print DirIni, kk
    hdu.writeto(DirIni + '/' + 'MasterCat_Tomo4Bin_' + str(kk + 1) + '.fits',
                clobber=True)

    ## New 9-band photometry
    mask = (tbdata['ZB9_in'] >= TomoBin[kk]) & (tbdata['ZB9_in'] <
                                                TomoBin[kk + 1])
    newtbdata = tbdata[mask]
    hdu = pyfits.BinTableHDU(data=newtbdata)
    print DirIni, kk
    hdu.writeto(DirIni + '/' + 'MasterCat_Tomo9Bin_' + str(kk + 1) + '.fits',
                clobber=True)
Example #15
def main(rerun, dataIds, fakes, root='/lustre/Subaru/SSP', rad=10):

    doCoadd = 'tract' in dataIds[0].keys()
    butler = dafPer.Butler(os.path.join(root, "rerun", rerun))

    #read in fits file, replace with txt file or anything else
    fits = pyfits.open(fakes)
    data = fits[1].data
    radecCat = loadRaDec(data)
    ndata = len(data)
    datamask = np.ones(ndata, dtype=bool)
    ids = data["ID"] if "ID" in data.names else range(len(data))
    idDict = dict(zip(ids, xrange(ndata)))

    for dataId in dataIds:
        print dataId
        try:
            sources = butler.get('deepCoadd_src' if doCoadd else 'src',
                                 dataId,
                                 immediate=True,
                                 flags=afwTable.SOURCE_IO_NO_FOOTPRINTS)
            cal_md = butler.get('deepCoadd_md' if doCoadd else 'calexp_md',
                                dataId,
                                immediate=True)
            calexp = butler.get('deepCoadd' if doCoadd else 'calexp',
                                dataId,
                                immediate=True)
        except:
            print "skipping", dataId
            continue

        if False:
            matches = afwTable.matchRaDec(sources, radecCat,
                                          3.3 * afwGeom.arcseconds)
            for (src, fake, d) in matches:
                datamask[idDict[fake.getId()]] = False

        msk = calexp.getMaskedImage().getMask()
        detected = msk.clone()
        detected &= msk.getPlaneBitMask("DETECTED")
        wcs = calexp.getWcs()
        count, good_count = 0, 0
        for i_d, datum in enumerate(radecCat):
            pixCoord = afwGeom.Point2I(wcs.skyToPixel(datum.getCoord()))
            pixBox = afwGeom.BoxI(pixCoord, afwGeom.Extent2I(1, 1))
            pixBox.grow(rad)
            pixBox.clip(calexp.getBBox(afwImage.PARENT))
            if pixBox.isEmpty():
                continue
            else:
                count += 1
                subMask = afwImage.MaskU(detected, pixBox, afwImage.PARENT)
                if sum(subMask.getArray().ravel()) != 0:
                    datamask[i_d] = False
                else:
                    good_count += 1
        print count, good_count

    newdata = data[datamask]
    print ndata, len(newdata)
    hdu = pyfits.BinTableHDU(newdata)
    hdu.writeto('blank_sources.fits', clobber=True)
Example #16
def solve(args):
    """ 
    Calculate posterior distributions for model parameters given the data.
    """

    # Make some checks
    _check_analysis_args(args)

    # Load the model and the data
    model = sick.models.Model(args.model)
    all_spectra = _parse_and_load_spectra(args)

    # Display some information about the model
    logger.info("Model information: {0}".format(model))
    logger.info("Configuration:")
    map(logger.info, yaml.dump(model.configuration).split("\n"))

    # Define headers that we want in the results filename
    default_headers = (
        "RA",
        "DEC",
        "COMMENT",
        "ELAPSED",
        "FIBRE_NUM",
        "LAT_OBS",
        "LONG_OBS",
        "MAGNITUDE",
        "NAME",
        "OBJECT",
        "RO_GAIN",
        "RO_NOISE",
        "UTEND",
        "UTDATE",
        "UTSTART",
    )
    default_metadata = {
        "model": model.hash,
        "input_filenames": ", ".join(args.spectrum_filenames),
        "sick_version": sick.__version__,
    }

    if args.read_from_filename:
        with open(args.spectrum_filenames[0], "r") as fp:
            all_filenames = map(str.strip, fp.readlines())

    # For each source, solve
    for i, spectra in enumerate(all_spectra, start=1):

        # Force spectra as a list
        if not isinstance(spectra, (list, tuple)):
            spectra = [spectra]

        logger.info("Starting on object #{0} (RA {1}, DEC {2} -- {3})".format(
            i, spectra[0].headers.get("RA", "None"),
            spectra[0].headers.get("DEC", "None"),
            spectra[0].headers.get("OBJECT", "Unknown")))

        # Create metadata and put header information in
        if args.skip > i - 1:
            logger.info("Skipping object #{0}".format(i))
            continue

        if args.number_to_solve != "all" and i > (int(args.number_to_solve) +
                                                  args.skip):
            logger.info("We have analysed {0} spectra. Exiting..".format(
                args.number_to_solve))
            break

        # If there are many spectra to analyse, include the run ID in the output filenames.
        # Update filename prefix if we are reading from a file
        if args.read_from_filename:
            filename_prefix = sick.utils.default_output_prefix(
                all_filenames[i].split())

        else:
            filename_prefix = args.filename_prefix

        if len(all_spectra) > 1 and not args.read_from_filename:
            output = lambda x: os.path.join(
                args.output_dir, "-".join([filename_prefix,
                                           str(i), x]))
        else:
            output = lambda x: os.path.join(args.output_dir, "-".join(
                [filename_prefix, x]))

        # Does a solution already exist for this star? If so are we authorised to clobber it?
        if os.path.exists(output("result.json")):
            if not args.clobber:
                logger.info("Skipping object #{0} as a results file already exists"\
                    " ({1}) and we have been asked not to clobber it".format(i,
                        output("result.json")))
                continue
            else:
                logger.warn("Overwriting existing file {0}".format(
                    output("result.json")))

        metadata = {}
        header_columns = []
        for header in default_headers:
            if header not in spectra[0].headers: continue
            header_columns.append(header)
            metadata[header] = spectra[0].headers[header]

        metadata.update({"run_id": i})
        metadata.update(default_metadata)

        # Determine an initial point
        try:
            initial_theta, initial_r_chi_sq = model.initial_theta(spectra)

        except:
            logger.exception("Failed to get initial point")
            if args.debug: raise
            continue


        logger.info("Initial theta point with reduced chi_sq = {0:.2f} is {1}"\
            .format(initial_r_chi_sq, model._dictify_theta(initial_theta)))

        # Save metadata about the initial point
        metadata["initial_theta"] = model._dictify_theta(initial_theta)
        metadata["initial_r_chi_sq"] = initial_r_chi_sq

        # Produce a plot projecting the initial value
        if args.plotting:
            projected_filename = output("projected-initial-theta.{}".format(
                args.plot_format))

            fig = sick.plot.projection(model, spectra, theta=initial_theta)
            fig.savefig(projected_filename)
            logger.info("Created figure {}".format(projected_filename))

        # Optimise the point
        if model.configuration["settings"]["optimise"]:
            optimised_theta, optimised_r_chi_sq, info = model.optimise(
                spectra,
                initial_theta=initial_theta,
                fixed=["z"] + ["z.{}".format(c) for c in model.channels])

            logger.info("Optimised theta is {0}".format(
                model._dictify_theta(optimised_theta)))
            walker_theta = optimised_theta

            # Save metadata about the optimised value
            metadata["optimised_theta"] = model._dictify_theta(optimised_theta)
            metadata["optimised_r_chi_sq"] = optimised_r_chi_sq

            if args.plotting:
                projected_filename = output(
                    "projected-optimised-theta.{}".format(args.plot_format))

                fig = sick.plot.projection(model,
                                           spectra,
                                           theta=optimised_theta)
                fig.savefig(projected_filename)
                logger.info("Created figure {}".format(projected_filename))

        else:
            # MCMC initial point will be the initial point
            walker_theta = initial_theta

        try:
            posteriors, sampler, info = model.infer(spectra,
                                                    theta=walker_theta)

        except:
            logger.exception("Failed to analyse source #{0}:".format(i))
            if args.debug: raise

        else:

            # Update results with the posteriors
            logger.info("Posteriors:")
            max_parameter_len = max(map(len, model.parameters))
            for parameter in model.parameters:
                posterior_value, pos_uncertainty, neg_uncertainty = posteriors[
                    parameter]
                logger.info("    {0}: {1:.2e} ({2:+.2e}, {3:+.2e})".format(
                    parameter.rjust(max_parameter_len), posterior_value,
                    pos_uncertainty, neg_uncertainty))

                metadata.update({
                    parameter:
                    posterior_value,
                    "u_maxabs_{0}".format(parameter):
                    np.abs([neg_uncertainty, pos_uncertainty]).max(),
                    "u_pos_{0}".format(parameter):
                    pos_uncertainty,
                    "u_neg_{0}".format(parameter):
                    neg_uncertainty,
                })

            # Save information related to the analysis
            metadata.update(
                dict(
                    zip(["mean_snr_{}".format(c) for c in model.channels], [
                        np.nanmean(s.flux / (s.variance**0.5)) for s in spectra
                    ])))

            chain_filename = output("chain.fits")
            metadata.update({
                "reduced_chi_sq":
                info["reduced_chi_sq"],
                "maximum_log_probability":
                np.nanmax(info["lnprobability"]),
                "chain_filename":
                chain_filename if args.save_chain_files else "",
                "time_elapsed":
                info["time_elapsed"],
                "final_mean_acceptance_fraction":
                info["mean_acceptance_fractions"][-1],
                "model_configuration":
                model.configuration
            })
            for channel, length in info["autocorrelation_times"].iteritems():
                metadata["tau_{}".format(channel)] = length

            walkers = model.configuration["settings"]["walkers"]
            chain_length = info["chain"].shape[0] * info["chain"].shape[1]
            chain = np.core.records.fromarrays(
                np.vstack([
                    np.arange(1, 1 + chain_length),
                    np.arange(1, 1 + chain_length) % walkers,
                    info["chain"].reshape(-1, len(model.parameters)).T,
                    info["lnprobability"].reshape(-1, 1).T
                ]),
                names=["Iteration", "Sample"] + model.parameters +
                ["ln_probability"],
                formats=["i4", "i4"] + ["f8"] * (1 + len(model.parameters)))

            # Save the chain
            if args.save_chain_files:
                logger.info("Saving chains to {0}".format(chain_filename))
                primary_hdu = pyfits.PrimaryHDU()
                table_hdu = pyfits.BinTableHDU(chain)
                hdulist = pyfits.HDUList([primary_hdu, table_hdu])
                hdulist.writeto(chain_filename, clobber=True)

            else:
                logger.warn("Chain not saved to disk.")

            # Write the result to disk
            logger.info("Saving results to {0}".format(output("result.json")))
            with open(output("result.json"), "wb+") as fp:
                json.dump(metadata, fp, indent=2)

            # Close sampler pool
            if model.configuration["settings"].get("threads", 1) > 1:
                sampler.pool.close()
                sampler.pool.join()

            # Save sampler state
            with open(output("model.state"), "wb+") as fp:
                pickle.dump([
                    sampler.chain[:, -1, :], sampler.lnprobability[:, -1],
                    sampler.random_state
                ], fp, -1)

            # Plot results
            if args.plotting:

                # Plot the mean acceptance fractions
                acceptance_plot_filename = output("acceptance.{0}".format(
                    args.plot_format))
                fig = sick.plot.acceptance_fractions(
                    info["mean_acceptance_fractions"],
                    burn_in=model.configuration["settings"]["burn"])
                fig.savefig(acceptance_plot_filename)
                logger.info(
                    "Created figure {0}".format(acceptance_plot_filename))

                # Plot the chains
                chain_plot_filename = output("chain.{0}".format(
                    args.plot_format))
                fig = sick.plot.chains(
                    info["chain"],
                    labels=sick.utils.latexify(model.parameters),
                    truth_color='r',
                    burn_in=model.configuration["settings"]["burn"],
                    truths=[posteriors[p][0] for p in model.parameters])
                fig.savefig(chain_plot_filename)
                logger.info("Created figure {0}".format(chain_plot_filename))

                # Make a corner plot with just the astrophysical parameters
                corner_plot_filename = output("corner.{0}".format(
                    args.plot_format))
                indices = np.arange(len(model.grid_points.dtype.names))
                fig = sick.plot.corner(
                    sampler.chain.reshape(-1, len(model.parameters))[:,
                                                                     indices],
                    labels=sick.utils.latexify(model.grid_points.dtype.names),
                    truth_color='r',
                    quantiles=[.16, .50, .84],
                    verbose=False,
                    truths=[
                        posteriors[p][0] for p in model.grid_points.dtype.names
                    ])
                fig.savefig(corner_plot_filename)
                logger.info("Created figure {0}".format(corner_plot_filename))

                # Plot the autocorrelation
                autocorrelation_filename = output(
                    "auto-correlation.{0}".format(args.plot_format))
                fig = sick.plot.autocorrelation(sampler.chain)
                fig.savefig(autocorrelation_filename)
                logger.info(
                    "Created figure {0}".format(autocorrelation_filename))

                # Plot some spectra
                pp_spectra_plot_filename = output(
                    "projected-spectra.{0}".format(args.plot_format))
                fig = sick.plot.projection(model, spectra, chain=sampler.chain)
                fig.savefig(pp_spectra_plot_filename)
                logger.info(
                    "Created figure {0}".format(pp_spectra_plot_filename))

                # Closing the figures isn't enough; matplotlib leaks memory
                plt.close("all")

            # Delete some things
            del sampler, chain
            if args.save_chain_files:
                del primary_hdu, table_hdu, hdulist

    logger.info("Fin.")
    return True
Example #17
datadir = '/work/bow/data/'

#file = ['lat_photon_weekly_w0%d_p302_v001.fits'%(i+10) for i in range(90)]
file = ['lat_photon_weekly_w%d_p302_v001.fits'%(i) for i in range(100,400)]

#file = pyfits.open(datadir+'lat_photon_weekly_w200_p302_v001.fits')
#photon = file[1].data


for f in range(len(file)):
	
	fileph = pyfits.open(datadir+file[f])
	photon = fileph[1].data
	
	
	energy = photon.field('ENERGY') > 8000  
	energy_cut = photon[energy]
	
	print len(energy_cut)
	
	zenith = energy_cut.field('ZENITH_ANGLE') > 100
	zenith_cut = energy_cut[zenith]
	
	print len(zenith_cut)
	
		
	hdu = pyfits.BinTableHDU(zenith_cut)
	#hdu.writeto('select_photon_w%d.fits'%(f+10))
	hdu.writeto('select_photon_w%d.fits'%(f+100))

Example #18
                        scaledata = [bzero3, bscale3, bzero4, bscale4]
                    except:
                        #scaledata = [32768,1,2147483648,1]
                        scaledata = [0, 1, 0, 1]

                    data1 = hitrandom(table.header['NHITS'], NS, FS, scaledata,
                                      table.header['BEAMPOL'])
                    data2 = data1.data
                    of[ind].header = hdr
                    of[ind].data = data2
                    #pyfits.append(outfile,data2,hdr)
                    #pyfits.update(outfile,data2,hdr,ind)
                else:
                    print "Error1"
            elif any(elem in table.header['EXTNAME'] for elem in delim):
                hdr1 = pyfits.BinTableHDU()
                hdr1 = table
                #pyfits.append(outfile,hdr1.data,hdr1.header)
                #print table.header['EXTNAME']
                nSteps += 1
            else:
                print "Error2"
        except:
            print "Error3"
            pass

    of.verify()
    of.writeto(outfile, clobber=True)
    of.close()

    #To test
Example #19
def aggregate(args):
    """ Aggregate JSON-formatted results into a single tabular FITS file. """

    if os.path.exists(args.output_filename):
        if not args.clobber:
            raise IOError("output filename {0} already exists and we have been "\
                "asked not to clobber it".format(args.output_filename))
        else:
            logger.warn("Overwriting existing filename {0}".format(
                args.output_filename))

    # Let's just assume it all aggregates from JSON to a FITS filename
    results = []
    for filename in args.result_filenames:
        with open(filename, "r") as fp:
            try:
                results.append(json.load(fp))
            except:
                logger.exception(
                    "Could not read results filename {0}".format(filename))
                if args.debug: raise

            else:
                logging.debug(
                    "Successfully loaded results from {0}".format(filename))

    # Get header order and sort them
    columns = results[0].keys()

    sorted_columns = []
    # Logic: RA, DEC then all other uppercase fields in alphabetical order
    # Then any other fields that have associated u_* headers in alphabetical order, as
    # well as their u_* columns
    # Then all the others in alphabetical order
    if "RA" in columns:
        sorted_columns.append("RA")

    if "DEC" in columns:
        sorted_columns.append("DEC")

    uppercase_columns = []
    parameteral_columns = []
    for column in columns:
        if column.isupper() and column not in sorted_columns:
            uppercase_columns.append(column)
        elif "u_pos_{0}".format(column) in columns:
            parameteral_columns.append(column)

    uppercase_columns, parameteral_columns = map(
        sorted, [uppercase_columns, parameteral_columns])
    all_parameteral_columns = []
    variants = ("{0}", "u_pos_{0}", "u_neg_{0}", "u_maxabs_{0}")
    for column in parameteral_columns:
        all_parameteral_columns.extend(
            [variant.format(column) for variant in variants])

    sorted_columns.extend(uppercase_columns)
    sorted_columns.extend(all_parameteral_columns)

    other_columns = sorted(set(columns).difference(sorted_columns))
    ignore_columns = ("model_configuration", "optimised_theta",
                      "initial_theta")
    sorted_columns.extend(list(set(other_columns).difference(ignore_columns)))

    # Create data types
    formats = [("f8", "|S256")[isinstance(results[-1][each], (str, unicode))] \
        for each in sorted_columns]

    # Create table
    data = [[result.get(each, ["", np.nan][formats[i] == "f8"]) \
        for i, each in enumerate(sorted_columns)] for result in results]
    results_table = np.core.records.fromrecords(data,
                                                names=sorted_columns,
                                                formats=formats)

    # Write results to filename
    primary_hdu = pyfits.PrimaryHDU()
    table_hdu = pyfits.BinTableHDU(results_table)
    hdulist = pyfits.HDUList([primary_hdu, table_hdu])
    hdulist.writeto(args.output_filename, clobber=args.clobber)

    logger.info("Successfully written results from {0} sources with {1} fields"\
        " to {2}".format(len(results), len(results[0]), args.output_filename))
Example #20
def write_pcm(sfname,
              receiver_params,
              valid_chans,
              outname="pcm.fits",
              fake=False):
    #This method writes out the receiver parameters into a file that's readable/usable by PSRCHIVE.
    #Most of it is getting the header/data formatting correct, so it's not very enlightening, but it is useful.
    shdulist = pyfits.open(sfname)
    head = shdulist[0].header
    head['OBS_MODE'] = "PCM"
    nchan = head['OBSNCHAN']
    prihdu = pyfits.PrimaryHDU(header=head)
    new_hdulist = pyfits.BinTableHDU(header=shdulist[1].header,
                                     data=shdulist[1].data,
                                     name=shdulist[1].name)
    chans = np.linspace(head['OBSFREQ'] - head['OBSBW'] / 2.0,
                        head['OBSFREQ'] + head['OBSBW'] / 2.0,
                        nchan,
                        dtype='d')
    weights = np.zeros(nchan)
    weights[valid_chans] = 1
    Gs = weights.reshape(nchan, 1)
    expanded_receivers = np.zeros(nchan * 6).reshape(nchan, 6)
    vchan = 0
    for i in range(nchan):
        if i in valid_chans:
            expanded_receivers[i] = receiver_params[vchan, [5, 4, 2, 0, 3, 1]]
            vchan += 1

    params = np.append(Gs, expanded_receivers, axis=1)
    if not fake:
        params[params == 0] = np.NaN
    ncovar = 28
    ncov = ncovar * nchan
    covars = np.ones(ncov) * 0.0005
    write_params = np.concatenate(params)
    chisqs = np.ones(nchan)
    nfree = np.copy(weights)
    chans = np.asarray([chans])
    weights = np.asarray([weights])
    write_params = np.asarray([write_params])
    covars = np.asarray([covars])
    chisqs = np.asarray([chisqs])
    nfree = np.asarray([nfree])

    freqcol = pyfits.Column(name="DAT_FREQ", format="512D", array=chans)
    wtcol = pyfits.Column(name="DAT_WTS", format="512E", array=weights)
    datcol = pyfits.Column(name="DATA", format="3584E", array=write_params)
    covcol = pyfits.Column(name="COVAR", format="14336E", array=covars)
    chicol = pyfits.Column(name="CHISQ", format="512E", array=chisqs)
    freecol = pyfits.Column(name="NFREE", format="512J", array=nfree)

    cols = pyfits.ColDefs([freqcol, wtcol, datcol, covcol, chicol, freecol])

    feed_hdu = pyfits.BinTableHDU.from_columns(cols, name="FEEDPAR")
    feed_hdu.header.comments[
        'TTYPE1'] = '[MHz] Centre frequency for each channel'
    feed_hdu.header.comments['TFORM1'] = 'NCHAN doubles'
    feed_hdu.header.comments['TTYPE2'] = 'Weights for each channel'
    feed_hdu.header.comments['TFORM2'] = 'NCHAN floats'
    feed_hdu.header.comments['TTYPE3'] = 'Cross-coupling data'
    feed_hdu.header.comments['TFORM3'] = 'NCPAR*NCHAN floats'
    feed_hdu.header.comments['TTYPE4'] = 'Formal covariances of coupling data'
    feed_hdu.header.comments['TFORM4'] = 'NCOVAR*NCHAN floats'
    feed_hdu.header.comments[
        'TTYPE5'] = 'Total chi-squared (objective merit function)'
    feed_hdu.header.comments['TFORM5'] = 'NCHAN floats'
    feed_hdu.header.comments['TTYPE6'] = 'Number of degrees of freedom'
    feed_hdu.header.comments['TFORM6'] = 'NCHAN long (32-bit) integers'
    feed_hdu.header['CAL_MTHD'] = ('van04e18', 'Cross-coupling method')
    feed_hdu.header['NCPAR'] = ('7', 'Number of coupling parameters')
    feed_hdu.header['NCOVAR'] = ('28', 'Number of parameter covariances')
    feed_hdu.header['NCHAN'] = ('512', 'Nr of channels in Feed coupling data')
    feed_hdu.header['EPOCH'] = ('56038.3352', '[MJD] Epoch of calibration obs')
    feed_hdu.header['TUNIT1'] = ('MHz', 'Units of field')
    feed_hdu.header['TDIM3'] = ('(7,512)', 'Dimensions (NCPAR,NCHAN)')
    feed_hdu.header['TDIM4'] = ('(28,512)', 'Dimensions (NCOVAR,NCHAN)')
    feed_hdu.header['EXTVER'] = ('1', 'auto assigned by template parser')
    feed_hdu.header['PAR_0000'] = ('G', 'scalar gain')
    feed_hdu.header['PAR_0001'] = ('gamma',
                                   'differential gain (hyperbolic radians)')
    feed_hdu.header['PAR_0002'] = ('phi', 'differential phase (radians)')
    feed_hdu.header['PAR_0003'] = ('el0',
                                   'ellipticity of receptor 0 (radians)')
    feed_hdu.header['PAR_0004'] = ('or0',
                                   'orientation of receptor 0 (radians)')
    feed_hdu.header['PAR_0005'] = ('el1',
                                   'ellipticity of receptor 1 (radians)')
    feed_hdu.header['PAR_0006'] = ('or1',
                                   'orientation of receptor 1 (radians)')
    hdus = pyfits.HDUList(hdus=[prihdu, new_hdulist, feed_hdu])
    hdus.writeto(outname, clobber=True)
Example #21
import pyfits
import os

catalog_f = os.path.join(os.environ['SLREALIZERDIR'], 'data', 'qso_mock.fits')
catalog = pyfits.open(catalog_f)[1].data

mask = catalog['LENSID'] == 6136045
test_catalog = catalog[mask]
hdu = pyfits.BinTableHDU(data=test_catalog)
hdu.writeto('test_catalog.fits')
Example #22
            

        wlcat_file=psfcat_file.replace('psfcat','psfcat_findstars')

        initfile=pyfits.open(psfcat_file)
            
        findstars_file=pyfits.open(star_file)
        mask=findstars_file[1].data['star_flag']==1
        # create new sextractor file with only these entries
        data=initfile[2].data[mask]
        
        # Need to make different copy of these to not fail
        hdu1=copy.copy(initfile[0])
        hdu2=copy.copy(initfile[1])
        
        hdu = pyfits.BinTableHDU(data)
        hdu.name='LDAC_OBJECTS'
        hdulist = pyfits.HDUList([hdu1, hdu2, hdu])

        hdulist.writeto(wlcat_file, clobber=True)
        
        if args.rm_files:
            os.system('rm %s'%psfcat_file)

            # assign the psfcat_file to the new file
            psfcat_file=wlcat_file

    # If we want to cut the brighest magnitudes
    if args.mag_cut>0:
        magcut_file=psfcat_file.replace('psfcat','psfcat_magcut_%0.1f'%args.mag_cut)
Example #23
def load_fits(file, config):
    """ utils.load_fits( file, config )

	Loads the users fits object catalog.
	Pass the filename, and the configuration (a dictionary) describing data columns. """

    catalog = {}

    # open up the file
    fits = pyfits.open(file)

    # look for a binary hdu with the appropriate fields
    check = ['ra', 'dec', 'mag', 'point_mag', 're', 'n', 'pa', 'ba']

    use = None
    for (i, ext) in enumerate(fits):

        # don't bother checking if it isn't a binary fits table
        if type(ext) != type(pyfits.BinTableHDU()): continue

        # does it have all the necessary fields?
        # copy fits column names but in lowercase so they can be searched in a case insensitive way
        names = []
        for field in ext.data.names:
            names.append(field.lower())

        missing = ''
        for field in check:
            if not config[field].lower() in names:
                missing = config[field]
                break
        if missing: continue
        # okay, found our field!
        use = i
        break

    # did we find it?
    if use is None:
        if missing:
            raise ValueError(
                'Could not load data catalog - could not locate a binary fits table with field "%s"'
                % missing)
        else:
            raise ValueError(
                'Could not load data catalog - could not locate a binary fits table in input catalog'
            )

    # now we can load the catalog
    cat = fits[use].data
    valid = cat.size

    # and start loading the data!
    # pretend everything is a point source
    # make sure that ids are strings
    ids = []
    for i in range(cat.size):
        ids.append(str(cat.field(config['id'])[i]))
    catalog['id'] = np.asarray(ids)
    catalog['ra'] = cat.field(config['ra'])
    catalog['dec'] = cat.field(config['dec'])

    # sersic fields to load
    sersics = ['re', 'n', 'pa', 'ba', 're_err', 'n_err', 'pa_err', 'ba_err']
    for field in sersics:
        if config.has_key(field): catalog[field] = np.zeros(valid)

    catalog['model'] = np.asarray(['point'] * valid).astype(
        '|S6'
    )  # numpy string arrays carry a length - expand to make room for 'sersic'
    if config.has_key('point_mag'):
        catalog['mag'] = cat.field(config['point_mag'])
        if config.has_key('point_mag_err'):
            catalog['mag_err'] = cat.field(config['point_mag_err'])
    else:
        catalog['mag'] = cat.field(config['mag'])
        if config.has_key('mag_err'):
            catalog['mag_err'] = cat.field(config['mag_err'])

    # figure out what is a sersic and what is a point source
    m = cat.field(config['model_type']) == 'sersic'
    p = cat.field(config['model_type']) == 'point'
    # number of unrecognized model types
    unknown = valid - m.sum() - p.sum()

    # load sersic-specific values
    if m.sum() > 0:
        catalog['mag'][m] = cat.field(config['mag'])[m]
        catalog['model'][m] = ['sersic']
        if config.has_key('mag_err') and catalog.has_key('mag_err'):
            catalog['mag_err'][m] = cat.field(config['mag_err'])[m]
        for field in sersics:
            if config.has_key(field):
                catalog[field][m] = cat.field(config[field])[m]

    # return catalog and counts
    return (catalog, valid, unknown)
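The config argument is simply a mapping from the fields the loader looks for to the actual column names in the user's catalogue. A hypothetical example, with the column names invented for illustration:

config = {'id': 'NUMBER', 'ra': 'ALPHA_J2000', 'dec': 'DELTA_J2000',
          'model_type': 'MODEL', 'mag': 'MAG_SERSIC', 'point_mag': 'MAG_PSF',
          're': 'RE', 'n': 'SERSIC_N', 'pa': 'PA', 'ba': 'BA'}
catalog, n_valid, n_unknown = load_fits('input_catalog.fits', config)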
Example #24
                                                       format='D',
                                                       array=out_zphot),
                                         pyfits.Column(name='sersicfit',
                                                       format='8D',
                                                       array=out_sersicfit),
                                         pyfits.Column(name='bulgefit',
                                                       format='16D',
                                                       array=out_bulgefit),
                                         pyfits.Column(name='fit_status',
                                                       format='5J',
                                                       array=out_fit_status),
                                         pyfits.Column(name='fit_mad_s',
                                                       format='D',
                                                       array=out_fit_mad_s),
                                         pyfits.Column(name='fit_mad_b',
                                                       format='D',
                                                       array=out_fit_mad_b),
                                         pyfits.Column(name='fit_dvc_btt',
                                                       format='D',
                                                       array=out_fit_dvc_btt)]
                                        ))

# Write outputs.
print "Writing to file ",out_fitfile
tbhdu.writeto(out_fitfile, clobber=True)

# Write new subset of catalog file.
print "Re-writing to file ",out_catfile
galsim_cat = pyfits.BinTableHDU(galsim_cat[use_ind])
galsim_cat.writeto(out_catfile, clobber=True)
Example #25
 def filter(self, mask):
     return LDACCat(
         pyfits.BinTableHDU(data=self.hdu.data[mask],
                            header=self.hdu.header))
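For instance (a sketch; cat is assumed to be an LDACCat instance wrapping a table with a MAG_AUTO column, a placeholder name):

bright = cat.filter(cat.hdu.data['MAG_AUTO'] < 20.0)  # new LDACCat with only the bright rows
bright.hdu.writeto('bright_objects.fits', clobber=True)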
Example #26
        cat_file = cat_file.replace('psfcat', 'psfcat_findstars')

        if not os.path.exists(cat_file.replace(
                'psfcat', 'psfcat_findstars')) or args.force:
            initfile = pyfits.open(old_cat)

            findstars_file = pyfits.open(star_file)
            mask = findstars_file[1].data['star_flag'] == 1
            # create new sextractor file with only these entries
            data = initfile[2].data[mask]

            # Need to make different copy of these to not fail
            hdu1 = copy.copy(initfile[0])
            hdu2 = copy.copy(initfile[1])

            hdu = pyfits.BinTableHDU(data)
            hdu.name = 'LDAC_OBJECTS'
            hdulist = pyfits.HDUList([hdu1, hdu2, hdu])

            hdulist.writeto(cat_file, clobber=True)

    # If we want to cut the brighest magnitudes
    if args.mag_cut > 0:
        cut_file = cat_file.replace('psfcat',
                                    'psfcat_mag_%0.1f' % args.mag_cut)
        if (not os.path.exists(cut_file) or args.force):
            # get the brightest 10 mags that have flags=0 and take the median just in case some were
            # selected
            name = cat_file
            hdu = 2
Example #27
    fieldSize = 42.0/60.0
    
    SMASHcat = sys.argv[1]
    FieldsDat = sys.argv[2]

    SMASHroot, SMASHext = path.splitext(SMASHcat)

    fields = np.loadtxt(FieldsDat)
    fnum = (fields[:,0]).astype(int)
    ra = fields[:,1]
    dec = fields[:,2]
    nFields = len(fnum)

    smash = pf.open(SMASHcat)
    smashData = smash[1].data
    smashRa = smashData['RA']
    smashDec = smashData['DEC']
    
    for i in range(nFields):
        SMASHoutName = SMASHroot + ('_%d' % fnum[i]) + SMASHext
        raLow = ra[i] - 0.5*fieldSize / np.cos(dec[i]*np.pi/180)
        raHi =  ra[i] + 0.5*fieldSize / np.cos(dec[i]*np.pi/180)
        decLow = dec[i] - 0.5*fieldSize
        decHi =  dec[i] + 0.5*fieldSize
        inField = np.where((smashRa>raLow) & (smashRa<raHi) & (smashDec>decLow) & (smashDec<decHi))
        if len(inField[0])>0:
            fieldSmashData = smashData[inField]
            hdu = pf.BinTableHDU(data=fieldSmashData)
            hdu.writeto(SMASHoutName)
            print fnum[i], len(inField[0]), SMASHoutName
Example #28
    dec = ff.DEC
    l, b = es.coords.eq2gal(ra, dec)
    theta = (90 - b) / 180. * 3.14159265  #Galactic coordinate
    phi = l / 180. * 3.14159265
    nside = 2**5
    hid = hp.ang2pix(nside, theta, phi)
    unqid = np.unique(hid)
    nid = len(unqid)
    for j in range(nid):
        ok = hid == unqid[j]
        if np.any(ok):
            fname = '/media/jghao/data/sdssdr8/patches/hpixFile/' + str(
                unqid[j]) + '.fit'
            if os.path.isfile(fname):
                fname = fname[:-4] + '_' + str(i) + '.fit'
            hdu = pf.BinTableHDU(ff[ok])
            hdu.writeto(fname)

#---------merge those multiple ones ------------

f = gl.glob('/media/jghao/data/sdssdr8/patches/hpixFile/*_?.fit')
idx = []
for fname in f:
    idx_start = fname.find('hpixFile/') + 9
    idx_end = fname.find('_')
    idx.append(int(fname[idx_start:idx_end]))

idx = np.array(idx)
unqidx = np.unique(idx)

for j in unqidx: