Example #1
def first_min_from_core(psf):
    """Walk outward along the central row of the PSF and return the column
    index where values first start climbing again (the first dark ring)."""
    dim = psf.shape[0]
    row = psf[dim // 2]
    for i in range(dim // 2, dim - 1):  # stop at dim - 2 so row[i + 1] stays in bounds
        if row[i + 1] > row[i]:
            debug("turning point at radius {0}px (row[{1}] = {2} < row[{3}] = {4})".format(i - dim // 2, i, row[i], i + 1, row[i + 1]))
            return i
    return None  # no turning point found before the edge of the frame
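For context, a minimal usage sketch (not part of the original module) that exercises first_min_from_core on a synthetic, roughly Airy-like pattern; the numpy import and the fake PSF are illustrative assumptions only.

# Hypothetical usage sketch -- synthetic data, not the module's own PSF.
import numpy as np

def _demo_first_min():
    dim = 64
    y, x = np.mgrid[0:dim, 0:dim]
    r = np.hypot(x - dim // 2, y - dim // 2)
    psf = (np.cos(r / 2.0) ** 2) * np.exp(-r / 10.0)  # crude stand-in for an Airy pattern
    col = first_min_from_core(psf)  # column index where the central row starts climbing again
    return col, col - dim // 2      # (column index, radius in px from center)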
Example #2
def generate_scaled_psf(dimension, primary, secondary, scale_to_physical):
    # regenerate the PSF with obscuration in the aperture
    unscaled_psf_obscured = generate_psf_full(dimension, primary, secondary)
    scaled_psf = scipy.ndimage.zoom(unscaled_psf_obscured, scale_to_physical)
    scaled_psf_ctr = scipy.ndimage.center_of_mass(scaled_psf)
    max_aperture_radius = scaled_psf.shape[0] // 2  # integer pixel radius
    debug("Scaled PSF to", scaled_psf.shape)
    debug("max aperture radius = ", max_aperture_radius)
    return scaled_psf, scaled_psf_ctr, max_aperture_radius
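generate_psf_full is referenced above but not shown in this section. As a rough sketch of one standard way such a function can work (the Fraunhofer approximation: the PSF is the squared magnitude of the Fourier transform of the aperture, here an annulus where the secondary-to-primary ratio sets the central obscuration), with names and scalings that are illustrative assumptions only:

# Illustrative assumption only -- not the module's actual generate_psf_full.
import numpy as np

def generate_psf_full_sketch(dimension, primary, secondary):
    # Annular aperture mask: inside the primary, outside the secondary obscuration.
    y, x = np.mgrid[0:dimension, 0:dimension]
    r = np.hypot(x - dimension / 2.0, y - dimension / 2.0)
    r_outer = dimension / 4.0  # pad the array so the PSF is well sampled
    r_inner = r_outer * (float(secondary) / float(primary))
    aperture = (r <= r_outer) & (r > r_inner)
    # Far-field diffraction pattern: |FFT(aperture)|^2, recentered on the array
    field = np.fft.fftshift(np.fft.fft2(aperture.astype(float)))
    psf = np.abs(field) ** 2
    return psf / psf.max()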
Example #3
def pngtofits(infile, exposure):
    # Is this a wildcard pattern?
    if '*' in infile:
        files = glob.glob(infile) # list of all matching
        debug("converting all of", files)
    else:
        files = [infile,] # list of one
    
    for fn in files:
        base, ext = os.path.splitext(fn)
        img = Image.open(fn)
        data = numpy.array(img)
        hdu = pyfits.PrimaryHDU(data)
        hdu.header['EXPOSURE'] = exposure
        hdu.writeto('{0}.fits'.format(base))
        info("Wrote to",'{0}.fits'.format(base))
Example #4
def removeband(infile, outfile, exclude_from, exclude_to):
    imagehdul = pyfits.open(infile)
    data = imagehdul[0].data
    if len(data.shape) == 3:
        debug("Operating on a data cube")
        for idx in range(0, data.shape[0]):
            debug("Processing frame", idx + 1, "of", data.shape[0])
            frame = Frame(data[idx], None)
            avgrow_median_subtract(frame, exclude_from, exclude_to)
            data[idx] = frame.data
        imagehdul[0].data = data
    else:
        frame = Frame(data, None)
        avgrow_median_subtract(frame, exclude_from, exclude_to)
        imagehdul[0].data = frame.data
    imagehdul.writeto(outfile)
    info("Wrote corrected (de-banded and median subtracted) image to", outfile)
Example #5
def split_frames(cubefile, range_pairs, target_dir):
    dirname, filename = os.path.split(cubefile)
    filebase = filename.rsplit('.', 1)[0]
    if len(filebase) > 8:
        warn("IRAF doesn't like long filenames. "
             "Consider shortening the cube filename ({0})".format(filebase))
    
    outfiles = []
    
    for fromidx, toidx in range_pairs:
        for i in range(fromidx, toidx+1):
            infile = cubefile + "[*,*,{0}]".format(i)
            outfile = '{0}/frame_{1:04}.fit'.format(target_dir, i)
            debug("imcopy", infile, outfile)
            iraf.imcopy( # easier to use imcopy and preserve headers than to use pyfits I think
                input=infile,
                output=outfile
            )
            outfiles.append(outfile)
        
    return outfiles
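parse_ranges (used by cubetoframes and photstrehl below) is not shown in this section. A minimal sketch, assuming the same "from-to,from-to" syntax that cubestack parses inline, producing the (fromidx, toidx) pairs that split_frames consumes:

# Sketch only, assuming a rangespec like "1-100,150-200".
def parse_ranges_sketch(rangespec):
    pairs = []
    for rangestr in rangespec.split(','):
        a, b = rangestr.split('-')
        pairs.append((int(a), int(b)))
    return pairs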
Example #6
def plot_with_arcseconds(outfile, radii, real_values, ideal_values, min_radius_real,
        max_extent_px, plate_scale_px, ylabel="Values at Radius", marker=None):

    # Plot with twinned axis in arcseconds
    fig, host = plt.subplots()

    plt.plot(radii, real_values, 'r', marker=marker, label="Science Image")
    plt.plot(radii, ideal_values, 'g', marker=marker, label="Ideal PSF")
    plt.axvspan(0, min_radius_real, facecolor='g', alpha=0.25)

    plt.ylabel(ylabel)
    plt.xlabel("Radius (pixels from center)")

    plt.xlim(1, max_extent_px)
    plt.ylim(0, np.max(ideal_values))
    xfrom, xto = fig.axes[0].get_xlim()
    par1 = host.twiny()
    par1.axes.set_xlabel("Radius (arcseconds from center)")
    par1.set_xlim((xfrom * plate_scale_px, xto * plate_scale_px))
    par1.grid()
    host.legend(loc=4)
    
    plt.savefig(outfile)
    debug("saved plot to", outfile)
Example #7
def cubetoframes(cubefile, rangespec='', clobber=False):
    ranges = parse_ranges(rangespec)
    path, filename = os.path.split(cubefile)
    filebase = filename.rsplit('.', 1)[0]  # base the target dir name on the file name, not the full path

    target_dir = os.path.join(path, '{0}_frames'.format(filebase))
    debug("target:", target_dir)
    if os.path.isdir(target_dir):
        debug("target dir exists:", target_dir)
        if clobber:
            shutil.rmtree(target_dir)
        else:
            error("target dir exists (remove dir or set clobber=True)", target_dir)
            raise RuntimeError("target dir exists (remove dir or set clobber=True)")
    ensure_dir(target_dir)
    outimgs = split_frames(cubefile, ranges, target_dir)
    debug("made outimgs", outimgs)
Example #8
def cubestack(cubefile, rangespec='', clobber=False):
    ranges = []
    for rangestr in rangespec.split(','):
        a, b = rangestr.split('-')
        ranges.append((int(a), int(b)))
    debug("stacking range pairs:", ranges)
    path, filename = os.path.split(cubefile)
    filebase = filename.rsplit('.', 1)[0]  # base the target dir name on the file name, not the full path
    target_dir = os.path.join(path, '{0}_stacks'.format(filebase))
    debug("target:", target_dir)
    
    tmp_target_dir = tempfile.mkdtemp()
    outimgs = combine_cube_frames(cubefile, ranges, tmp_target_dir)
    if os.path.isdir(target_dir):
        debug("target dir exists, removing", target_dir)
        shutil.rmtree(target_dir)
    shutil.copytree(tmp_target_dir, target_dir)
    shutil.rmtree(tmp_target_dir)  # clean up the temporary working copy
Example #9
def pngtocube(directory, filepattern, outfile, exposure):
    if os.path.exists(outfile):
        error("File", outfile, "already exists!")
        return
    files = glob.glob(os.path.join(directory, "*"))
    
    # turn filepattern into a regex
    filepattern = filepattern.replace('$i', r'(\d+)')
    
    # sort files by index instead of lexically
    # (i.e. [99.png, 100.png, 990.png] not [100.png, 99.png, 990.png])
    filetuples = []
    for f in files:
        _, fname = os.path.split(f)
        match = re.match(filepattern, fname)
        if match:
            index = int(match.groups()[0])
            filetuples.append((index, f))  # keep the full path so frames outside the cwd still open

    filetuples.sort()
    
    
    debug("Have", len(filetuples), "frames, numbered", filetuples[0][0], "to", filetuples[-1][0])
    
    # get image dimensions from first input file
    # (assuming all frames are the same shape, which should be true)
    img = Image.open(filetuples[0][1])
    data = numpy.array(img)
    debug("Input frame shape:", data.shape)
    
    shape = (len(filetuples), data.shape[0], data.shape[1])
    cubedata = numpy.zeros(shape)
    debug("Output cube shape:", shape)
    
    for pos, (idx, infile) in enumerate(filetuples):
        img = Image.open(infile)
        data = numpy.array(img)
        cubedata[pos] = data  # index by position in the sorted list, not by the parsed frame number
    
    hdu = pyfits.PrimaryHDU(cubedata)
    hdu.header['EXPOSURE'] = exposure
    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto(outfile)
    info("Wrote to", outfile)
Example #10
def daofind_brightest(filename, fwhmpsf=2.5, threshold=20.0):
    debug("finding brightest in", filename)
    data = pyfits.getdata(filename)
    sigma = np.std(data) * 0.75 # background stddev... approximated because we don't know where the star IS yet
    
    _dao_setup(fwhmpsf, threshold, sigma)
    
    tmp_target_dir = tempfile.mkdtemp()
    outfile = os.path.join(tmp_target_dir, 'daofind.coo')
    
    iraf.noao.digiphot.apphot.daofind.run(
        image=filename,
        output=outfile,
        interactive=False,
        verify=False,
    )

    found_stars = parse_coo_file(outfile)
    if len(found_stars) == 0:
        warn("HAX: halving fwhmpsf to try and get a detection")
        # FIXME: kludge to get AAS 2013 data 
        _dao_setup(fwhmpsf, threshold / 2.0, sigma)
        outfile = os.path.join(tmp_target_dir, 'daofind.coo.2')
    
        iraf.noao.digiphot.apphot.daofind.run(
            image=filename,
            output=outfile,
            interactive=False,
            verify=False,
        )
        found_stars = parse_coo_file(outfile)
        
    debug(found_stars)
    found_stars.sort(order=['MAG'])
    brightest = found_stars[0]
    debug("brightest found @", brightest['XCENTER'], ',', brightest['YCENTER'], 'with mag', brightest['MAG'])
    
    shutil.rmtree(tmp_target_dir)
    return brightest
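parse_coo_file is not shown here. Since the result is sorted with order=['MAG'] and indexed by field name, it is presumably a numpy structured array; a minimal sketch under that assumption, using the usual apphot daofind column order (confirm against the "#N" header line in your .coo file):

# Sketch only -- not the module's actual parse_coo_file.
import numpy as np

def parse_coo_file_sketch(path):
    names = ['XCENTER', 'YCENTER', 'MAG', 'SHARPNESS', 'SROUND', 'GROUND', 'ID']
    table = np.genfromtxt(path, comments='#', names=names)
    return np.atleast_1d(table)  # keep len() and sort() working for a single detection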
Example #11
def aoavgcube(cubefile, outfile, newexposure, fromidx=1, toidx=None):
    """
    Combine (without rejection) many short exposures in a data cube
    to approximate a "seeing disk" from a longer integration time.
    
    cubefile - input data cube path
    outfile - output data cube path
    newexposure - target exposure time (same units as the EXPOSURE/EXPTIME header) for each combined frame
    fromidx - 1-based index of start of combining range (default: 1)
    toidx - 1-based index of end of range (default: None, end of cube)
    """
    newexposure = float(newexposure)
    ffile = pyfits.open(cubefile)
    header = ffile[0].header
    data = ffile[0].data

    if 'EXPOSURE' in header.keys():
        oldexposure = float(header['EXPOSURE'])
    elif 'EXPTIME' in header.keys():
        oldexposure = float(header['EXPTIME'])
    else:
        raise Exception("No exposure time value found in the datacube header!")

    assert newexposure > oldexposure, ("Can't get a shorter exposure time by combining frames! "
        "oldexposure {0} > newexposure {1}".format(oldexposure, newexposure))

    frames_per_combined = int(newexposure / oldexposure)
    epsilon = (newexposure / oldexposure) - int(newexposure / oldexposure)
    if epsilon > 0.01:
        warn("New exposure is not an integer multiple of old exposure, rounding to", frames_per_combined)
    assert frames_per_combined > 1, "Only one old frame per combined frame; this is probably not what you want"

    if toidx is None or toidx <= fromidx:
        # use entire cube
        toidx = data.shape[0]
    else:
        assert toidx <= data.shape[0], "toidx ({0}) > number of frames ({1})".format(toidx, data.shape[0])
    assert data.shape[0] > 1, "Only one frame found! Is this a data cube?"

    total_frames = toidx - fromidx
    debug("toidx=",toidx,"fromidx=",fromidx)

    if total_frames % frames_per_combined != 0:
        warn("Total frames in range (toidx - fromidx =", toidx - fromidx, ") is not an integer multiple "
             "of the number of frames per combined exposure (", frames_per_combined,
             ") so", total_frames % frames_per_combined, "frames from the end of "
             "the range will be left off.")
        toidx = toidx - (total_frames % frames_per_combined)
        total_frames = toidx - fromidx

    total_combined_frames = int(total_frames / frames_per_combined)
    info("Output data cube will have", total_combined_frames, "total frames of", newexposure, "sec exposure")
    info("Processing input data cube frames", fromidx, "to", toidx)
    target_dir = tempfile.mkdtemp()
    info("Created working directory {0} for intermediate data".format(target_dir))

    try:
        range_pairs = list((
            fromidx + n * frames_per_combined,
            fromidx + (n+1) * frames_per_combined - 1
        ) for n in range(0, total_combined_frames))
        
        frame_paths = combine_cube_frames(cubefile, range_pairs, target_dir)
        frames_to_cube(frame_paths, outfile)
    finally:
        # Don't leave clutter if the task fails
        shutil.rmtree(target_dir)
        info("Removed working directory {0}".format(target_dir))
Example #12
def photstrehl(cubefile, rangespec, primary, secondary, dimension, f_number,
        pixel_scale, lambda_mean, growth_step, normalize_at, find_source, xcenter, ycenter,
        fwhmpsf, threshold, quiet):
    start_time = time.time()
    info("Started at:", start_time)
    if not os.path.exists(cubefile):
        raise RuntimeError("No file named {0}".format(cubefile))
    cubefile_base = os.path.splitext(os.path.basename(cubefile))[0]
    # I: compute ideal psf
    
    scale_to_physical, plate_scale_px, min_radius_real = compute_psf_scale(
        dimension,
        primary,
        secondary,
        f_number,
        lambda_mean,
        pixel_scale
    )
    
    info("The radius of the first minimum in physical pixels is", min_radius_real, "px")
    
    scaled_psf, scaled_psf_ctr, max_aperture_radius = generate_scaled_psf(
        dimension,
        primary,
        secondary,
        scale_to_physical
    )
    
    # wrap psf in a frame
    psf = Frame(scaled_psf, scaled_psf_ctr)
    
    if normalize_at != 0:
        max_extent_px = normalize_at
        debug("Rescaling to max integrated flux @ r=", max_extent_px, "pixels")
    else:
        max_extent_px = 2.5 / plate_scale_px # After 2.5" we're almost certainly measuring noise
        debug("after 2.5'' or", max_extent_px, "px we'll be almost certainly measuring noise")
    growth_max = max_extent_px + growth_step * 3 # do 3 steps beyond the 2.5" mark
    assert growth_max < max_aperture_radius, "Curve of Growth won't fit on PSF frame with this max extent"
    
    # precompute CoG and profile to be rescaled later
    
    # phot CoG needs a FITS file, so write one:
    psftmpfilename = os.path.abspath('./{0}.fits'.format(uuid.uuid1()))
    hdu = pyfits.PrimaryHDU(psf.data)
    hdu.writeto(psftmpfilename)
    debug("psf FITS file temporarily stored in", psftmpfilename)
    
    phot_curve_of_growth(psf, psftmpfilename, growth_max, step=growth_step, quiet=quiet, fitsky=False)
    profile_from_growthcurve(psf)
    
    # II: analyze images
    
    # functions to rescale our PSF Frame computed values
    # to physical counts
    
    def max_flux(frame):
        """Frame max integrated flux (for a radius less than 2.5'')"""
        return np.max(frame.fluxes[frame.radii <= max_extent_px])
    
    def scale_psf_fluxes(frame, psf):
        """
        Returns a profile and curve of growth values for the ideal PSF
        scaled such that the total integrated flux is equivalent to the
        maximum flux in the frame passed as the first argument.
        """
        scale_factor = (max_flux(frame) / max_flux(psf))
        return psf.profile * scale_factor, psf.fluxes * scale_factor

    tmp_target_dir = tempfile.mkdtemp()
    ranges = parse_ranges(rangespec)
    debug("splitting ranges", ranges)
    fits_frames = split_frames(cubefile, ranges, tmp_target_dir)
    debug("working in", tmp_target_dir, "with frames", fits_frames)

    # set up array to hold each frame's analysis data
    radii_count = psf.radii.shape[0]
    frame_count = len(fits_frames)
    shape = (frame_count, 5, radii_count)
    analysis_frames = np.zeros(shape)
    # [cog x count, prof x count, ideal x count, idealprof x count, strehl x count] x frames
    
    strehl_rows = []
    
    for fits_frame in fits_frames:
        # this is slightly dumb... parse the frame # out of the filename
        frame_num = int(re.findall(r'\d+', os.path.basename(fits_frame))[0])
        if find_source:
            bright = daofind_brightest(fits_frame, fwhmpsf, threshold)
            center_col, center_row = bright['XCENTER'], bright['YCENTER']
            debug("frame #", frame_num, "has brightest at", center_col, center_row)
            center_coords = (float(center_col), float(center_row))
            frame = Frame(pyfits.getdata(fits_frame), center_coords)
        else:
            center_coords = (float(xcenter), float(ycenter))
            frame = Frame(pyfits.getdata(fits_frame), center_coords)
        
        debug("loaded frame from", fits_frame)
    
        # subtract median row to handle light/charge leakage biasing measurements
        exclude_from, exclude_to = frame.ybounds(r=int(max_extent_px)) # exclude region of max_extent_px around center of frame
        avgrow_median_subtract(frame, exclude_from, exclude_to)
        debug("median subtracted frame")
        phot_curve_of_growth(frame, fits_frame, growth_max, step=growth_step, quiet=quiet, fitsky=True)
        debug("curve of growth generated")
        profile_from_growthcurve(frame)
        debug("profile generated")
    
        # scale psf.fluxes and psf.profile to a max value determined from frame
        ideal_profile, ideal_fluxes = scale_psf_fluxes(frame, psf)
        strehls_for_all_radii = frame.fluxes / ideal_fluxes
        strehl_rows.append((frame_num, center_coords, strehls_for_all_radii))

    outfile = "{0}_{1}_strehlseries.txt".format(cubefile_base, rangespec)
    debug("writing strehl series to", outfile)
    with open(outfile, 'w') as f:
        f.write("# columns 2 and up are the pixel radii at which we computed the Strehl ratio\n")
        f.write("# frameidx\txcenter\tycenter\t")
        f.write('\t'.join(map(str, psf.radii)))
        f.write('\n')
        for idx, center, ratios in strehl_rows:
            f.write(str(idx))
            f.write('\t')
            f.write('\t'.join(map(str, center)))
            f.write('\t')
            f.write('\t'.join(map(str, ratios)))
            f.write('\n')

    debug("removing exploded cube from", tmp_target_dir)
    shutil.rmtree(tmp_target_dir)

    info("Completed at:", time.time())
    info("Total time:", time.time() - start_time)
Example #13
def strehlframe(image, primary, secondary, dimension, f_number, pixel_scale,
        lambda_mean, growth_step, find_source, xcenter, ycenter, fwhmpsf,
        threshold, quiet):
    start_time = time.time()
    info("Started at:", start_time)
    if not os.path.exists(image):
        raise RuntimeError("No file named {0}".format(image))
    # Were we given coordinates, or do we need to find the source?
    if find_source:
        bright = daofind_brightest(image, fwhmpsf, threshold)
        center_col, center_row = bright['XCENTER'], bright['YCENTER']
    else:
        center_col, center_row = xcenter, ycenter
    
    scale_to_physical, plate_scale_px, min_radius_real = compute_psf_scale(
        dimension,
        primary,
        secondary,
        f_number,
        lambda_mean,
        pixel_scale
    )
    
    scaled_psf, scaled_psf_ctr, max_aperture_radius = generate_scaled_psf(
        dimension,
        primary,
        secondary,
        scale_to_physical
    )
    
    # wrap psf in a frame
    psf = Frame(scaled_psf, scaled_psf_ctr)
    # precompute CoG and profile to be rescaled later
    curve_of_growth(psf, max_aperture_radius, step=growth_step, quiet=quiet)
    profile_from_growthcurve(psf)
    
    # values and functions to rescale our PSF Frame computed values
    # to physical counts
    max_extent_px = 2.5 / plate_scale_px # After 2.5" we're almost certainly measuring noise
    debug("after 2.5'' or", max_extent_px, "px we're almost certainly measuring noise")
    
    def max_flux(frame):
        """Frame max integrated flux (for a radius less than 2.5'')"""
        return np.max(frame.fluxes[frame.radii <= max_extent_px])
    
    def scale_psf_fluxes(frame, psf):
        """
        Returns a profile and curve of growth values for the ideal PSF
        scaled such that the total integrated flux is equivalent to the
        maximum flux in the frame passed as the first argument.
        """
        scale_factor = (max_flux(frame) / max_flux(psf))
        return psf.profile * scale_factor, psf.fluxes * scale_factor

    image_base = os.path.splitext(os.path.basename(image))[0]
    frame = Frame(pyfits.getdata(image), (float(center_col), float(center_row)))
    debug("loaded frame from", image)
    # subtract median row to handle light/charge leakage biasing measurements
    exclude_from, exclude_to = frame.ybounds(r=int(max_extent_px)) # exclude region of max_extent_px around center of frame
    avgrow_median_subtract(frame, exclude_from, exclude_to)
    debug("median subtracted frame")
    curve_of_growth(frame, max_aperture_radius, step=growth_step, quiet=quiet)
    debug("curve of growth generated")
    profile_from_growthcurve(frame)
    debug("profile generated")
    
    # scale psf.fluxes and psf.profile to a max value determined from frame
    ideal_profile, ideal_fluxes = scale_psf_fluxes(frame, psf)
    
    write_table("{0}_strehl.dat".format(image_base), (
        ("Pixel Radius", frame.radii),
        ("Enclosed Pixels", frame.npix),
        ("Image Enclosed Energy (counts)", frame.fluxes),
        ("Ideal Enclosed Energy (counts)", ideal_fluxes),
        ("Strehl Ratio (for peak in this radius)", frame.fluxes / ideal_fluxes),
        ("Image Radial Profile (counts at radius)", frame.profile),
        ("Ideal Radial Profile (counts at radius)", ideal_profile)
    ))
    
    # Plot Curve of Growth with twinned axis in arcseconds
    
    plot_with_arcseconds(
        image_base + "_growth.pdf",
        psf.radii,
        frame.fluxes,
        ideal_fluxes,
        min_radius_real,
        max_extent_px,
        plate_scale_px,
        ylabel="Enclosed Flux at Radius"
    )
    
    # Plot profile with twinned axis in arcseconds
    
    plot_with_arcseconds(
        image_base + "_profile.pdf",
        psf.radii,
        frame.profile,
        ideal_profile,
        min_radius_real,
        10, # xlim max
        plate_scale_px,
        ylabel="Flux at Radius",
        marker="."
    )
    
    info("Completed at:", time.time())
    info("Total time:", time.time() - start_time)
Example #14
def photstrehlframe(image, primary, secondary, dimension, f_number, pixel_scale,
        lambda_mean, growth_step, normalize_at, find_source, xcenter, ycenter, fwhmpsf,
        threshold, quiet):
    start_time = time.time()
    info("Started at:", start_time)
    if not os.path.exists(image):
        raise RuntimeError("No file named {0}".format(image))
    # Were we given coordinates, or do we need to find the source?
    if find_source:
        bright = daofind_brightest(image, fwhmpsf, threshold)
        center_col, center_row = bright['XCENTER'], bright['YCENTER']
    else:
        center_col, center_row = xcenter, ycenter
    
    scale_to_physical, plate_scale_px, min_radius_real = compute_psf_scale(
        dimension,
        primary,
        secondary,
        f_number,
        lambda_mean,
        pixel_scale
    )
    
    info("The radius of the first minimum in physical pixels is", min_radius_real, "px")
    
    scaled_psf, scaled_psf_ctr, max_aperture_radius = generate_scaled_psf(
        dimension,
        primary,
        secondary,
        scale_to_physical
    )
    
    # wrap psf in a frame
    psf = Frame(scaled_psf, scaled_psf_ctr)
    
    if normalize_at != 0:
        max_extent_px = normalize_at
        debug("Rescaling to max integrated flux @ r=", max_extent_px, "pixels")
    else:
        max_extent_px = 2.5 / plate_scale_px # After 2.5" we're almost certainly measuring noise
        debug("after 2.5'' or", max_extent_px, "px we'll be almost certainly measuring noise")
    growth_max = max_extent_px + growth_step * 3 # do 3 steps beyond the 2.5" mark
    assert growth_max < max_aperture_radius, "Curve of Growth won't fit on PSF frame with this max extent"
    
    # phot CoG needs a FITS file, so write one:
    psftmpfilename = os.path.abspath('./{0}.fits'.format(uuid.uuid1()))
    hdu = pyfits.PrimaryHDU(psf.data)
    hdu.writeto(psftmpfilename)
    debug("psf FITS file temporarily stored in", psftmpfilename)
    
    # precompute CoG and profile to be rescaled later
    phot_curve_of_growth(psf, psftmpfilename, growth_max, step=growth_step, quiet=quiet, fitsky=False)
    profile_from_growthcurve(psf)
    
    # values and functions to rescale our PSF Frame computed values
    # to physical counts

    
    def max_flux(frame):
        """Frame max integrated flux (for a radius less than 2.5'')"""
        return np.max(frame.fluxes[frame.radii <= max_extent_px])
    
    def scale_psf_fluxes(frame, psf):
        """
        Returns a profile and curve of growth values for the ideal PSF
        scaled such that the total integrated flux is equivalent to the
        maximum flux in the frame passed as the first argument.
        """
        scale_factor = (max_flux(frame) / max_flux(psf))
        return psf.profile * scale_factor, psf.fluxes * scale_factor

    image_base = os.path.splitext(os.path.basename(image))[0]
    frame = Frame(pyfits.getdata(image), (float(center_col), float(center_row)))
    debug("loaded frame from", image)
    # subtract median row to handle light/charge leakage biasing measurements
    exclude_from, exclude_to = frame.ybounds(r=int(max_extent_px)) # exclude region of max_extent_px around center of frame
    avgrow_median_subtract(frame, exclude_from, exclude_to)
    debug("median subtracted frame")
    phot_curve_of_growth(frame, image, growth_max, step=growth_step, quiet=quiet, fitsky=True)
    debug("curve of growth generated")
    profile_from_growthcurve(frame)
    debug("profile generated")
    
    # scale psf.fluxes and psf.profile to a max value determined from frame
    ideal_profile, ideal_fluxes = scale_psf_fluxes(frame, psf)
    
    write_table("{0}_strehl.dat".format(image_base), (
        ("Pixel Radius", frame.radii),
        ("Enclosed Pixels", frame.npix),
        ("Image Enclosed Energy (counts)", frame.fluxes),
        ("Ideal Enclosed Energy (counts)", ideal_fluxes),
        ("Strehl Ratio (for peak in this radius)", frame.fluxes / ideal_fluxes),
        ("Image Radial Profile (counts at radius)", frame.profile),
        ("Ideal Radial Profile (counts at radius)", ideal_profile)
    ))
    
    # Plot Curve of Growth with twinned axis in arcseconds
    
    plot_with_arcseconds(
        image_base + "_growth.pdf",
        psf.radii,
        frame.fluxes,
        ideal_fluxes,
        min_radius_real,
        max_extent_px,
        plate_scale_px,
        ylabel="Enclosed Flux at Radius"
    )
    
    # Plot profile with twinned axis in arcseconds
    
    plot_with_arcseconds(
        image_base + "_profile.pdf",
        psf.radii,
        frame.profile,
        ideal_profile,
        min_radius_real,
        10, # xlim max
        plate_scale_px,
        ylabel="Flux at Radius",
        marker="."
    )
    
    info("Completed at:", time.time())
    info("Total time:", time.time() - start_time)
Example #15
def phot_curve_of_growth(frame, original_filename, max_aperture, step=0.5, quiet=True, fitsky=True):
    """
    Calculate a curve of growth by integrating flux in circular apertures
    (centered on frame.center) of successively larger radii.
    
    This implementation uses the IRAF phot task to handle fractional
    pixel radii. This means it needs to refer to the original file
    for the frame.
    
    original_filename - path to the FITS file this frame was loaded from
    max_aperture - radius in pixels from center where we
                   stop growing our aperture
    step - number of pixels to grow radius by (default: 0.5)
    quiet - do not emit a line for each step (default: True)
    fitsky - fit and subtract a sky value (default: True); pass False to
             use a constant sky of zero, e.g. for the ideal PSF
    """
    radii, fluxes, npix = np.arange(step, max_aperture, step), [], []
    
    # initialize the parameters we care about for daophot
    _dao_setup(2.5, 5.0, np.std(frame.data))
    
    fitskypars = iraf.noao.digiphot.apphot.fitskypars
    # do we fit sky? (False for PSF)
    if fitsky:
        fitskypars.salgorithm = "centroid"
        fitskypars.annulus = 50.0
        fitskypars.dannulus = 10.0
    else:
        fitskypars.salgorithm = "constant"
        fitskypars.skyvalue = 0.0
    
    tmp_target_dir = tempfile.mkdtemp()
    
    # tell daophot where the star is
    coopath = os.path.join(tmp_target_dir, 'mysource.coo')
    with open(coopath, 'w') as f:
        f.write("{0} {1}\n".format(frame.x, frame.y))
    
    outfile = os.path.join(tmp_target_dir, 'growth.mag')
    photpars = iraf.noao.digiphot.apphot.photpars
    photpars.apertures = ','.join(map(str, radii))
    iraf.noao.digiphot.apphot.phot.run(
        image=original_filename,
        coords=coopath,
        output=outfile,
        interactive=False,
        verify=False,
    )
    #shutil.rmtree(tmp_target_dir)
    debug("output in", tmp_target_dir)
    
    with open(outfile) as f:
        lines = f.readlines()
    
    # magic numbers for daophot output: skip first 79 lines
    # following lines have this format
    #N RAPERT   SUM           AREA       FLUX          MAG    MERR   PIER PERROR   \
    #U scale    counts        pixels     counts        mag    mag    ##   perrors  \
    #F %-12.2f  %-14.7g       %-11.7g    %-14.7g       %-7.3f %-6.3f %-5d %-9s
    # (we're only expecting one source in this .mag file so we can be lazy)
    
    lines = lines[79:]
    
    for line in lines:
        rapert, apsum, area, flux = map(float, line.split()[:4])  # avoid shadowing the builtin sum
        fluxes.append(flux)
        npix.append(area)
    
    frame.radii, frame.fluxes, frame.npix = np.array(radii), np.array(fluxes), np.array(npix)
    return frame.radii, frame.fluxes, frame.npix
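profile_from_growthcurve is used throughout this section but not shown. A minimal sketch of one plausible implementation, assuming it turns the enclosed-flux curve into a per-radius profile by differencing annuli; the attribute names match how frames are used above, everything else is an assumption:

# Sketch only -- one plausible way to derive a profile from the curve of growth.
import numpy as np

def profile_from_growthcurve_sketch(frame):
    # Flux added in each annulus divided by the pixels added in that annulus
    # gives the mean counts per pixel at that radius.
    dflux = np.concatenate(([frame.fluxes[0]], np.diff(frame.fluxes)))
    dnpix = np.concatenate(([frame.npix[0]], np.diff(frame.npix)))
    frame.profile = dflux / np.where(dnpix > 0, dnpix, 1.0)
    return frame.profile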