Example #1
    def __init__(self, cmdline_options=None, hdulist=None):

        # set command-line options to a safe value
        if (cmdline_options is None):
            cmdline_options = set_default_options()
        self.options = cmdline_options

        self.filtername = None
        self.ota = None

        self.fpl = None
        self.detector_glow = "yes"
        self.binning = 1
        self.logger = logging.getLogger("Calibrations")

        if (hdulist is not None):
            self.filtername = hdulist[0].header['FILTER']
            self.fpl = podi_focalplanelayout.FocalPlaneLayout(inp=hdulist)
            self.binning = get_binning(hdulist[0].header)
            self.filter_level = get_filter_level(hdulist[0].header)

        self.mastercal_dir = "%s/mastercals" % (sitesetup.exec_dir)
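
    # Minimal usage sketch (assumptions: this __init__ belongs to a
    # calibration-manager class, here called `Calibrations` after its logger
    # name; the FITS filename is hypothetical):
    #
    #   hdulist = pyfits.open("exposure.fits")
    #   calib = Calibrations(cmdline_options=None, hdulist=hdulist)
    #   print(calib.filtername, calib.binning, calib.mastercal_dir)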
Example #2
            hdulist[0].header['FPPOS'] = 'xy%02d' % (new)

            # shift the observing date forward by 3 years (approximated
            # as 3 * 365 days, ignoring leap days)
            date_format = "%Y-%m-%dT%H:%M:%S.%f"
            date_obs = datetime.datetime.strptime(
                hdulist[0].header['DATE-OBS'], date_format)

            date_new = date_obs + datetime.timedelta(days=3 * 365)
            hdulist[0].header['DATE-OBS'] = date_new.strftime(date_format)

            hdulist[0].header['MJD-OBS'] += 3 * 365  # shift MJD by the same ~3 years

            fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist[0])

            obsid_timefmt = "%Y%m%dT%H%M%S"
            # bump the year embedded in the OBSID by 3 to match the new date
            new_obsid = "%s%04d%s" % (obsid[0], int(obsid[1:5]) + 3, obsid[5:])

            # Change the OTA-ID to simulate a more realistic pattern for broken cells
            hdulist[0].header["OTA_ID"] = "%d" % (
                fpl.get_otaid_from_position(new))
            hdulist[0].header['OBSID'] = new_obsid[1:]

            hdulist[0].header['FILENAME'] = "%s.%02d.fits" % (new_obsid, new)
Example #3
def make_fringing_template(input_filelist, outputfile, return_hdu=False, 
                           skymode='local', operation="nanmedian.bn",
                           bpm_dir=None, wipe_cells=None, ocdclean=False,
):
    """

    Create a fringe template from the given list of suitable input frames. 

    For each frame, compute the sky-level and the sky-countrate. Frames with
    sky-countrates exceeding a filter-specific level are ignored, as the sky is
    very likely contaminated by stray light. This also eliminates frames with
    background gradients. Each frame is then background-subtracted and
    normalized by its background-level, leaving only the fringe amplitude
    behind.

    To eliminate sources, all data for a given extension are then
    median-combined. Once this is complete for all available extensions, the
    resulting fringe maps are written to the output FITS file.

    """

    logger = logging.getLogger("MakeFringeTemplate")

    # First loop over all filenames: make sure each file exists and reject
    # frames whose sky countrate indicates stray-light contamination
    hdu_filelist = []
    for filename in input_filelist:
        if (os.path.isfile(filename)):

            with pyfits.open(filename) as hdulist:
                exptime = hdulist[0].header['EXPTIME']
                filter = hdulist[0].header['FILTER']
                skylevel = hdulist[0].header['SKYLEVEL']
                if (filter in avg_sky_countrates):
                    max_skylevel = 2 * avg_sky_countrates[filter] * exptime
                    if (skylevel > max_skylevel):
                        logger.info("Frame %s exceeds sky-level limitation (%.1f vs %.1f cts/s)" % (
                            filename, skylevel/exptime, max_skylevel))
                        continue

            hdu_filelist.append(filename)

    if (len(hdu_filelist) <= 0):
        stdout_write("No existing files found in input list, hence nothing to do!\n")
        return
    
    # Read the input parameters
    # Note that file headers are copied from the first file

    # Create the primary extension of the output file
    ref_hdulist = pyfits.open(hdu_filelist[0])
    primhdu = pyfits.PrimaryHDU(header=ref_hdulist[0].header)
    fpl = podi_focalplanelayout.FocalPlaneLayout(ref_hdulist)

    # Add PrimaryHDU to list of OTAs that go into the output file
    out_hdulist = [primhdu]

    filtername = primhdu.header['FILTER']
    if (outputfile == "auto"):
        outputfile = "fringes__%s.fits" % (filtername)

    print("Output file=",outputfile)

    #
    # Prepare all input frames, just like for the illumination correction, i.e.
    # - mask out sources
    # - eliminate OTAs used for guiding
    # - wipe out certain cells
    #
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.Queue()

    logger.info("Preparing all individual fringe template frames")
    number_files_sent_off = 0
    for fitsfile in hdu_filelist:
        logger.debug("Queuing %s" % (fitsfile))
        queue.put(fitsfile)
        number_files_sent_off += 1

    additional_sextractor_options = """
        -FILTER N
        -DETECT_THRESH 3
        -BACK_SIZE"""

    conf_file = "%s/config/fringing.conf" % (sitesetup.exec_dir)
    processes = []
    for i in range(sitesetup.number_cpus):
        logger.debug("Starting process #%d" % (i+1))
        p = multiprocessing.Process(target=podi_illumcorr.compute_illumination_frame,
                                    kwargs = {'queue': queue,
                                              'return_queue': return_queue,
                                              'tmp_dir': sitesetup.swarp_singledir,
                                              'redo': True,
                                              'mask_guide_otas': True, #mask_guide_otas,
                                              'mask_regions': None, #mask_regions,
                                              'bpm_dir': bpm_dir,
                                              'wipe_cells': wipe_cells,
                                              'ocdclean': ocdclean,
                                              'apply_correction': False,
                                              'conf_file': conf_file,
                                          },
                                    # args=(queue, return_queue),
        )
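        # one None sentinel per worker so each process can shut down cleanly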
        queue.put(None)
        p.start()
        processes.append(p)

    masked_list = []
    for i in range(number_files_sent_off):
        masked_frame = return_queue.get()
        if (masked_frame is not None):
            masked_list.append(masked_frame)

    for p in processes:
        p.join()

    logger.info("All files prepared, combining ...")


    #
    # Now loop over all extensions and compute the mean
    #
    for extid, ext in enumerate(ref_hdulist):

        # Check what OTA we are dealing with
        if (not is_image_extension(ext)):
            continue

        data_blocks = []
        extname = ext.name 

        if (filtername in fpl.otas_for_photometry):
            useful_otas = fpl.otas_for_photometry[filtername]
            ota_id = int(extname[3:5])
            if (not ota_id in useful_otas):
                continue

        stdout_write("\rCombining frames for OTA %s (#%2d/%2d) ..." % (extname, extid, len(ref_hdulist)))

        # Now open all the other files, look for the right extension, and copy their image data to buffer
        #for file_number in range(0, len(filelist)):
        for filename in masked_list:

            try:
                hdulist = pyfits.open(filename)
                this_hdu = hdulist[extname]
            except (IOError, KeyError):
                # file missing/corrupt, or extension not present in this frame
                continue

            # Skip all OTAs that are marked as video/guide OTAs
            cellmode = this_hdu.header['CELLMODE']
            if (cellmode.find("V") >= 0):
                continue
            
            skylevel = this_hdu.header['SKY_MEDI']

            if (skymode == 'global'):
                skylevel = hdulist[0].header['SKYLEVEL']
            if ("EXPTIME" in hdulist[0].header):
                exptime = hdulist[0].header['EXPTIME']
                filter = hdulist[0].header['FILTER']
                if (filter in avg_sky_countrates):
                    max_skylevel = 2 * avg_sky_countrates[filter] * exptime
                    if (skylevel > max_skylevel):
                        stdout_write(" (%.1f)" % (skylevel/exptime))
                        continue

            fringing = (this_hdu.data - skylevel) / skylevel
            stdout_write(" %.1f" % (skylevel/exptime))
            data_blocks.append(fringing)

            # close the input file to free its memory before moving on
            hdulist.close()

        stdout_write(" combining ...")
        combined = podi_imcombine.imcombine_data(data_blocks, operation)

        # Create new ImageHDU
        # Insert the imcombine'd frame into the output HDU
        # Copy all headers from the reference HDU
        # stdout_write(" creating HDU ...")
        hdu = pyfits.ImageHDU(header=ext.header, data=combined)

        # Append the new HDU to the list of result HDUs
        out_hdulist.append(hdu)
        stdout_write(" done!\n")

        del hdu

    # Add the assumed skylevel countrate to primary header so we can use it
    # when it comes to correcting actual data
    out_hdulist[0].header["SKYCNTRT"] = (avg_sky_countrates[filter], "average countrate of dark sky")

    out_hdu = pyfits.HDUList(out_hdulist)
    if (not return_hdu and outputfile is not None):
        stdout_write(" writing results to file %s ..." % (outputfile))
        clobberfile(outputfile)
        out_hdu.writeto(outputfile, overwrite=True)
        out_hdu.close()
        del out_hdu
        del out_hdulist
        stdout_write(" done!\n")
    elif (return_hdu):
        stdout_write(" returning HDU for further processing ...\n")
        return out_hdu
    else:
        stdout_write(" couldn't write output file, no filename given!\n")

    return
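
# Minimal usage sketch (hypothetical file names; outputfile="auto" derives the
# name from the FILTER keyword as implemented above):
if __name__ == "__main__":
    fringe_inputs = ["sky_0001.fits", "sky_0002.fits", "sky_0003.fits"]
    make_fringing_template(fringe_inputs, outputfile="auto",
                           skymode='local', operation="nanmedian.bn")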
Example #4
def measure_focus_ota(filename, n_stars=5):
    """

    Obtain a focus measurement from the specified file. To do so,

    1) run source extractor to get FWHM measurements and positions for all sources

    2) group sources into vertical sequences as expected from the function 
       of the focus tool

    3) assign physical focus positions to each measurement

    4) return final catalog back to master so a final, combined focus curve
       can be assembled.

    """

    # print"\n\n\nworking on file ",filename,"\n\n\n"

    logger = logging.getLogger("MeasureFocusOTA")
    try:
        hdulist = pyfits.open(filename)
    except IOError:
        logger.debug("Can't open file %s" % (filename))
        return None
    except:
        podi_logging.log_exception()
        return None

    obsid = hdulist[0].header['OBSID']
    ota = hdulist[1].header['WN_OTAX'] * 10 + hdulist[1].header['WN_OTAY']
    ota_id = hdulist[0].header['OTA_ID']

    logger = logging.getLogger("MeasureFocusOTA: %s(%02d)" % (obsid, ota))
    logger.info("Starting work ...")

    # re-derive the OTA number from the focal-plane position in the header
    ota = int(hdulist[0].header['FPPOS'][2:4])

    # Check the object name to see if it contains information about the exposure
    focus_positions = numpy.arange(n_stars)[::-1] + 1.
    real_focus_positions = False
    object_name = hdulist[0].header['OBJECT']
    if (object_name.startswith("Focus Center")):
        # This looks like it might be the right format
        try:
            items = object_name.split()
            # Check all items
            if (len(items) == 7 and items[0] == "Focus"
                    and items[1] == "Center" and items[3] == "NStep"
                    and items[5] == "DStep"):
                n_stars = int(items[4])
                focus_center = float(items[2])
                focus_step = float(items[6])
                focus_start = focus_center - (n_stars - 1) / 2 * focus_step
                focus_positions = numpy.arange(
                    n_stars,
                    dtype=numpy.float32)[::-1] * focus_step + focus_start
                logger.debug(
                    "Info from header: N=%d, center=%.0f, step=%.0f, start=%.0f"
                    % (n_stars, focus_center, focus_step, focus_start))
                logger.debug("Focus positions: %s" % (str(focus_positions)))
                real_focus_positions = True
        except (ValueError, IndexError):
            # malformed OBJECT header -- fall back to default focus positions
            pass

    # Run SourceExtractor on the file
    sex_config = "%s/config/focus.sexconf" % (sitesetup.exec_dir)
    sex_param = "%s/config/focus.sexparam" % (sitesetup.exec_dir)
    catfile = "%s/tmp.%s_OTA%02d.cat" % (sitesetup.scratch_dir, obsid, ota)
    sex_cmd = "%(sexcmd)s -c %(sex_config)s -PARAMETERS_NAME %(sex_param)s -CATALOG_NAME %(catfile)s %(filename)s" % {
        "sexcmd": sitesetup.sextractor,
        "sex_config": sex_config,
        "sex_param": sex_param,
        "catfile": catfile,
        "filename": filename,
        "redirect": sitesetup.sex_redirect,
    }
    # print "\n"*10,sex_cmd,"\n"*10
    # Run source extractor
    # catfile = "/tmp//tmp.pid4383.20121008T221836.0_OTA33.cat"

    if (not os.path.isfile(catfile)):
        logger.debug("Running source extractor to search for stars")
        start_time = time.time()
        try:
            ret = subprocess.Popen(sex_cmd.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            (sex_stdout, sex_stderr) = ret.communicate()
            if (ret.returncode != 0):
                logger.warning(
                    "Sextractor might have a problem, check the log")
                logger.info("Stdout=\n" + sex_stdout.decode())
                logger.info("Stderr=\n" + sex_stderr.decode())
        except OSError as e:
            podi_logging.log_exception()
            print("Execution failed:", e, file=sys.stderr)
        end_time = time.time()
        logger.debug("SourceExtractor finished after %.2f seconds" %
                     (end_time - start_time))
    else:
        logger.debug("Source catalog already exists, re-using the old file")

    # Note: the temporary catalog is deliberately not deleted here, so it can
    # be re-used on subsequent runs (see the os.path.isfile check above)

    #
    # Load the source catalog.
    # Handle cases of non-existing or empty catalogs
    #
    logger.debug("loading the source catalog from %s" % (catfile))
    try:
        source_cat = numpy.loadtxt(catfile)
    except IOError:
        logger.warning("The Sextractor catalog is empty, ignoring this OTA")
        return None
    if (source_cat.ndim < 2 or source_cat.shape[0] <= 0):
        # no usable sources found (a lone source is also of no use here)
        return None

    # print "\n\n total sources in raw file",source_cat.shape,"\n\n"
    logger.debug("Found %d sources in raw SourceExtractor catalog %s" %
                 (source_cat.shape[0], catfile))

    #
    # Now convert all X/Y values to proper OTA X/Y coordinates based on their
    # extension number
    #
    corr_cat = None
    # print "extensions:", source_cat[:,SXFocusColumn['extension']]

    for i in range(len(hdulist)):
        if (not is_image_extension(hdulist[i])):
            # print "skipping extension",i,", this is not an image extension"
            continue

        # get cell_x, cell_y from this extension
        cell_x = hdulist[i].header['WN_CELLX']
        cell_y = hdulist[i].header['WN_CELLY']
        x1, x2, y1, y2 = cell2ota__get_target_region(cell_x, cell_y, 1)

        # Create a mask for all sources in this cell
        in_this_cell = (source_cat[:, SXFocusColumn['extension']] == i)
        if (numpy.sum(in_this_cell) <= 0):
            logger.debug("Couldn't find any sources in cell %d,%d" %
                         (cell_x, cell_y))
            continue

        # print "found",numpy.sum(in_this_cell),"sources for cell",cell_x, cell_y, "  adding", x1, y1

        cell_cat = source_cat[in_this_cell]
        cell_cat[:, SXFocusColumn['x']] += x1
        cell_cat[:, SXFocusColumn['y']] += y1

        #
        # get overscan-level data for this cell
        #
        binning = get_binning(hdulist[i].header)
        overscan_data = extract_biassec_from_cell(hdulist[i].data, binning)
        overscan_level = numpy.mean(overscan_data)
        cell_cat[:, SXFocusColumn['background']] -= overscan_level

        corr_cat = cell_cat if corr_cat is None else numpy.append(
            corr_cat, cell_cat, axis=0)

    #
    # Attach to each measurement what detector lot it came from
    #
    if (corr_cat is None):
        # no sources were found in any cell of this OTA
        return None
    fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist)
    detector_lot = fpl.get_detector_generation(ota_id)
    corr_cat[:, SXFocusColumn['ota_lot']] = detector_lot

    #
    # Also override the extension number in the source catalog with the
    # position in the overall focal plane
    #
    corr_cat[:, SXFocusColumn['extension']] = ota

    # print "\n\n\n\ntotal corrected catalog:",corr_cat.shape
    # save the source catalog
    #numpy.savetxt("focus_cat.ota%02d" % (ota), source_cat)
    #numpy.savetxt("focus_cat2.ota%02d" % (ota), corr_cat)
    logger.debug("done fixing the pixel coordinates")

    # only select bright enough sources
    bright_enough = corr_cat[:, SXFocusColumn['mag_auto']] < -10
    corr_cat = corr_cat[bright_enough]
    #numpy.savetxt("focus_cat3.ota%02d" % (ota), corr_cat)

    #dummy_test = open("dummy.test", "w")
    # Now try to match up stars in a sequence
    all_angles, all_distances = [], []
    for s1 in range(corr_cat.shape[0]):
        # Assume this is the middle star in the sequence

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:, SXFocusColumn['x']] - corr_cat[s1, 2]
        dy = corr_cat[:, SXFocusColumn['y']] - corr_cat[s1, 3]
        d_total = numpy.hypot(dx, dy)

        # select stars in a (near-)vertical cone: |dx/dy| < 0.1; the star
        # itself (dx=dy=0) yields NaN, which compares False and drops out
        in_cone = numpy.fabs(dx / dy) < 0.1

        # Need to be at most n_stars * 10'' and at least 5''
        close_enough = (d_total <
                        (n_stars * 10. / 0.11)) & (d_total > 5 / 0.11)
        good_so_far = in_cone & close_enough
        if (numpy.sum(good_so_far) <= 0):
            continue

        candidates = corr_cat[good_so_far]
        # if (numpy.sum(candidates) < n_stars):
        #     # Only use full sequences
        #     continue

        # are the magnitudes comparable
        delta_mag = numpy.fabs(candidates[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']])
        similar_brightness = delta_mag < 1
        #print >>dummy_test, "#", candidates.shape[0], numpy.sum(similar_brightness)
        #print similar_brightness
        if (numpy.sum(similar_brightness) <= 0):
            continue

        good_candidates = candidates[similar_brightness]

        # Now sort the data with increasing y values
        si = numpy.argsort(good_candidates[:, SXFocusColumn['y']])
        sorted_candidates = good_candidates[si]

        #numpy.savetxt(dummy_test, sorted_candidates)
        #print >>dummy_test, "\n\n\n"

        # Now compute the slope and distance between each point and each point
        # above it
        for p1, p2 in itertools.combinations(range(sorted_candidates.shape[0]),
                                             2):
            angle = numpy.arctan2(
                sorted_candidates[p1, 2] - sorted_candidates[p2, 2],
                sorted_candidates[p1, 3] - sorted_candidates[p2, 3])
            distance = numpy.sqrt(
                (sorted_candidates[p1, 2] - sorted_candidates[p2, 2])**2 +
                (sorted_candidates[p1, 3] - sorted_candidates[p2, 3])**2)
            all_angles.append(angle)
            all_distances.append(distance)

    #dummy_test.close()

    # Once we are through with the first iteration find the best-fitting angle

    all_angles = numpy.array(all_angles)
    all_distances = numpy.array(all_distances)

    # Find the best, or rather most frequently occurring, angle
    all_angles[all_angles < 0] += 2 * math.pi

    #numpy.savetxt("dummy.angles", all_angles)
    #numpy.savetxt("dummy.distances", all_distances)

    filtered_angles = three_sigma_clip(all_angles)
    if (filtered_angles is None or filtered_angles.ndim < 1
            or filtered_angles.shape[0] <= 0):
        return None

    # use a generous 5th-95th percentile range of the clipped angle distribution
    angle_width = scipy.stats.scoreatpercentile(filtered_angles, [5, 95])

    logger.debug(
        "Found median angle %f [%f ...%f]" %
        (numpy.degrees(numpy.median(filtered_angles)),
         numpy.degrees(angle_width[0]), numpy.degrees(angle_width[1])))

    #
    # Now we can do another proper search for all stars
    # This time, only search for complete series (#stars as specified)
    #
    #focus_stars = open("focus_stars", "w")
    all_candidates = []
    for s1 in range(corr_cat.shape[0]):

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:, SXFocusColumn['x']] - corr_cat[s1, 2]
        dy = corr_cat[:, SXFocusColumn['y']] - corr_cat[s1, 3]

        angles = numpy.arctan2(dx, dy)
        angles[angles < 0] += 2 * math.pi

        d_total = numpy.hypot(dx, dy)
        #print numpy.degrees(angle_width), numpy.degrees(angles)

        #print angle_width[0], angle_width[1]
        in_cone1 = (angles > angle_width[0]) & (angles < angle_width[1])
        in_cone2 = (angles + math.pi > angle_width[0]) & (angles + math.pi <
                                                          angle_width[1])
        in_cone = in_cone1 | in_cone2
        # print angles[in_cone]
        #print angles[in_cone][0], angles[in_cone][0] > angle_width[0], angles[in_cone][0] < angle_width[1]
        close_enough = (d_total <
                        ((n_stars + 1) * 10. / 0.11)) & (d_total > 5 / 0.11)
        similar_brightness = numpy.fabs(corr_cat[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']]) < 1
        good = in_cone & close_enough & similar_brightness
        good[s1] = True
        # print s1, ":", numpy.sum(in_cone), numpy.sum(close_enough), numpy.sum(similar_brightness), numpy.sum(good)

        if (numpy.sum(good) <= 1):
            continue

        candidates = corr_cat[good]
        # print "# canddates =", candidates.shape[0]

        if (candidates.shape[0] != n_stars):
            # Only use full sequences
            continue

        # print "found match:",s1

        # Now we have a set with the right number of stars, matching the overall
        # angle, and with similar brightnesses
        # sort them top to bottom
        si = numpy.argsort(candidates[:, 3])
        #numpy.savetxt(focus_stars, candidates[:,3])
        #numpy.savetxt(focus_stars, si)
        sorted_candidates = candidates[si]

        # print "XXX", sorted_candidates.shape, sorted_candidates[:,0].shape, focus_positions.shape, n_stars, candidates.shape[0]

        sorted_candidates[:, 0] = focus_positions
        #numpy.savetxt(focus_stars, sorted_candidates)
        #numpy.savetxt(focus_stars, numpy.degrees(angles[good]))
        #numpy.savetxt(focus_stars, in_cone[good])
        #numpy.savetxt(focus_stars, d_total[good])
        #numpy.savetxt(focus_stars, (corr_cat[:,10] - corr_cat[s1,10])[good])
        #print >>focus_stars, "\n\n\n\n"

        all_candidates.append(sorted_candidates)

    #focus_stars.close()

    all_candidates = numpy.array(all_candidates)
    logger.debug(str(all_candidates.shape))

    #xxx = open("steps", "w")
    # Now compute the distances from each star to the previous

    step_vectors = []

    for i in range(1, n_stars):
        # logger.debug("Candidates: %d %s\n%s" % (all_candidates.ndim, str(all_candidates.shape), str(all_candidates)))
        if (all_candidates.ndim < 1 or all_candidates.shape[0] <= 0):
            # We ran out of candidates
            logger.debug("We ran out of viable candidates after %s stars" %
                         (i))
            return None

        steps = all_candidates[:, i, 2:4] - all_candidates[:, i - 1, 2:4]
        #numpy.savetxt(xxx, steps)
        #print >>xxx, "\n\n\n\n"

        logger.debug("Computing average step size, star %d" % (i))
        logger.debug("Steps-X:\n%s" % (str(steps[:, 0])))
        logger.debug("Steps-y:\n%s" % (str(steps[:, 1])))
        clean_dx = three_sigma_clip(steps[:, 0])
        clean_dy = three_sigma_clip(steps[:, 1])

        # Check that both clean_dx and clean_dy are valid and non-empty
        logger.debug("clean-dx=%s" % (str(clean_dx)))
        logger.debug("clean-dy=%s" % (str(clean_dy)))
        if (clean_dx is None or clean_dy is None
                or clean_dx.ndim < 1 or clean_dx.shape[0] <= 0
                or clean_dy.ndim < 1 or clean_dy.shape[0] <= 0):
            logger.debug("Can't find a clean dx/dy shift in iteration %d" %
                         (i))
            return None

        dx = numpy.median(clean_dx)
        dy = numpy.median(clean_dy)

        distx = scipy.stats.scoreatpercentile(clean_dx, [16, 84])
        disty = scipy.stats.scoreatpercentile(clean_dy, [16, 84])
        sigma_x = 0.5 * (distx[1] - distx[0])
        sigma_y = 0.5 * (disty[1] - disty[0])

        step_vectors.append([dx, dy, sigma_x, sigma_y])

        good_steps = (steps[:,0] > (dx - 3*sigma_x)) & (steps[:,0] < (dx + 3*sigma_x)) \
            & (steps[:,1] > (dy - 3*sigma_y)) & (steps[:,1] < (dy + 3*sigma_y))

        logger.debug("before step-matching #%d: %s" %
                     (i, str(all_candidates.shape)))
        all_candidates = all_candidates[good_steps]
        logger.debug("after step-matching: #%d: %s" %
                     (i, str(all_candidates.shape)))

    logger.debug("%s: %s" % (filename, str(step_vectors)))


    logger.debug("Found %d focus stars" % (all_candidates.shape[0]))
    return all_candidates, real_focus_positions

    logger.debug("Returning final FITS table catalog")
    return None
Example #5
def normalize_flatfield(filename,
                        outputfile,
                        binning_x=8,
                        binning_y=8,
                        repeats=3,
                        batchmode_hdu=None,
                        normalize_otas=None):

    logger = logging.getLogger("NormFlatField")
    logger.debug("Starting to normalize %s" % (str(batchmode_hdu)))

    if (batchmode_hdu is not None):
        hdulist = batchmode_hdu
    else:
        hdulist = pyfits.open(filename)

    filter = hdulist[0].header['FILTER']

    fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist)

    list_of_otas_to_normalize = fpl.get_science_area_otas(
        filter, include_vignetted=False)
    if (normalize_otas is not None):
        list_of_otas_to_normalize = normalize_otas

    logger.info("Using these OTAs to normalize overall flux:\n%s" %
                (", ".join(["%02d" % ota
                            for ota in list_of_otas_to_normalize])))

    flatfield_data = numpy.zeros(shape=(len(list_of_otas_to_normalize) * 4096 *
                                        4096 // (binning_x * binning_y)),
                                 dtype=numpy.float32)
    flatfield_data[:] = numpy.NaN

    # also prepare to store the global gain value
    gain_sum = 0
    gain_count = 0

    datapos = 0
    for extension in range(1, len(hdulist)):
        if (not is_image_extension(hdulist[extension])):
            continue

        fppos = int(hdulist[extension].header['FPPOS'][2:4])

        if (fppos not in list_of_otas_to_normalize):
            # We didn't find this OTA in the list, so skip it
            hdulist[extension].header["FF_NORM"] = (False,
                                                    "Used in normalization")
            continue

        hdulist[extension].header["FF_NORM"] = (True, "Used in normalization")

        gain_ota = hdulist[extension].header['GAIN']
        gain_ota_count = hdulist[extension].header['NGAIN']

        gain_sum += gain_ota * gain_ota_count
        gain_count += gain_ota_count

        # We now know that we should include this OTA in the
        # calculation of the flat-field normalization
        logger.debug("Adding OTA %02d to flat-field ..." % fppos)
        #flatfield_data = numpy.concatenate((flatfield_data, extension.data.flatten()))
        #flatfield_data[extension,:,:] = extension.data

        if (binning_x > 1 or binning_y > 1):
            sx, sy = hdulist[extension].data.shape[0], hdulist[
                extension].data.shape[1]
            bx, by = sx // binning_x, sy // binning_y
            one_d = numpy.reshape(hdulist[extension].data,
                                  (by, binning_y, bx, binning_x)).mean(
                                      axis=-1).mean(axis=1).flatten()
        else:
            one_d = hdulist[extension].data.flatten()

        flatfield_data[datapos:datapos + one_d.shape[0]] = one_d

        datapos += one_d.shape[0]
        #print datapos
        del one_d

    # Remove all remaining NaNs and truncate the array to the values actually used
    finite = numpy.isfinite(flatfield_data[:datapos])
    flatfield_data = flatfield_data[:datapos][finite]

    # Now we are through all flatfields, compute the median value
    logger.debug(" computing median ...")
    sigma_min, sigma_max = -1e5, 1e6
    for i in range(repeats):

        valid = (flatfield_data > sigma_min) & (flatfield_data < sigma_max)

        ff_median_level = numpy.median(flatfield_data[valid])
        ff_std = numpy.std(flatfield_data[valid])

        sigma_min = ff_median_level - 2 * ff_std
        sigma_max = ff_median_level + 3 * ff_std
        #print i, numpy.sum(valid), datapos, ff_median_level, ff_std, sigma_min, sigma_max

    if (ff_median_level <= 0):
        logger.error("Something went wrong or this is no flatfield frame")
        ff_median_level = 1.0

    #stdout_write("\b\b\b(% 7.1f) ..." % (ff_median_level))
    logger.debug("Found median level % 7.1f ADU, normalizing ..." %
                 (ff_median_level))

    # Now normalize all image extensions in place by the median flatfield
    # level, masking out pixels that fall below 10% of the normalized level
    for extension in hdulist:
        if (not is_image_extension(extension)):
            continue

        extension.data /= ff_median_level
        extension.data[extension.data < 0.1] = numpy.NaN

    #
    # compute the global gain value and store it in primary header
    #
    logger.debug("Computing global gain value (sum=%.1f, #=%d)" %
                 (gain_sum, gain_count))
    global_gain = gain_sum / gain_count if (gain_count > 0) else -1.
    hdulist[0].header['GAIN'] = global_gain
    hdulist[0].header['NGAIN'] = gain_count

    logger.debug("writing results to file (%s) ..." % (outputfile))
    clobberfile(outputfile)
    hdulist.writeto(outputfile, overwrite=True)
    logger.info("done!")
Example #6
def create_quickview(filename, output_directory, verbose=False, clobber=True):

    logger = logging.getLogger("QuickView")

    create_otalevel = cmdline_arg_isset("-otalevel")
    scaling = cmdline_arg_set_or_default("-scaling", None)
    if (scaling not in ['linear', 'log', 'sqrt', 'arcsinh', 'asinh']):
        scaling = 'sqrt'

    hdulist = pyfits.open(filename)
    filter = hdulist[0].header['FILTER']
    obsid = hdulist[0].header['OBSID']
    object = hdulist[0].header['OBJECT'].replace(' ', '_').replace(',', '_')

    fullframe_image_filename = "%s/%s_%s.%s.jpg" % (output_directory, obsid,
                                                    object, scaling)
    if (os.path.isfile(fullframe_image_filename) and not clobber):
        # File exists and we were asked not to overwrite anything
        stdout_write("\nFile (%s) exists, skipping ...\n" % (filename))
        return

    if (verbose):
        stdout_write("\nWorking on file %s (%s, %s) ...\n" %
                     (filename, object, filter))

    fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist)
    try:
        list_of_otas_to_normalize = fpl.otas_to_normalize_ff[filter]
    except:
        list_of_otas_to_normalize = fpl.central_2x2

    # Allocate some memory to hold the data we need to determine the
    # most suitable intensity levels
    binned_data = numpy.zeros(shape=(13 * 512 * 512), dtype=numpy.float32)
    binned_data[:] = numpy.NaN

    #
    # Collect the focal-plane coordinates of all available OTAs
    #
    available_ota_coords = []
    for ext in hdulist:
        if (not is_image_extension(ext)):
            continue
        try:
            ota = ext.header['OTA']
        except:
            continue
        x = int(math.floor(ota / 10.))
        y = int(ota % 10)
        available_ota_coords.append((x, y))

    datapos = 0
    logger.info("Finding best contrast")
    for extension in range(1, len(hdulist)):
        if (not is_image_extension(hdulist[extension])):
            continue
        if ('CELLMODE' in hdulist[extension].header
                and hdulist[extension].header['CELLMODE'].find("V") >= 0):
            logger.info("Skipping guide-OTA %s" %
                        (hdulist[extension].header['EXTNAME']))
            continue

        fppos = int(hdulist[extension].header['FPPOS'][2:4])

        if (fppos not in list_of_otas_to_normalize):
            # We didn't find this OTA in the list, so skip it
            hdulist[extension].header['FF_NORM'] = (False,
                                                    "Used in normalization")
            continue

        logger.debug("Reading OTA %02d" % (fppos))
        #stdout_write("\rReading OTA %02d" % (fppos))
        #        if (verbose):
        #            stdout_write(" %02d" % (fppos))

        # Rebin the frame 8x8 to make it more manageable
        binned = numpy.reshape(hdulist[extension].data,
                               (512, 8, 512, 8)).mean(axis=-1).mean(axis=1)
        one_d = binned.flatten()
        binned_data[datapos:datapos + one_d.shape[0]] = one_d
        datapos += one_d.shape[0]
        del one_d
        del binned

    #if (verbose):
    #    stdout_write(" - done!\n")

    #
    # Now we are through all OTA/extensions, compute the median value and stds
    # so we can scale the frames accordingly
    #
    #if (verbose):
    #    stdout_write("   Finding best intensity levels ...")
    median = 0
    std = 1e8
    binned_data = binned_data[0:datapos]
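    # three clipping passes: reject outliers outside (median - std,
    # median + 3*std) and recompute the robust statistics each time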
    for looper in range(3):
        valid = (binned_data > (median - std)) & (binned_data <
                                                  (median + 3 * std))
        #print numpy.sum(valid)
        median = numpy.median(binned_data[valid])
        std = numpy.std(binned_data[valid])
        #print median, std, median-std, median+3*std

    # Compute the intensity levels, making sure not to exceed the maximum possible range
    min_level = numpy.max([median - 1 * std, 0])
    max_level = numpy.min([median + 8 * std, 60000])
    # stdout_write(" using %d ... %d\n" % (min_level, max_level))
    logger.info("Using intensity scale %d ... %d" % (min_level, max_level))

    #
    # Now that we have all the scaling factors, go ahead and create the preview images
    #

    # Create space to hold the full 8x8 OTA focal plane
    full_focalplane = numpy.zeros(shape=(4096, 4096))
    # if (verbose):
    #     stdout_write("   Creating jpeg for OTA")
    logger.info("Creating jpeg for OTAs and full focalplane")
    for extension in range(1, len(hdulist)):
        if (not is_image_extension(hdulist[extension])):
            continue

        fppos = int(hdulist[extension].header['FPPOS'][2:4])
        logger.info("Creating JPG for extension %s" %
                    (hdulist[extension].name))
        #stdout_write("\r   Creating jpegs (%02d) ..." % fppos)
        # if (verbose):
        #     stdout_write(" %02d" % (fppos))

        fp_x = fppos % 10
        fp_y = fppos // 10

        hdulist[extension].data[numpy.isnan(hdulist[extension].data)] = 0
        binned = numpy.reshape(hdulist[extension].data,
                               (512, 8, 512, 8)).mean(axis=-1).mean(axis=1)

        greyscale = (binned - min_level) / (max_level - min_level)
        greyscale[greyscale < 0] = 0
        greyscale[greyscale >= 1] = 1

        print "ASINH?", (scaling in ['arcsinh', 'asinh'])
        if (scaling == 'sqrt'):
            greyscale = numpy.sqrt(greyscale)
        elif (scaling in ['arcsinh', 'asinh']):
            greyscale = numpy.arcsinh(greyscale)
        elif (scaling == 'log'):
            greyscale = numpy.log10(greyscale + 1) / numpy.log10(2.0)
        else:  # (scaling == 'linear')
            pass

        ffp_x = fp_x * 512
        ffp_y = fp_y * 512
        full_focalplane[ffp_x:ffp_x + 512, ffp_y:ffp_y + 512] = greyscale[:, :]

        #image = Image.fromarray(numpy.uint8(greyscale*255))
        #image_filename = "%s/%s_%s.%02d.jpg" % (output_directory, obsid, object, fppos)
        #image.transpose(Image.FLIP_TOP_BOTTOM).save(image_filename, "JPEG")
        #del image

        if (create_otalevel):
            #
            # Mark all overexposed pixels in a different color
            #
            channel_r = greyscale + 0
            channel_g = greyscale + 0
            channel_b = greyscale + 0

            channel_r[binned > limit_overexposed] = overexposed[0]
            channel_g[binned > limit_overexposed] = overexposed[1]
            channel_b[binned > limit_overexposed] = overexposed[2]

            im_r = Image.fromarray(numpy.uint8(channel_r * 255))
            im_g = Image.fromarray(numpy.uint8(channel_g * 255))
            im_b = Image.fromarray(numpy.uint8(channel_b * 255))
            im_rgb = Image.merge('RGB', (im_r, im_g, im_b))
            image_filename = "%s/%s_%s.%02d.rgb-%s.jpg" % (
                output_directory, obsid, object, fppos, scaling)
            im_rgb.transpose(Image.FLIP_TOP_BOTTOM).save(
                image_filename, "JPEG")

            # Delete all temporary images to keep memory demands low
            del im_r
            del im_g
            del im_b
            del im_rgb

    #
    # Prepare the preview for the full focal plane
    #
    #stdout_write("\r   Creating jpegs (full-frame) ...")
    # if (verbose):
    #     stdout_write(" full-frame")
    image = Image.fromarray(numpy.uint8(full_focalplane * 255))

    # Add lines to indicate detector borders. Make them wider than a single
    # pixel, otherwise they disappear when the image is displayed zoomed-out.
    draw = ImageDraw.Draw(image)
    for i in range(1, 8):
        for linewidth in range(-5, 0):
            draw.line(
                (0, i * 512 + linewidth, image.size[0], i * 512 + linewidth),
                fill=128)
            draw.line(
                (i * 512 + linewidth, 0, i * 512 + linewidth, image.size[1]),
                fill=128)

    if (crossout_missing_otas):
        # Now loop over all OTAs and mark the ones that do not exist
        for y in range(8):
            for x in range(8):
                if ((x, y) not in available_ota_coords):
                    # this OTA is not available -- cross it out in the
                    # full focal-plane image
                    for lw in range(-5, 0):
                        draw.line((x * 512 + lw, y * 512, (x + 1) * 512 + lw,
                                   (y + 1) * 512),
                                  fill=128)
                        draw.line((x * 512 + lw, (y + 1) * 512,
                                   (x + 1) * 512 + lw, y * 512),
                                  fill=128)

    image = image.transpose(Image.FLIP_TOP_BOTTOM)

    watermark_OTA = True
    dx, dy = 150, 150
    if (watermark_OTA):
        image = image.convert('RGBA')
        draw = ImageDraw.Draw(image)
        font = ImageFont.truetype('/usr/share/fonts/truetype/DroidSans.ttf',
                                  size=200)
        for x, y in itertools.product(range(8), repeat=2):
            if ((x, y) not in available_ota_coords):
                continue
            draw.text((x * 512 + dx, (7 - y) * 512 + dy),
                      "%02d" % (x * 10 + y),
                      font=font,
                      fill=(0, 64, 128, 255))

    logger.info("Writing file to %s" % (fullframe_image_filename))
    image.save(fullframe_image_filename, "JPEG")
    del image

    # stdout_write(" - done!\n")
    logger.info("all done!\n")