Example #1
0
    def load_photometry(self, mef):
        """Load broadband photometry for every source in this collection.

        mef - if truthy, read photometry from multi-extension FITS files;
              this path is not implemented and raises NotImplementedError.

        For each (filename, bandpass) pair in self.obscat, measure the
        instrumental flux of every source on that image, scale it by the
        bandpass photflam, and record the resulting SED on each source
        via Source.set_SED().
        """
        print('[info]Loading broadband photometry')

        lamb, flam = [], []
        for filename, bandpass in self.obscat.items():
            lamb.append(bandpass.photplam)  # the filter central wave
            photflam = bandpass.photflam  # flux-calibration factor

            if mef:
                raise NotImplementedError("MEF loading photometry")

            else:
                img = fitsimage.FITSImage(filename, 0)  # read the fits image
                flux = [
                    src.instrumental_flux(img) * photflam
                    for segid, src in self.items()
                ]

                flam.append(flux)

        # repackage the data: wavelengths as an array, and the per-band
        # flux lists transposed so there is one tuple per source
        lamb = np.array(lamb)
        flam = list(zip(*flam))

        # now record the data to the sources
        for src, flux in zip(self.values(), flam):
            src.set_SED(lamb, flux)
Example #2
0
    def from_classic(self, hdus, hdui):
        """Populate this collection from a CLASSIC segmentation map.

        hdus - the segmentation-map FITS file to read.
        hdui - the direct-image FITS file to read.

        Each non-sky segmentation region becomes a Source keyed by its
        segmentation ID, provided the Source reports itself as valid.
        """
        print('[info]Loading sources from CLASSIC segmentation map')

        # read the segmentation map and the direct image (extension 0)
        segmap = fitsimage.FITSImage(hdus, 0)
        direct = fitsimage.FITSImage(hdui, 0)

        # reverse-index the segmentation map: (segid, pixel indices) pairs
        rev = indices.reverse(segmap.image.astype(np.uint64))
        if rev[0][0] == 0:
            # index 0 is the sky; drop it from the map
            del rev[0]

        # progress bar over the segmentation regions
        progress = tqdm.tqdm(total=len(rev),
                             dynamic_ncols=True,
                             desc='Classic segmap')

        # zeropoint of the detection image
        zpt = self.obscat.detzero

        for segid, pix in rev:
            progress.desc = self.PREFIX.format(segid)

            # unravel the 1-d pixel indices into (x, y) coordinates
            xs, ys = indices.one2two(pix, np.flip(segmap.shape))

            # bounding box of this region
            box = (np.amin(xs), np.amax(xs), np.amin(ys), np.amax(ys))

            # cut out the postage stamps (like hextract)
            stamp_seg = segmap.extract(*box)
            stamp_img = direct.extract(*box)

            # stash the segmentation ID in the header for safe keeping
            stamp_seg['SEGID'] = (segid, 'Segmentation ID from pylinear')

            # build the source and keep it if it is valid
            source = Source(stamp_img, stamp_seg, zpt)
            if source.valid:
                self[segid] = source

            progress.update()
Example #3
0
File: astrometry.py  Project: ubutnux/lemon
def parallel_astrometry(args):
    """ Function argument of map_async() to do astrometry in parallel.

    This will be the first argument passed to multiprocessing.Pool.map_async(),
    which chops the iterable into a number of chunks that are submitted to the
    process pool as separate tasks. 'args' must be a three-element tuple with
    (1) a string with the path to the FITS image, (2) a string with the path to
    the output directory and (3) 'options', the optparse.Values object returned
    by optparse.OptionParser.parse_args().

    This function does astrometry on each FITS image with the astrometry_net()
    function. The output FITS files, containing the WCS headers calculated by
    Astrometry.net, are written to the output directory with the same basename
    as the original files but with the string options.suffix appended before
    the file extension.

    The path to each solved image is put, as a string, into the module-level
    'queue' object, a process shared queue. If the image cannot be solved, None
    is put instead. Note that the contents of the shared queue are necessary so
    that the progress bar can be updated to reflect the number of input images
    that have been processed so far. Apart from that, you most probably do not
    need to do anything with these paths, as the output files are written to
    the output directory by astrometry_net().

    """

    # unpack the single tuple argument required by map_async()
    path, output_dir, options = args

    img = fitsimage.FITSImage(path)
    # Add the suffix to the basename of the FITS image
    root, ext = os.path.splitext(os.path.basename(path))
    output_filename = root + options.suffix + ext
    dest_path = os.path.join(output_dir, output_filename)

    if options.blind:
        # blind solve: do not seed Astrometry.net with any coordinates
        msg = "%s: solving the image blindly (--blind option)"
        logging.debug(msg % img.path)
        ra = dec = None
        msg = "%s: using α = δ = None"
        logging.debug(msg % img.path)

    else:

        # read the approximate coordinates from the FITS header; fall back
        # to a blind solve (ra = dec = None) if the keywords are missing
        # or unparseable.  Python 2 'except Class, name' syntax.
        try:
            ra  = img.ra (options.rak)
            dec = img.dec(options.deck)
        except (ValueError, KeyError), e:
            msg = "%s: %s" % (img.path, str(e))
            logging.debug(msg)
            ra = dec = None
            msg = "%s: could not read coordinates from FITS header"
            logging.debug(msg % img.path)
            msg = "%s: using α = δ = None"
            logging.debug(msg % img.path)
            # NOTE(review): this excerpt appears truncated -- 'ra'/'dec' are
            # computed but not yet used within the visible lines; the call to
            # astrometry_net() and the queue handling presumably follow.
Example #4
0
    def __init__(self, img_path, coords_path):
        """ Instantiation method for the QPhot class.

        img_path - path to the FITS image on which to do photometry.
        coords_path - path to the text file with the celestial coordinates
                      (right ascension and declination) of the astronomical
                      objects to be measured. These objects must be listed one
                      per line, in two columns. Note that this class does *not*
                      apply proper-motion correction, so the coordinates must
                      be corrected before being written to the file. In case
                      the proper motions of the objects are listed in the file,
                      in columns third and fourth, ValueError is raised.

        """

        # super() must receive the class being defined, not its base class:
        # super(list, self) would skip QPhot's own bases in the MRO.
        super(QPhot, self).__init__()
        self.image = fitsimage.FITSImage(img_path)
        self.coords_path = coords_path

        # Validate the coordinates file: (a) warn once if any object sits at
        # exactly (0, 0), a tell-tale of an astrometrically uncalibrated
        # image, and (b) refuse files that list proper motions.
        for ra, dec, pm_ra, pm_dec in methods.load_coordinates(
                self.coords_path):
            if ra == 0 and dec == 0:
                msg = (
                    "the right ascension and declination of one or more "
                    "astronomical objects in '%s' is zero. This is a very bad "
                    "sign: these are the celestial coordinates that SExtractor "
                    "uses for sources detected on a FITS image that has not been "
                    "calibrated astrometrically (may that be your case?), and "
                    "without that it is impossible to do photometry on the "
                    "desired coordinates" % self.coords_path)
                # Emit warning only once, then stop scanning
                warnings.warn(msg)
                break

            if pm_ra is not None or pm_dec is not None:
                msg = ("at least one object in the '%s' file lists its proper "
                       "motions. This is not allowed. The coordinates must be "
                       "written to the file already adjusted for their proper "
                       "motions, as this class cannot apply any correction" %
                       self.coords_path)
                raise ValueError(msg)
Example #5
0
    def from_classic(self, hdus, hdui):
        """Load sources from a CLASSIC segmentation map.

        hdus - the segmentation-map FITS file to read.
        hdui - the direct-image FITS file to read.

        Populates self (a mapping of segid -> Source).  If a region's segid
        equals self.cubeid, that region is exploded into one single-pixel
        source per member pixel -- each keyed by the *negated* 1-d pixel
        index -- and a CompositeSource recording their bounding boxes is
        stored on self.composite.
        """
        print('[info]Loading sources from CLASSIC segmentation map')

        # load the images
        exten = 0
        seg = fitsimage.FITSImage(hdus, exten)
        img = fitsimage.FITSImage(hdui, exten)

        # get the reverse index
        revind = indices.reverse(seg.image.astype(np.uint64))
        if revind[0][0] == 0:
            del revind[0]  # remove the sky from the segmentation map

        # build a progress bar
        pb = tqdm.tqdm(total=len(revind),
                       dynamic_ncols=True,
                       desc='Classic segmap')

        # get the zeropoint
        zeropoint = self.obscat.detzero

        # process each index
        for segid, ri in revind:
            pb.desc = self.PREFIX.format(segid)

            # get the (x,y) pairs
            x, y = indices.one2two(ri, np.flip(seg.shape))

            # get bounding box
            bbox = (np.amin(x), np.amax(x), np.amin(y), np.amax(y))

            # call something like hextract
            subseg = seg.extract(*bbox)
            subimg = img.extract(*bbox)

            # check for a cube ID
            if segid == self.cubeid:
                print('[debug]This is hardcoded for a single-pixel region')

                self.composite = CompositeSource(segid, subseg)

                #y,x=np.mgrid[0:ny,0:nx]
                # y//=dy
                # x//=dx
                # xx,yy are pairs for new seg regions

                print('[info]Preparing cube-extraction for: {}'.format(segid))

                # one single-pixel source per member pixel of this region;
                # note that bbox/subseg/subimg are deliberately rebound here
                for xx, yy, ss in zip(x, y, ri):

                    bbox = (xx, xx, yy, yy)

                    # call something like hextract
                    subseg = seg.extract(*bbox)
                    subimg = img.extract(*bbox)
                    subid = -ss  # negated pixel index gives a unique key

                    # put the segID in the header for safe keeping
                    subseg.image.fill(subid)
                    subseg['SEGID'] = (subid, 'Segmentation ID from pylinear')

                    # create the source (minpix=1: a single pixel is enough)
                    src = Source(subimg, subseg, zeropoint, minpix=1)
                    if src.valid:
                        self[subid] = src
                        self.composite[subid] = bbox  # record the bounding box

                # reset the segid to create a unique entry
                #segid=-indices.two2one(x,y,dim)
            else:

                # put the segID in the header for safe keeping
                subseg['SEGID'] = (segid, 'Segmentation ID from pylinear')

                # create the source
                src = Source(subimg, subseg, zeropoint)
                if src.valid:
                    self[segid] = src

            # update
            pb.update()
Example #6
0
File: astrometry.py  Project: ubutnux/lemon
            msg = "%s did not solve. Ignored."

    # failure path: warn, and put None into the shared queue so the parent
    # process can still advance its progress bar for this image
    msg %= img.path
    warnings.warn(msg, RuntimeWarning)
    queue.put(None)
    logging.debug("%s: None put into global queue" % path)
    return

    # move the solved image to its final destination (Python 2
    # 'except Class, name' syntax)
    try:
        shutil.move(output_path, dest_path)
        logging.debug("%s: solved image saved to %s" % (path, dest_path))
    except (IOError, OSError), e:
        logging.debug("%s: can't solve image (%s)" % (path, str(e)))
        methods.clean_tmp_files(output_path)
        # NOTE(review): execution falls through after the cleanup, so the
        # FITSImage(dest_path) call below would operate on a file that was
        # never moved into place -- confirm whether a 'return' is missing.

    output_img = fitsimage.FITSImage(dest_path)

    # annotate the output image's header with provenance information
    debug_args = path, output_img.path
    logging.debug("%s: updating header of output image (%s)" % debug_args)
    msg1 = "Astrometry done via LEMON on %s" % methods.utctime()
    msg2 = "[Astrometry] WCS solution found by Astrometry.net"
    msg3 = "[Astrometry] Original image: %s" % img.path

    output_img.add_history(msg1)
    output_img.add_history(msg2)
    output_img.add_history(msg3)
    logging.debug("%s: header of output image (%s) updated" % debug_args)

    # success path: the solved image's path goes into the shared queue
    queue.put(output_img.path)
    msg = "{0}: astrometry result ({1!r}) put into global queue"
    logging.debug(msg.format(*debug_args))
Example #7
0
File: import.py  Project: ubutnux/lemon
def main(arguments = None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:] # ignore argv[0], the script name
    (options, args) = parser.parse_args(args = arguments)

    # Print the help message and abort the execution if there are not two
    # positional arguments left after parsing the options, as the user must
    # specify the path to both the input and output directories.

    if len(args) < 2:
        parser.print_help()
        return 2  # 2 is generally used for command line syntax errors
    else:
        input_dirs = args[:-1]
        output_dir = args[-1]

    # Make sure that all the input directories exist, abort otherwise.
    for path in input_dirs:
        if not os.path.exists(path):
            print "%sThe input directory, '%s', does not exist. Exiting." % \
                  (style.prefix, path)
            return 1

    # The input and output directories must be different, as otherwise some
    # files (especially if the filename of the output files is automatically
    # detected) could be overwritten.
    for path in input_dirs:
        if os.path.abspath(path) == os.path.abspath(output_dir):
            print "%s[INPUT_DIRS] and OUTPUT_DIR must be different. " \
                  "Exiting." % style.prefix
            return 1

    # Make sure that the output directory exists, create it otherwise
    methods.determine_output_dir(output_dir)

    # Recursively walk down the input directories, obtaining a list of all the
    # regular files. Then, and while a progress bar is shown to let the user
    # estimate how much longer it is, detect which among them are FITS files.

    print "%sIndexing regular files within directory trees starting at " \
          "INPUT_DIRS..." % style.prefix ,
    files_paths = fitsimage.find_files(input_dirs,
                                       followlinks = options.followlinks,
                                       pattern = options.pattern)
    print 'done.'

    print "%sDetecting FITS images among the %d indexed regular files..." % \
          (style.prefix, len(files_paths))

    images_set = set()
    methods.show_progress(0.0)
    for path_index, path in enumerate(files_paths):
        try:
            images_set.add(fitsimage.FITSImage(path))
            # NOTE(review): in Python 2 this is integer division unless
            # 'from __future__ import division' is in effect at file top,
            # making 'fraction' 0 until the very last file -- confirm.
            fraction = (path_index + 1) / len(files_paths) * 100
            methods.show_progress(fraction)
        except fitsimage.NonStandardFITS:
            pass
    # this 'else' belongs to the for loop: it runs whenever the loop is not
    # left via 'break' (always, here), completing the progress bar
    else:
        methods.show_progress(100)
        print

    if not len(images_set):
        print "%sNo FITS files were found. Exiting." % style.prefix
        return 1
    else:
        print "%s%d FITS files detected." % (style.prefix, len(images_set))

    # All the images must have the same size; otherwise, only those with the
    # most common dimensions will be imported, while the rest will be ignored
    print style.prefix
    print "%sChecking the sizes of the detected images..." % style.prefix,
    img_sizes = collections.defaultdict(int) # dimensions counter
    for img in images_set:
        img_sizes[img.size] += 1
    print 'done.'

    # The most common size is the only one element in case len(img_sizes) == 1
    # (iterkeys() is Python 2 only; img.size is presumably an (x, y, ...)
    # tuple -- confirm against fitsimage.FITSImage)
    x_size, y_size = max(img_sizes.iterkeys(), key = img_sizes.get)[:2]

    if len(img_sizes) == 1:
        print "%sAll the FITS images have the same size: %d x %d pixels" % \
              (style.prefix, x_size, y_size)
    else:

        print "%sMultiple sizes were detected among the FITS images." % style.prefix
        print "%sDiscarding images with a size other than %d x %d pixels, " \
              "the most common..." % (style.prefix, x_size, y_size) ,
        old_size = len(images_set)
        images_set = set(img for img in images_set if img.size == (x_size, y_size))
        print 'done.'

        if not images_set:
            print "%sThere are no FITS files left. Exiting." % style.prefix
            return 1
        else:
            print "%s%d FITS files were discarded because of their size, " \
                  "%s remain." % (style.prefix, old_size - len(images_set),
                                  len(images_set))

    # Those FITS images whose object names do not match any of the given
    # patterns, or which do not even have the keyword which contains the
    # name for the object observed, are discarded.
    print style.prefix
    print "%sImporting only those FITS files whose %s keyword can be found " \
          "and matches" % (style.prefix, options.objectk)
    print "%sone of the following Unix patterns: %s ..." % \
          (style.prefix, options.objectn)

    # We first test that the keyword exists (hence the pass for the KeyError
    # exception, which means that the image is filtered out) and, after that,
    # check whether its value matches one of the regular expressions which
    # define the object names to be imported.
    object_set = set()

    # Keep the track of how many images are ignored for each reason
    saturated_excluded = 0
    non_match_excluded = 0

    for img in images_set:

        try:
            object_name = img.read_keyword(options.objectk)
            for pattern in options.objectn:
                regexp = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
                if regexp.match(object_name):
                    # Even if the object name matchs, the median number of
                    # counts must still be below the threshold, if any. If the
                    # number of ADUs is irrelevant we can avoid having to
                    # unnecessarily compute it.
                    if options.max_counts:
                        # NOTE(review): pyfits.open() normally takes
                        # mode='readonly'; confirm the 'readonly' keyword is
                        # accepted by the pyfits version in use
                        with pyfits.open(img.path, readonly = True) as hdu:
                            median_counts = numpy.median(hdu[0].data)
                        if median_counts > options.max_counts:
                            print "%s%s excluded (matched, but saturated " \
                                  "with %d ADUs)" % (style.prefix, img.path,
                                                     median_counts)
                            saturated_excluded += 1
                            break

                    # This point reached if median number of ADUs of image is
                    # above the threshold or irrelevant, so it can be imported.
                    print "%s%s imported (%s matches '%s')" % (style.prefix,
                           img.path, object_name, pattern)

                    object_set.add(img)
                    break

            else: # only executed if for loop exited cleanly
                print "%s%s excluded (%s does not match anything)" % \
                      (style.prefix, img.path, object_name)
                non_match_excluded += 1
        except KeyError:
            pass

    if not saturated_excluded and not non_match_excluded:
        print "%sNo images were filtered out. Hooray!" % style.prefix
    if saturated_excluded:
        print "%s%d files were discarded because they were saturated " \
              "(> %d ADUs)." % (style.prefix, saturated_excluded,
                                options.max_counts)
    if non_match_excluded:
        print "%s%d files were discarded because of their non-matching " \
              "object names." % (style.prefix, non_match_excluded)

    # Abort the execution if all the FITS files were filtered out
    if not object_set:
        print "%sThere are no FITS files left. Exiting." % style.prefix
        return 1

    # Sort the FITS files by their date of observation, according to the header
    print style.prefix
    print "%sSorting the FITS files by their date of observation " \
          "[keyword: %s]..." % (style.prefix, options.datek) ,

    kwargs = dict(date_keyword = options.datek,
                  time_keyword = options.timek,
                  exp_keyword = options.exptimek)
    get_date = operator.methodcaller('date', **kwargs)
    sorted_imgs = sorted(object_set, key = get_date)

    # Let the user know if one or more images could not be sorted (because of
    # problems when parsing the FITS keywords from which the observation date
    # is derived) and thus discarded.
    difference = len(object_set) - len(sorted_imgs)
    assert difference >= 0
    if difference:
        print
        print "%s%d files were discarded as the observation date keyword " \
              "was not found or the " % (style.prefix, difference)
        print "%sdate in it represented did not conform to the FITS " \
              "standard." % style.prefix

        # Execution is aborted if all the FITS files were filtered out
        if not sorted_imgs:
            print "%sThere are no FITS files left. Exiting." % style.prefix
            return 1
    else:
        print 'done.'

    # If no filename for the output images was specified, attempt to
    # automatically detect the most common basename among the FITS files.
    # This is doing by extracting the leftmost non-numeric substring of
    # all the filenames and taking that which repeats the most.

    if not options.filename:
        print style.prefix
        print "%sDetecting the most common name among input files..." % \
              style.prefix ,
        sys.stdout.flush()

        # Use a dictionary in order to keep the track of how many times we
        # have come across each prefix (leftmost non-numeric substring in
        # the filename) and select that with most occurrences.

        prefixes = collections.defaultdict(int)
        for prefix in (img.prefix for img in sorted_imgs):
            prefixes[prefix] += 1

        # Select the prefix (key) that is repeated the most
        options.filename = max(prefixes, key = prefixes.get)
        print 'done.'

    print "%sImported FITS filenames will start with the string: '%s'" % \
          (style.prefix, options.filename)

    # Now we have to copy the FITS files. The basename of each imported file
    # will be options.filename + its sequence number. Filling zeros will be
    # affixed to each number so that the lenth of all the basenames is the
    # same. Following Dijkstra's teachings, we start numbering at zero.

    assert len(sorted_imgs)
    ndigits = len(str(len(sorted_imgs) - 1))
    print "%s%d digits are needed in order to enumerate %d files." % \
          (style.prefix, ndigits, len(sorted_imgs))

    print style.prefix
    print "%sCopying the FITS files to '%s'..." % \
          (style.prefix, output_dir)

    for index, fits_file in enumerate(sorted_imgs):

        # i.e., 'ferM_' + '0000' + '.fits' = 'ferM_0000.fits'
        dest_name = '%s%0*d.fits' % (options.filename, ndigits, index)
        dest_path = os.path.join(output_dir, dest_name)

        shutil.copy2(fits_file.path, dest_path)

        # The permission bits have been copied, but we need to make sure
        # that the copy of the FITS file is always writable, no matter what
        # the original permissions were. This is equivalent to `chmod u+w`
        methods.owner_writable(dest_path, True)

        dest_img = fitsimage.FITSImage(dest_path)

        # Add some information to the FITS header...
        if not options.exact:

            msg1 = "File imported by LEMON on %s" % methods.utctime()
            dest_img.add_history(msg1)

            # If the --uik option is given, store in this keyword the absolute
            # path to the image of which we made a copy. This allows other
            # LEMON commands, if necessary, to access the original FITS files
            # in case the imported images are modified (e.g., bias subtraction
            # or flat-fielding) before these other commands are executed.

            if options.uncimgk:

                comment = "before any calibration task"
                dest_img.update_keyword(options.uncimgk,
                                        os.path.abspath(dest_img.path),
                                        comment = comment)

                msg2 = "[Import] Original image: %s"
                dest_img.add_history(msg2 % os.path.abspath(fits_file.path))

        # ... unless we want an exact copy of the images. If that is the case,
        # verify that the SHA-1 checksum of the original and the copy matches
        elif fits_file.sha1sum != dest_img.sha1sum:
            msg = "copy of %s not identical (SHA-1 differs)" % fits_file.path
            raise IOError(msg)

        # Show which file has been copied, using the format of the
        # 'cp -v' command: `./ultra2/ferM_11.fits' -> `imported/img_01.fits'
        print  "%s`%s' -> `%s'" % (style.prefix, fits_file.path, dest_path)

    # Finally, let the user know how many FITS images, and the fraction of
    # the total, that were imported, as well as their size in megabytes.
    print style.prefix
    # NOTE(review): same Python 2 integer-division caveat as above -- unless
    # true division is enabled file-wide this truncates to 0 or 100
    ifraction = len(sorted_imgs) / len(images_set) * 100
    print "%sFITS files detected: %d" % (style.prefix, len(images_set))
    print "%sFITS files successfully imported: %d (%.2f%%)" % \
          (style.prefix, len(sorted_imgs), ifraction)

    total_size = 0.0
    for fits_file in sorted_imgs:
        total_size += os.path.getsize(fits_file.path) # in bytes

    print "%sTotal size of imported files: %.2f MB" % \
          (style.prefix, total_size / (1024.0 ** 2))
    print "%sYou're done ^_^" % style.prefix
    return 0
Example #8
0
File: mosaic.py  Project: dhorkin/lemon
def main(arguments=None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args=arguments)

    # Print the help and abort the execution if there are fewer than three
    # positional arguments left, as the user must specify at least two FITS
    # images and the output mosaic into which they are assembled.
    if len(args) < 3:
        parser.print_help()
        return 2  # used for command line syntax errors
    else:
        assert len(args) >= 3
        input_paths = set(args[:-1])
        output_path = args[-1]

    # Refuse to overwrite the output FITS file unless explicitly instructed to
    # do so. Note that, if the --overwritten option is given, we do not need to
    # delete the existing file: it will be silently overwritten when the output
    # of montage.mosaic() is shutil.move()'d to the output path.

    if os.path.exists(output_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_path)
            print style.error_exit_message
            return 1

    # Workaround for a bug in montage.mosaic() that raises an error ('mpirun
    # has exited due to process rank [...] without calling "finalize"...') if
    # mpi = True and background_match = True. Until this is fixed, we can only
    # use one core if the --background-match option is given by the user.

    if options.background_match and options.ncores > 1:
        options.ncores = 1
        for msg in (
                "{0}Warning: --background-match is incompatible with --cores > 1.",
                "{0}Setting the --cores option to a value of one.",
                "{0}This is a workaround for a known bug in montage-wrapper:",
                "{0}https://github.com/astropy/montage-wrapper/issues/18"):
            print msg.format(style.prefix)
        print

    # Map each filter to a list of FITSImage objects
    files = fitsimage.InputFITSFiles()

    msg = "%sMaking sure the %d input paths are FITS images..."
    print msg % (style.prefix, len(input_paths))

    util.show_progress(0.0)
    for index, path in enumerate(input_paths):
        # fitsimage.FITSImage.__init__() raises fitsimage.NonStandardFITS if
        # one of the paths is not a standard-conforming FITS file.
        try:
            img = fitsimage.FITSImage(path)

            # If we do not need to know the photometric filter (because the
            # --filter was not given) do not read it from the FITS header.
            # Instead, use None. This means that 'files', a dictionary, will
            # only have a key, None, mapping to all the input FITS images.

            if options.filter:
                pfilter = img.pfilter(options.filterk)
            else:
                pfilter = None

            files[pfilter].append(img)

        except fitsimage.NonStandardFITS:
            print
            msg = "'%s' is not a standard FITS file"
            raise fitsimage.NonStandardFITS(msg % path)

        # NOTE(review): in Python 2 this is integer division unless
        # 'from __future__ import division' is in effect at file top,
        # leaving the percentage at 0 until the loop finishes -- confirm.
        percentage = (index + 1) / len(input_paths) * 100
        util.show_progress(percentage)
    print  # progress bar doesn't include newline

    # The --filter option allows the user to specify which FITS files, among
    # all those received as input, must be combined: only those images taken
    # in the options.filter photometric filter.
    if options.filter:

        msg = "%s%d different photometric filters were detected:"
        print msg % (style.prefix, len(files.keys()))

        # iteritems() is Python 2 only
        for pfilter, images in sorted(files.iteritems()):
            msg = "%s %s: %d files (%.2f %%)"
            # NOTE(review): same integer-division caveat as above
            percentage = len(images) / len(files) * 100
            print msg % (style.prefix, pfilter, len(images), percentage)

        msg = "%sIgnoring images not taken in the '%s' photometric filter..."
        print msg % (style.prefix, options.filter),
        sys.stdout.flush()

        # drop every filter other than the requested one; items() returns a
        # list in Python 2, so deleting during this loop is safe
        discarded = 0
        for pfilter, images in files.items():
            if pfilter != options.filter:
                discarded += len(images)
                del files[pfilter]

        if not files:
            print
            msg = "%sError. No image was taken in the '%s' filter."
            print msg % (style.prefix, options.filter)
            print style.error_exit_message
            return 1

        else:
            print 'done.'
            msg = "%s%d images taken in the '%s' filter, %d were discarded."
            # NOTE(review): len(files) presumably counts the images across
            # all remaining filters (InputFITSFiles.__len__), not the number
            # of filter keys -- verify against fitsimage.InputFITSFiles
            print msg % (style.prefix, len(files), options.filter, discarded)

    # montage.mosaic() silently ignores those FITS images that have no WCS
    # information in their headers, and also raises a rather cryptic exception
    # (mMakeHdr: Invalid table file) if none of them has been astrometrically
    # solved. Instead of ignoring some images without warning or showing a
    # confusing error message that makes it almost impossible to understand
    # what may be failing, use FITSImage.center_wcs() to make sure that all the
    # images have WCS information, raising NoWCSInformationError otherwise.

    # NOTE(review): iterating 'files' directly is presumed to yield the
    # FITSImage objects (custom __iter__), not the filter keys -- confirm
    for img in files:
        # May raise NoWCSInformationError
        img.center_wcs()

    # montage.mosaic() requires as first argument the directory containing the
    # input FITS images but, in order to maintain the same syntax across all
    # LEMON commands, we receive them as command-line arguments. Thus, create a
    # temporary directory and symlink from it the input images. Hard links are
    # not an option because os.link() will raise "OSError: [Errno 18] Invalid
    # cross-device link" if the temporary directory is created in a different
    # partition.

    pid = os.getpid()
    suffix = "_LEMON_%d_mosaic" % pid
    kwargs = dict(suffix=suffix + '_input')
    input_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(util.clean_tmp_files, input_dir)

    for img in files:
        path = img.path
        source = os.path.abspath(path)
        basename = os.path.basename(path)
        link_name = os.path.join(input_dir, basename)
        os.symlink(source, link_name)

    # The output of montage.mosaic() is another directory, to which several
    # files are written, so we need the path to a second temporary directory.
    # Delete it before calling mosaic(), as otherwise it will raise IOError
    # ("Output directory already exists").

    kwargs = dict(suffix=suffix + '_output')
    output_dir = tempfile.mkdtemp(**kwargs)
    atexit.register(util.clean_tmp_files, output_dir)
    os.rmdir(output_dir)

    kwargs = dict(
        background_match=options.background_match,
        combine=options.combine,
        bitpix=-64,
    )

    if options.ncores > 1:
        kwargs['mpi'] = True  # use MPI whenever possible
        kwargs['n_proc'] = options.ncores  # number of MPI processes
    montage.mosaic(input_dir, output_dir, **kwargs)

    # montage.mosaic() writes several files to the output directory, but we are
    # only interested in one of them: 'mosaic.fits', the mosaic FITS image.

    MOSAIC_OUTPUT = 'mosaic.fits'
    src = os.path.join(output_dir, MOSAIC_OUTPUT)

    if options.reproject:
        print "%sReproject mosaic to point North..." % style.prefix,
        sys.stdout.flush()
        kwargs = dict(north_aligned=True, silent_cleanup=True)
        montage.reproject(src, output_path, **kwargs)
        print 'done.'
    else:
        # No reprojection, move mosaic to the output path
        shutil.move(src, output_path)

    print "%sYou're done ^_^" % style.prefix
    return 0
Example #9
0
def main(arguments=None):
    """main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args=arguments)

    # Adjust the logger level to WARNING, INFO or DEBUG, depending on the
    # given number of -v options (none, one or two or more, respectively)
    logging_level = logging.WARNING
    if options.verbose == 1:
        logging_level = logging.INFO
    elif options.verbose >= 2:
        logging_level = logging.DEBUG
    logging.basicConfig(format=style.LOG_FORMAT, level=logging_level)

    # Print the help and abort the execution if there are not two positional
    # arguments left after parsing the options, as the user must specify at
    # least one (only one?) input FITS file and the output JSON file.
    if len(args) < 2:
        parser.print_help()
        return 2  # 2 is generally used for command line syntax errors
    else:
        sources_img_path = args[0]
        input_paths = list(set(args[1:-1]))
        output_json_path = args[-1]

    # The execution of this module, especially when doing long-term monitoring
    # of reasonably crowded fields, may easily take several *days*. The least
    # we can do, in order to spare the end-user from insufferable grief because
    # of the waste of billions of valuable CPU cycles, is to avoid to have the
    # output file accidentally overwritten.

    if os.path.exists(output_json_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_json_path)
            print style.error_exit_message
            return 1

    msg = "%sExamining the headers of the %s FITS files given as input..."
    print msg % (style.prefix, len(input_paths))

    files = fitsimage.InputFITSFiles()
    for index, img_path in enumerate(input_paths):
        img = fitsimage.FITSImage(img_path)
        pfilter = img.pfilter(options.filterk)
        files[pfilter].append(img)

        percentage = (index + 1) / len(input_paths) * 100
        util.show_progress(percentage)

    print  # progress bar doesn't include newline
    print style.prefix

    # To begin with, we need to identify the most constant stars, something for
    # which we have to do photometry on all the stars and for all the images of
    # the campaign. But fret not, as this has to be done only this time: once
    # we get the light curves of all the stars and for all the images, we will
    # be able to determine which are the most constant among them and work
    # always with this subset in order to determine which aperture and sky
    # annulus are the optimal.

    msg = "%sDoing initial photometry with FWHM-derived apertures..."
    print msg % style.prefix
    print style.prefix

    # mkstemp() returns a tuple containing an OS-level handle to an open file
    # and its absolute pathname. Thus, we need to close the file right after
    # creating it, and tell the photometry module to overwrite (-w) it.

    kwargs = dict(prefix="photometry_", suffix=".LEMONdB")
    phot_db_handle, phot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(util.clean_tmp_files, phot_db_path)
    os.close(phot_db_handle)

    basic_args = [sources_img_path] + input_paths + [phot_db_path, "--overwrite"]

    phot_args = [
        "--maximum",
        options.maximum,
        "--margin",
        options.margin,
        "--cores",
        options.ncores,
        "--min-sky",
        options.min,
        "--objectk",
        options.objectk,
        "--filterk",
        options.filterk,
        "--datek",
        options.datek,
        "--timek",
        options.timek,
        "--expk",
        options.exptimek,
        "--coaddk",
        options.coaddk,
        "--gaink",
        options.gaink,
        "--fwhmk",
        options.fwhmk,
        "--airmk",
        options.airmassk,
    ]

    # The --gain and --uik options default to None, so add them to the list of
    # arguments only if they were given. Otherwise, (a) --gaink would be given
    # a value of 'None', a string, that would result in an error when optparse
    # attempted to convert it to float, and (b) --uik would understood 'None'
    # as the name of the keyword storing the path to the uncalibrated image.

    if options.gain:
        phot_args += ["--gain", options.gain]

    if options.uncimgk:
        phot_args += ["--uncimgk", options.uncimgk]

    # Pass as many '-v' options as we have received here
    [phot_args.append("-v") for x in xrange(options.verbose)]

    extra_args = [
        "--aperture",
        options.aperture,
        "--annulus",
        options.annulus,
        "--dannulus",
        options.dannulus,
    ]

    # Non-zero return codes raise subprocess.CalledProcessError
    args = basic_args + phot_args + extra_args
    check_run(photometry.main, [str(a) for a in args])

    # Now we need to compute the light curves and find those that are most
    # constant. This, of course, has to be done for each filter, as a star
    # identified as constant in Johnson I may be too faint in Johnson B, for
    # example. In other words: we need to calculate the light curve of each
    # star and for each filter, and then determine which are the
    # options.nconstant stars with the lowest standard deviation.

    print style.prefix
    msg = "%sGenerating light curves for initial photometry."
    print msg % style.prefix
    print style.prefix

    kwargs = dict(prefix="diffphot_", suffix=".LEMONdB")
    diffphot_db_handle, diffphot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(util.clean_tmp_files, diffphot_db_path)
    os.close(diffphot_db_handle)

    diff_args = [
        phot_db_path,
        "--output",
        diffphot_db_path,
        "--overwrite",
        "--cores",
        options.ncores,
        "--minimum-images",
        options.min_images,
        "--stars",
        options.nconstant,
        "--minimum-stars",
        options.min_cstars,
        "--pct",
        options.pct,
        "--weights-threshold",
        options.wminimum,
        "--max-iters",
        options.max_iters,
        "--worst-fraction",
        options.worst_fraction,
    ]

    [diff_args.append("-v") for x in xrange(options.verbose)]

    check_run(diffphot.main, [str(a) for a in diff_args])
    print style.prefix

    # Map each photometric filter to the path of the temporary file where the
    # right ascension and declination of each constant star, one per line, will
    # be saved. This file is from now on passed, along with the --coordinates
    # option, to photometry.main(), so that photometry is not done on all the
    # astronomical objects, but instead exclusively on these ones.

    coordinates_files = {}

    miner = mining.LEMONdBMiner(diffphot_db_path)
    for pfilter in miner.pfilters:

        # LEMONdBMiner.sort_by_curve() returns a list of two-element tuples,
        # mapping the ID of each star to the standard deviation of its light
        # curve in this photometric filter. The list is sorted in increasing
        # order by the standard deviation. We are only interested in the first
        # 'options.nconstant', needing at least 'options.pminimum'.

        msg = "%sIdentifying the %d most constant stars for the %s filter..."
        args = style.prefix, options.nconstant, pfilter
        print msg % args,
        sys.stdout.flush()

        kwargs = dict(minimum=options.min_images)
        stars_stdevs = miner.sort_by_curve_stdev(pfilter, **kwargs)
        cstars = stars_stdevs[: options.nconstant]

        if len(cstars) < options.pminimum:
            msg = (
                "fewer than %d stars identified as constant in the "
                "initial photometry for the %s filter"
            )
            args = options.pminimum, pfilter
            raise NotEnoughConstantStars(msg % args)
        else:
            print "done."

        if len(cstars) < options.nconstant:
            msg = "%sBut only %d stars were available. Using them all, anyway."
            print msg % (style.prefix, len(cstars))

        # Replacing whitespaces with underscores is easier than having to quote
        # the path to the --coordinates file if the name of the filter contains
        # them (otherwise, optparse would only see up to the first whitespace).
        prefix = "%s_" % str(pfilter).replace(" ", "_")
        kwargs = dict(prefix=prefix, suffix=".coordinates")
        coords_fd, coordinates_files[pfilter] = tempfile.mkstemp(**kwargs)
        atexit.register(util.clean_tmp_files, coordinates_files[pfilter])

        # LEMONdBMiner.get_star() returns a five-element tuple with the x and y
        # coordinates, right ascension, declination and instrumental magnitude
        # of the astronomical object in the sources image.
        for star_id, _ in cstars:
            ra, dec = miner.get_star(star_id)[2:4]
            os.write(coords_fd, "%.10f\t%.10f\n" % (ra, dec))
        os.close(coords_fd)

        msg = "%sStar coordinates for %s temporarily saved to %s"
        print msg % (style.prefix, pfilter, coordinates_files[pfilter])

    # The constant astronomical objects, the only ones to which we will pay
    # attention from now on, have been identified. So far, so good. Now we
    # generate the light curves of these objects for each candidate set of
    # photometric parameters. We store the evaluated values in a dictionary in
    # which each filter maps to a list of json_parse.CandidateAnnuli objects.

    evaluated_annuli = collections.defaultdict(list)

    for pfilter, coords_path in coordinates_files.iteritems():

        print style.prefix
        msg = "%sFinding the optimal photometric parameters for the %s filter."
        print msg % (style.prefix, pfilter)

        if len(files[pfilter]) < options.min_images:
            msg = "fewer than %d images (--minimum-images option) for %s"
            args = options.min_images, pfilter
            raise NotEnoughConstantStars(msg % args)

        # The median FWHM of the images is needed in order to calculate the
        # range of apertures that we need to evaluate for this filter.

        msg = "%sCalculating the median FWHM for this filter..."
        print msg % style.prefix,

        pfilter_fwhms = []
        for img in files[pfilter]:
            img_fwhm = photometry.get_fwhm(img, options)
            logging.debug("%s: FWHM = %.3f" % (img.path, img_fwhm))
            pfilter_fwhms.append(img_fwhm)

        fwhm = numpy.median(pfilter_fwhms)
        print " done."

        # FWHM to range of pixels conversion
        min_aperture = fwhm * options.lower
        max_aperture = fwhm * options.upper
        annulus = fwhm * options.sky
        dannulus = fwhm * options.width

        # The dimensions of the sky annulus remain fixed, while the
        # aperture is in the range [lower * FWHM, upper FWHM], with
        # increments of options.step pixels.
        filter_apertures = numpy.arange(min_aperture, max_aperture, options.step)
        assert filter_apertures[0] == min_aperture

        msg = "%sFWHM (%s passband) = %.3f pixels, therefore:"
        print msg % (style.prefix, pfilter, fwhm)
        msg = "%sAperture radius, minimum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.lower, min_aperture)
        msg = "%sAperture radius, maximum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.upper, max_aperture)
        msg = "%sAperture radius, step = %.2f pixels, which means that:"
        print msg % (style.prefix, options.step)

        msg = "%sAperture radius, actual maximum = %.3f + %d x %.2f = %.3f pixels"
        args = (
            style.prefix,
            min_aperture,
            len(filter_apertures),
            options.step,
            max(filter_apertures),
        )
        print msg % args

        msg = "%sSky annulus, inner radius = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.sky, annulus)
        msg = "%sSky annulus, width = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.width, dannulus)

        msg = "%s%d different apertures in the range [%.2f, %.2f] to be evaluated:"
        args = (
            style.prefix,
            len(filter_apertures),
            filter_apertures[0],
            filter_apertures[-1],
        )
        print msg % args

        # For each candidate aperture, and only with the images taken in
        # this filter, do photometry on the constant stars and compute the
        # median of the standard deviation of their light curves as a means
        # of evaluating the suitability of this combination of parameters.
        for index, aperture in enumerate(filter_apertures):

            print style.prefix

            kwargs = dict(prefix="photometry_", suffix=".LEMONdB")
            fd, aper_phot_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(util.clean_tmp_files, aper_phot_db_path)
            os.close(fd)

            paths = [img.path for img in files[pfilter]]
            basic_args = [sources_img_path] + paths + [aper_phot_db_path, "--overwrite"]

            extra_args = [
                "--filter",
                str(pfilter),
                "--coordinates",
                coords_path,
                "--aperture-pix",
                aperture,
                "--annulus-pix",
                annulus,
                "--dannulus-pix",
                dannulus,
            ]

            args = basic_args + phot_args + extra_args
            check_run(photometry.main, [str(a) for a in args])

            kwargs = dict(prefix="diffphot_", suffix=".LEMONdB")
            fd, aper_diff_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(util.clean_tmp_files, aper_diff_db_path)
            os.close(fd)

            # Reuse the arguments used earlier for diffphot.main(). We only
            # need to change the first argument (path to the input LEMONdB)
            # and the third one (path to the output LEMONdB)
            diff_args[0] = aper_phot_db_path
            diff_args[2] = aper_diff_db_path
            check_run(diffphot.main, [str(a) for a in diff_args])

            miner = mining.LEMONdBMiner(aper_diff_db_path)

            try:
                kwargs = dict(minimum=options.min_images)
                cstars = miner.sort_by_curve_stdev(pfilter, **kwargs)
            except mining.NoStarsSelectedError:
                # There are no light curves with at least options.min_images points.
                # Therefore, much to our sorrow, we cannot evaluate this aperture.
                msg = "%sNo constant stars for this aperture. Ignoring it..."
                print msg % style.prefix
                continue

            # There must be at most 'nconstant' stars, but there may be fewer
            # if this aperture causes one or more of the constant stars to be
            # too faint (INDEF) in so many images as to prevent their lights
            # curve from being computed.
            assert len(cstars) <= options.nconstant

            if len(cstars) < options.pminimum:
                msg = (
                    "%sJust %d constant stars, fewer than the allowed "
                    "minimum of %d, had their light curves calculated "
                    "for this aperture. Ignoring it..."
                )
                args = style.prefix, len(cstars), options.pminimum
                print style.prefix
                continue

            # 'cstars' contains two-element tuples: (ID, stdev)
            stdevs_median = numpy.median([x[1] for x in cstars])
            params = (aperture, annulus, dannulus, stdevs_median)
            # NumPy floating-point data types are not JSON serializable
            args = (float(x) for x in params)
            candidate = json_parse.CandidateAnnuli(*args)
            evaluated_annuli[pfilter].append(candidate)

            msg = "%sAperture = %.3f, median stdev (%d stars) = %.4f"
            args = style.prefix, aperture, len(cstars), stdevs_median
            print msg % args

            percentage = (index + 1) / len(filter_apertures) * 100
            msg = "%s%s progress: %.2f %%"
            args = style.prefix, pfilter, percentage
            print msg % args

        # Let the user know of the best 'annuli', that is, the one for
        # which the standard deviation of the constant stars is minimal
        kwargs = dict(key=operator.attrgetter("stdev"))
        best_candidate = min(evaluated_annuli[pfilter], **kwargs)

        msg = "%sBest aperture found at %.3f pixels with stdev = %.4f"
        args = style.prefix, best_candidate.aperture, best_candidate.stdev
        print msg % args

    print style.prefix
    msg = "%sSaving the evaluated apertures to the '%s' JSON file ..."
    print msg % (style.prefix, output_json_path),
    json_parse.CandidateAnnuli.dump(evaluated_annuli, output_json_path)
    print " done."

    print "%sYou're done ^_^" % style.prefix
    return 0
예제 #10
0
        a,d=self.all_pix2world(x,y,0)
        return obj.all_world2pix(a,d,0)
        #xx,yy=obj.all_world2pix(a,d,0)
        #return xx+0.5,yy+0.5
        
        
    
        
# Ad-hoc smoke test: round-trip pixel coordinates between a grism flt
# exposure and a cutout of a segmentation image.
# NOTE(review): input paths are hard-coded to the author's local filesystem,
# so this block only runs on that machine.
if __name__=='__main__':
    # Read the header of the first extension of the flt exposure and build
    # a WCS from it. 'fits' and 'WCS' must be imported earlier in the file
    # (not visible here); WCS presumably exposes an ad2xy() method (used
    # below), which plain astropy.wcs.WCS does not -- confirm it is a
    # project-extended class.
    with fits.open('/Users/rryan/MACS0647/data1/pre_Z11_-25_1.302_G141/icc905meq_flt.fits') as hdul:
        hdr=hdul[1].header
    wcs=WCS(hdr)


    import fitsimage
    x=fitsimage.FITSImage('/Users/rryan/MACS0647/data/CLASH/65mas/z11_seg.fits')

    # Cut out a 20x20-pixel section of the segmentation image.
    # Assumes extract(x0, x1, y0, y1) -- TODO confirm the argument order
    # against fitsimage.FITSImage.extract().
    y=x.extract(50,70,50,70)
    print(y)

    # Pixel -> sky in the cutout, then sky -> pixel in the flt exposure;
    # the printed (xx, yy) should land on the same astronomical object.
    a,d=y.xy2ad(10,10)
    print(a,d)
    xx,yy=wcs.ad2xy(a,d)
    print(xx,yy)

    #a,d=wcs.xy2ad(np.array([11]),np.array([21]))

    #print(a,d)
    #x,y=wcs.ad2xy(a,d)

    #print(x,y)
예제 #11
0
def main(arguments=None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args=arguments)

    # Adjust the logger level to WARNING, INFO or DEBUG, depending on the
    # given number of -v options (none, one or two or more, respectively)
    logging_level = logging.WARNING
    if options.verbose == 1:
        logging_level = logging.INFO
    elif options.verbose >= 2:
        logging_level = logging.DEBUG
    logging.basicConfig(format=style.LOG_FORMAT, level=logging_level)

    # Print the help and abort the execution if there are not two positional
    # arguments left after parsing the options, as the user must specify at
    # least one (only one?) input FITS file and the output directory
    if len(args) < 2:
        parser.print_help()
        return 2  # 2 is generally used for command line syntax errors
    else:
        input_paths = args[:-1]
        output_dir = args[-1]

    # Make sure that the output directory exists, and create it if it doesn't.
    # The subdirectories for discarded images are not yet created; we put this
    # off until we know that at least one image is indeed going to be excluded.
    util.determine_output_dir(output_dir)
    fwhm_dir = os.path.join(output_dir, options.fwhm_dir)
    elong_dir = os.path.join(output_dir, options.elong_dir)

    print "%s%d paths given as input, on which sources will be detected." % \
          (style.prefix, len(input_paths))
    print "%sRunning SExtractor on all the FITS images..." % style.prefix

    # Use a pool of workers and run SExtractor on the images in parallel!
    pool = multiprocessing.Pool(options.ncores)
    map_async_args = ((path, options) for path in input_paths
                      if os.path.isfile(path))
    result = pool.map_async(parallel_sextractor, map_async_args)

    util.show_progress(0.0)
    while not result.ready():
        time.sleep(1)
        util.show_progress(queue.qsize() / len(input_paths) * 100)
        # Do not update the progress bar when debugging; instead, print it
        # on a new line each time. This prevents the next logging message,
        # if any, from being printed on the same line that the bar.
        if logging_level < logging.WARNING:
            print

    result.get()  # reraise exceptions of the remote call, if any
    util.show_progress(100)  # in case the queue was ready too soon
    print

    # Three sets, to keep the track of all the images on which SExtractor
    # has been run and also of which have been discarded because of their
    # unnaceptable FWHM or elongation ratio.
    all_images = set()
    fwhm_discarded = set()
    elong_discarded = set()

    # Dictionary mapping each input image to the temporary output file: a copy
    # of the input image but whose FITS header has been updated with the path
    # to the SExtractor catalog and the MD5 hash of the configuration files.
    seeing_tmp_paths = dict()

    # Extract the four-element tuples (path to the image, FWHM, elongation and
    # number of sources detected by SExtractor) from the multiprocessing' queue
    # and store the values in three independent dictionaries; these provide
    # fast access, with O(1) lookup, to the data.
    fwhms = {}
    elongs = {}
    nstars = {}

    for _ in xrange(queue.qsize()):
        path, output_tmp_path, fwhm, elong, stars = queue.get()
        all_images.add(path)
        seeing_tmp_paths[path] = output_tmp_path

        # The clean-up function cannot be registered in parallel_sextractor()
        # because it would remove the temporary FITS file when the process
        # terminates (instead of when our program exits, which is what we
        # need). Do it here, to make sure that whatever happens next these
        # temporary files are always deleted.
        atexit.register(util.clean_tmp_files, output_tmp_path)

        fwhms[path] = fwhm
        elongs[path] = elong
        nstars[path] = stars

    if not all_images:
        print "%sError. No FITS images were detected." % style.prefix
        print style.error_exit_message
        return 1

    # Let's first discard those images with a bad full width at half maximum.
    # In order to to this, we fit a normal distribution (assuming the FWHMs to
    # be Gaussian distributed) and define the maximum allowed value as that
    # which exceeds the specified number of standard deviations of the mean.

    print "%sFitting a Gaussian distribution to the FWHMs..." % style.prefix,
    sys.stdout.flush()
    logging.debug("Fitting a Gaussian distribution to the %d FWHMs" %
                  len(fwhms))
    mu, sigma = scipy.stats.norm.fit(fwhms.values())
    logging.debug("FWHMs mean = %.3f" % mu)
    logging.debug("FWHMs sigma = %.3f" % sigma)
    print 'done.'
    sys.stdout.flush()

    print "%sFWHMs mean = %.3f, sigma = %.3f pixels" % (style.prefix, mu,
                                                        sigma)
    maximum_fwhm = mu + (options.fwhm_sigma * sigma)
    logging.debug("Maximum allowed FWHM = %.3f + %.1f x %.3f = %.3f pixels" % \
                 (mu, options.fwhm_sigma, sigma, maximum_fwhm))
    print "%sDiscarding images with a FWHM > %.3f + %.1f x %.3f = %.3f pixels..." % \
          (style.prefix, mu, options.fwhm_sigma, sigma, maximum_fwhm)

    # Exclude images by adding them to the FWHM-discarded set
    for path, fwhm in sorted(fwhms.iteritems()):
        if fwhm > maximum_fwhm:
            fwhm_discarded.add(path)
            logging.debug("%s discarded (FWHM = %.3f > %.3f" % \
                         (path, fwhm, maximum_fwhm))
            print "%s%s discarded (FWHM = %.3f)" % (style.prefix, path, fwhm)

    logging.info("Images discarded by FWHM: %d" % len(fwhm_discarded))
    if not fwhm_discarded:
        print "%sNo images were discarded because of their FWHM. Hooray!" % style.prefix
    else:
        discarded_fraction = len(fwhm_discarded) / len(all_images) * 100
        nleft = len(all_images) - len(fwhm_discarded)  # non-discarded images
        print "%s%d FITS images (%.2f %%) discarded, %d remain" % \
              (style.prefix,  len(fwhm_discarded), discarded_fraction, nleft)

    # Repeat the same approach, now with the elongation ratios. Images already
    # discarded because of their FWHM are not even considered -- why discard
    # them twice? They can simply be ignored.

    print "%sFitting a Gaussian distribution to the elongations..." % style.prefix,
    sys.stdout.flush()
    mu, sigma = scipy.stats.norm.fit(elongs.values())
    logging.debug("Elongations mean = %.3f" % mu)
    logging.debug("Elongations sigma = %.3f" % sigma)
    print 'done.'
    sys.stdout.flush()

    print "%sElongation mean = %.3f, sigma = %.3f pixels" % (style.prefix, mu,
                                                             sigma)
    maximum_elong = mu + (options.elong_sigma * sigma)
    logging.debug("Maximum allowed elongation = %.3f + %.1f x %.3f = %.3f pixels" % \
                 (mu, options.elong_sigma, sigma, maximum_elong))
    print "%sDiscarding images with an elongation > %.3f + %.1f x %.3f = %.3f ..." % \
          (style.prefix, mu, options.elong_sigma, sigma, maximum_elong)

    for path, elong in sorted(elongs.iteritems()):
        # Ignore FWHM-discarded images
        if path in fwhm_discarded:
            logging.debug("%s ignored (already discarded by FWHM)" % path)
            continue
        elif elong > maximum_elong:
            elong_discarded.add(path)
            logging.debug("%s discarded (elongation = %.3f > %.3f" % \
                         (path, fwhm, maximum_elong))
            print "%s%s discarded (elongation = %.3f)" % (style.prefix, path,
                                                          elong)

    logging.info("Images discarded by elongation: %d" % len(elong_discarded))
    if not elong_discarded:
        print "%sNo images were discarded because of their elongation. Yay!" % style.prefix
    else:
        initial_size = len(all_images) - len(fwhm_discarded)
        discarded_fraction = len(elong_discarded) / initial_size * 100
        nleft = initial_size - len(elong_discarded)
        print "%s%d FITS images (%.2f %%) discarded, %d remain" % \
              (style.prefix,  len(elong_discarded), discarded_fraction, nleft)

    # Finally, take the images whose number of stars is at the 'stars_per'
    # percentile and select the one with the best FWHM. This will be our
    # 'best-seeing' image, in which sources may be detected. Taking directly
    # the image with the best FWHM may not work as we need the best-seeomg
    # image to also be one of the most populated.

    print "%sIdentifying the images whose number of detected sources it at " \
          "the %.2f percentile..." % (style.prefix, options.stars_per) ,
    sys.stdout.flush()
    # Ignore discarded images, for whatever reason
    logging.debug("Finding the %.2f percentile of the number of stars " \
                 "detected by SExtractor"  % options.stars_per)
    for path in fwhm_discarded.union(elong_discarded):
        del nstars[path]
        reason = 'FWHM' if path in fwhm_discarded else 'elongation'
        logging.debug("%s ignored (was discarded by %s)" % (path, reason))
    min_nstars = scipy.stats.scoreatpercentile(nstars.values(),
                                               options.stars_per)
    print 'done.'

    print "%sNumber of stars at percentile = %d, taking the images with at " \
          "least this number of sources..." % (style.prefix, min_nstars) ,
    sys.stdout.flush()
    most_populated_images = [
        path for path, stars in nstars.iteritems() if stars >= min_nstars
    ]

    logging.debug("There are %s images with a number of stars at the %.2f " \
                 "percentile" % (len(most_populated_images), options.stars_per))
    logging.debug("Identifying the image with the lowest FWHM")
    print 'done.'

    print "%sFinally, finding the image with the lowest FWHM among these " \
          "%d images..." % (style.prefix, len(most_populated_images)),
    sys.stdout.flush()

    # Find the image with the best seeing (lowest FWHM)
    best_seeing = min(most_populated_images, key=lambda path: fwhms[path])
    logging.debug("Best-seeing image: %s" % path)
    logging.debug("Best-seeing image FWHM = %.3f" % fwhms[best_seeing])
    logging.debug("Best-seeing image elongation = %.3f" % elongs[best_seeing])
    logging.debug("Best-seeing image sources = %d" % nstars[best_seeing])
    assert best_seeing not in fwhm_discarded
    assert best_seeing not in elong_discarded
    print 'done.'

    print "%sBest-seeing image = %s, with %d sources and a FWHM of %.3f pixels" % \
          (style.prefix, best_seeing, nstars[best_seeing], fwhms[best_seeing])

    # The subdirectories are created only if at least one image is going to be
    # discarded. We do not want empty directories in case no image is discarded
    # because of its full-width at half maximum (FWHM) or elongation.

    if fwhm_discarded:
        util.determine_output_dir(fwhm_dir, quiet=True)

    if elong_discarded:
        util.determine_output_dir(elong_dir, quiet=True)

    # Finally, copy all the FITS images to the output directory
    processed = 0
    for path in sorted(all_images):
        # Add the suffix to the basename of the FITS image
        root, ext = os.path.splitext(os.path.basename(path))
        output_filename = root + options.suffix + ext
        logging.debug("Basename '%s' + '%s' becomes '%s'" % \
                     (path, options.suffix, output_filename))

        if path in fwhm_discarded:
            output_path = os.path.join(fwhm_dir, output_filename)
            logging.debug("%s was discarded because of its FWHM" % path)
            logging.debug("%s to be copied to subdirectory %s" %
                          (path, fwhm_dir))
            history_msg1 = "Image discarded by LEMON on %s" % util.utctime()
            history_msg2 = "[Discarded] FWHM = %.3f pixels, maximum allowed value = %.3f" % \
                           (fwhms[path], maximum_fwhm)

        elif path in elong_discarded:
            output_path = os.path.join(elong_dir, output_filename)
            logging.debug("%s was discarded because of its elongation ratio" %
                          path)
            logging.debug("%s to be copied to subdirectory %s" %
                          (path, elong_dir))
            history_msg1 = "Image discarded by LEMON on %s" % util.utctime()
            history_msg2 = "[Discarded] Elongation = %.3f, maximum allowed value = %.3f" % \
                           (elongs[path], maximum_elong)

        elif path == best_seeing:

            # Retain original name if --filename is an empty string
            if not options.bseeingfn:
                filename = output_filename
            else:
                filename = options.bseeingfn

            output_path = os.path.join(output_dir, filename)
            logging.debug("%s is the best-seeing image" % path)
            logging.debug("%s to be copied to directory %s with name %s" % \
                         (path, output_dir, options.bseeingfn))
            history_msg1 = "Image identified by LEMON as the 'best-seeing' one"
            history_msg2 = "FWHM = %.3f | Elongation = %.3f | Sources: %d (at %.2f percentile)" % \
                           (fwhms[path], elongs[path], nstars[path], options.stars_per)

        else:
            output_path = os.path.join(output_dir, output_filename)
            logging.debug("%s to be copied to %s" % (path, output_dir))
            history_msg1 = "Image FWHM = %.3f" % fwhms[path]
            history_msg2 = "Image elongation = %.3f" % elongs[path]

        if os.path.exists(output_path) and not options.overwrite:
            msg = ("%sError. Output FITS file '%s' already exists. "
                   "You need to use --overwrite.")
            args = style.prefix, output_path
            print msg % args
            print style.error_exit_message
            return 1

        else:
            src = seeing_tmp_paths[path]
            shutil.move(src, output_path)

        util.owner_writable(output_path, True)  # chmod u+w
        logging.debug("%s copied to %s" % (path, output_path))
        output_img = fitsimage.FITSImage(output_path)
        output_img.add_history(history_msg1)
        output_img.add_history(history_msg2)
        logging.debug("%s: FITS header updated (HISTORY keywords)" % path)

        # Copy the FWHM to the FITS header, for future reference
        comment = "Margin = %d, SNR percentile = %.3f" % (options.margin,
                                                          options.per)
        output_img.update_keyword(options.fwhmk, fwhms[path], comment=comment)
        logging.debug("%s: FITS header updated (%s keyword)" %
                      (path, options.fwhmk))

        print "%sFITS image %s saved to %s" % (style.prefix, path, output_path)
        processed += 1

    print "%sA total of %d images was saved to directory '%s'." % (
        style.prefix, processed, output_dir)
    print "%sWe're done ^_^" % style.prefix
    return 0