Example #1
def workerprocess___qr_mastercals(queue, options):
    #print "QR MasterCals worker process started, ready for action..."

    logger = logging.getLogger("QRMasterCals")
    logger.info("MasterCal Listener started")

    while (True):
        try:
            # print "\n\nWaiting for stuff to do\n\n"
            task = queue.get()
        except (KeyboardInterrupt, SystemExit) as e:
            # print "worker received termination notice"
            # Ignore the shut-down command here, and wait for the official
            # shutdown command from main task
            continue

        if (task is None):
            logger.info("Shutting down worker")
            queue.task_done()
            break

        params = task
        # print params

        try:
            handle_mastercals_request(params, logger, options)
        except:
            podi_logging.log_exception()
            pass

        # Mark this task as done, this means we are ready for the next one.
        queue.task_done()
        continue

    return
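The worker above follows the standard sentinel-shutdown pattern: tasks are pulled from a multiprocessing.JoinableQueue, a None entry signals shutdown, and every get() is balanced by a task_done() so the parent's join() accounting stays correct. A minimal sketch of the driving side, assuming this worker function and an options dictionary; run_mastercal_workers and the task payload format are illustrative, not part of the original module:

import multiprocessing

def run_mastercal_workers(tasks, options, n_workers=2):
    # Hypothetical driver for the worker above
    queue = multiprocessing.JoinableQueue()
    workers = [multiprocessing.Process(target=workerprocess___qr_mastercals,
                                       args=(queue, options))
               for _ in range(n_workers)]
    for p in workers:
        p.start()
    for task in tasks:
        queue.put(task)      # payload is handed to handle_mastercals_request
    for _ in workers:
        queue.put(None)      # one shutdown sentinel per worker
    queue.join()             # returns once every task_done() has been called
    for p in workers:
        p.join()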
Example #2
def create_saturation_catalog_ota(filename, output_dir, verbose=True, 
                                  return_numpy_catalog=False, saturation_limit=65535):
    """
    Create a saturation table for a given OTA exposure.

    Parameters
    ----------
    filename : string
    
        Filename of the OTA FITS file.

    output_dir : string

        If return_numpy_catalog is not set, write the saturation catalog into
        this directory.

    return_numpy_catalog : bool

        If set, return the results as a numpy array instead of writing
        individual files to disk.

    saturation_limit : int

        Pixel value at or above which a pixel is considered saturated.


    Returns
    -------
    None - if no saturated pixels are found in this frame
    
    (ndarray, extname) - if return_numpy_catalog is set

    Nothing - if return_numpy_catalog is not set

    """

    logger = logging.getLogger("OTASatCat")

    # Open filename
    logger.debug("Input filename: %s" % (filename))

    try:
        hdulist = pyfits.open(filename)
    except IOError:
        logger.debug("Can't open file %s" % (filename))
        return None
    except:
        podi_logging.log_exception()
        return None

    mjd = hdulist[0].header['MJD-OBS']
    obsid = hdulist[0].header['OBSID']
    ota = int(hdulist[0].header['FPPOS'][2:4])
    datatype = hdulist[0].header['FILENAME'][0]

    logger = logging.getLogger("CreateSatCat: %s, OTA %02d" % (obsid, ota))
    logger.debug("Starting work")

    full_coords = numpy.zeros(shape=(0,4)) #, dtype=numpy.int16)
    saturated_pixels_total = 0

    for ext in range(1, len(hdulist)):
        if (not is_image_extension(hdulist[ext])):
            continue

        # Find all saturated pixels (values >= saturation_limit)
        data = hdulist[ext].data
        saturated = (data >= saturation_limit)
        # print hdulist[ext].header['EXTNAME'], data.shape, numpy.max(data)

        # Skip this cell if no pixels are saturated
        number_saturated_pixels = numpy.sum(saturated)
        if (number_saturated_pixels <= 0):
            continue

        saturated_pixels_total += number_saturated_pixels
        
        wn_cellx = hdulist[ext].header['WN_CELLX']
        wn_celly = hdulist[ext].header['WN_CELLY']

        # logger.debug("number of saturated pixels in cell %d,%d: %d" % (wn_cellx, wn_celly, number_saturated_pixels))

        # Do some book-keeping preparing for the masking
        rows, cols = numpy.indices(data.shape)

        saturated_rows = rows[saturated]
        saturated_cols = cols[saturated]

        #print saturated_rows.shape, saturated_cols.shape

        coordinates = numpy.zeros(shape=(number_saturated_pixels,4))
        coordinates[:,0] = wn_cellx
        coordinates[:,1] = wn_celly
        coordinates[:,2] = saturated_cols[:]
        coordinates[:,3] = saturated_rows[:]

        full_coords = numpy.append(full_coords, coordinates, axis=0)

    final_cat = numpy.array(full_coords, dtype=numpy.dtype('int16'))

    if (saturated_pixels_total <= 0):
        logger.debug("No saturated pixels found, well done!")
        return None

    logger.debug("Found %d saturated pixels, preparing catalog" % (saturated_pixels_total))
    # Now define the columns for the table
    columns = [\
        pyfits.Column(name='CELL_X', format='I', array=final_cat[:, 0]),
        pyfits.Column(name='CELL_Y', format='I', array=final_cat[:, 1]),
        pyfits.Column(name='X',      format='I', array=final_cat[:, 2]),
        pyfits.Column(name='Y',      format='I', array=final_cat[:, 3])
        ]
    # Create the table extension
    coldefs = pyfits.ColDefs(columns)
    tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
    extension_name = "OTA%02d.SATPIX" % (ota)
    tbhdu.name = extension_name

    if (return_numpy_catalog):
        logger.debug("Returning results as numpy catalog")
        return final_cat, extension_name

    # Also copy the primary header into the new catalog
    primhdu = pyfits.PrimaryHDU(header=hdulist[0].header)

    # Create a HDUList for output
    out_hdulist = pyfits.HDUList([primhdu, tbhdu])
    
    # And create the output file
    output_filename = "%s/%s%s.%02d.saturated.fits" % (output_dir, datatype, obsid, ota)
    stdout_write("Writing output: %s\n" % (output_filename))

    clobberfile(output_filename)
    out_hdulist.writeto(output_filename, clobber=True)

    if (verbose):
        print("some of the saturated pixels:\n",final_cat[0:10,:])

    #numpy.savetxt("test", final_cat)
    #print full_coords.shape
        
    logger.debug("Retuning final FITS table catalog")
    return final_cat
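A hedged usage sketch for the function above: with return_numpy_catalog=True it skips all disk I/O and returns the (N, 4) int16 array of (CELL_X, CELL_Y, X, Y) coordinates together with the extension name. The input filename is a placeholder:

# Hypothetical caller; the filename is a placeholder
result = create_saturation_catalog_ota("exposure.33.fits", "/tmp",
                                       return_numpy_catalog=True)
if result is None:
    print("no saturated pixels found in this OTA")
else:
    satpix, extname = result
    print("%d saturated pixels in %s" % (satpix.shape[0], extname))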
Example #3
def create_saturation_catalog(filename, output_dir, verbose=True, mp=False, redo=False,
                              saturation_limit=65535):

    """
    Create catalogs listing all saturated pixels to enable handling saturation
    and persistency effects later-on.

    The main purpose of this function is to call create_saturation_catalog_ota,
    possibly wrapped with mp_create_saturation_catalog for parallel
    processing.

    Parameters
    ----------
    filename : string
    
        One file of the exposure. This file is mainly used to obtain the
        necessary information to create all the other filenames for this
        exposure.

    output_dir : string

        Directory to hold all the saturation catalogs. This is the directory
        that will be fed into collectcells via the -persistency command line
        flag.

    mp : bool - not used

    redo : bool

        Recreate the saturation catalog if it already exists

    Returns
    -------

    """

    logger = logging.getLogger("CreateSaturationCatalog")
    logger.info("Creating saturation mask for %s ..." % (filename))

    if (os.path.isfile(filename)):
        # This is one of the OTA fits files
        # extract the necessary information to generate the 
        # names of all the other filenames
        try:
            hdulist = pyfits.open(filename)
        except IOError:
            logger.warning("\rProblem opening file %s...\n" % (filename))
            return
        except:
            podi_logging.log_exception()
            return


        hdr_filename = hdulist[0].header['FILENAME']
        hdr_items = hdr_filename.split('.')
        basename = "%s.%s" % (hdr_items[0], hdr_items[1])
        hdulist.close()

        # Split the input filename to extract the directory part
        directory, dummy = os.path.split(filename)

    elif (os.path.isdir(filename)):
        # As a safety precaution, if the first parameter is the directory containing 
        # the files, extract just the ID string to be used for this script
        if (filename[-1] == "/"):
            filename = filename[:-1]

        basedir, basename = os.path.split(filename)
        directory = filename

    else:
        logger.error("Input %s is neither a file nor a directory!" % (filename))
        logger.error("Aborting operation due to illegal input.")
        return

    output_filename = "%s/%s.saturated.fits" % (output_dir, basename)
    logger.debug("Output saturation catalog: %s" % (output_filename))

    if (os.path.isfile(output_filename) and not redo):
        logger.debug("File (%s) exists, skipping!" % (output_filename))
        return

    # Setup parallel processing
    queue        = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.JoinableQueue()
    #return_queue = multiprocessing.Queue()
        
    number_jobs_queued = 0
    first_fits_file = None
    ota_list = []

    for (ota_x, ota_y) in itertools.product(range(8),repeat=2):
        ota = ota_x * 10 + ota_y

        filename = "%s/%s.%02d.fits" % (directory, basename, ota)
        if (not os.path.isfile(filename)):
            filename = "%s/%s.%02d.fits.fz" % (directory, basename, ota)
            if (not os.path.isfile(filename)):
                continue

        queue.put( (filename, saturation_limit) )
        number_jobs_queued += 1

        # Remember the very first FITS file we find. This will serve as the primary HDU
        if (first_fits_file is None):
            # Create a primary HDU from the first found fits-file
            try:
                firsthdu = pyfits.open(filename)
            except IOError:
                logger.warning("Problem opening FITS file %s" % (filename))
                continue
            logger.debug("Copying general information from file %s" % (filename))
            ota_list.append(pyfits.PrimaryHDU(header=firsthdu[0].header))
            firsthdu.close()
            firsthdu = None
            first_fits_file = filename

    if (first_fits_file is None):
        logger.warning("Couldn't find a valid FITS file, thus nothing to do")
        return

    # Now start all the workers
    logger.debug("Starting worker processes")
    processes = []
    for i in range(sitesetup.number_cpus):
        p = multiprocessing.Process(target=mp_create_saturation_catalog, args=(queue, return_queue, False))
        p.start()
        processes.append(p)
        time.sleep(0.01)

    # Tell all workers to shut down when no more data is left to work on
    logger.debug("Sending shutdown command to worker processes")
    for i in range(len(processes)):
        if (verbose): stdout_write("Sending quit command!\n")
        queue.put(None)

    logger.debug("Collecting catalogs for each OTA")
    for i in range(number_jobs_queued):
        if (verbose): print("reading return ",i)

        cat_name = return_queue.get()
        if (cat_name is not None):
            final_cat, extension_name = cat_name

            columns = [
                pyfits.Column(name='CELL_X', format='I', array=final_cat[:, 0]),
                pyfits.Column(name='CELL_Y', format='I', array=final_cat[:, 1]),
                pyfits.Column(name='X',      format='I', array=final_cat[:, 2]),
                pyfits.Column(name='Y',      format='I', array=final_cat[:, 3])
                ]
            # Create the table extension
            coldefs = pyfits.ColDefs(columns)
            tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
            tbhdu.name = extension_name
            ota_list.append(tbhdu)
            
        return_queue.task_done()

    # Join each process to make sure they terminated correctly
    logger.debug("Joining process to ensure proper termination")
    for p in processes:
        p.join()

    hdulist = pyfits.HDUList(ota_list)
    output_filename = "%s/%s.saturated.fits" % (output_dir, basename)
    clobberfile(output_filename)
    logger.debug("Writing output file %s" % (output_filename))
    hdulist.writeto(output_filename, clobber=True)

    logger.debug("all done!")
    return
Exemplo n.º 5
def read_fits_catalog(fn, extension, flatten=True):
    logger = logging.getLogger("ReadFITScat")

    logger.debug("Opening FITS catalog from %s" % (fn))
    if (type(fn) is str):
        hdulist = pyfits.open(fn)
    else:
        hdulist = fn

    if (type(extension) is list):
        ext_list = extension
    else:
        ext_list = [extension]

    return_tables = []

    for ext_id in ext_list:

        try:
            ext = hdulist[ext_id]
        except KeyError:
            logger.warning("Extension %s not found in %s" % (ext_id, fn))
            hdulist.info()
            return_tables.append(None)
            continue

        n_fields = ext.header['TFIELDS']
        n_rows = ext.header['NAXIS2']
        # table = numpy.empty((n_rows, n_fields))
        logger.debug("Reading data for %d fields" % (n_fields))
        # print "Reading data for %d fields" % (n_fields)
        table = []
        for f in range(n_fields):
            fd = ext.data.field(f)
            if (fd.ndim > 2 and flatten):
                logger.warning("Unable to handle catalog field %s" %
                               (ext.header['TTYPE%d' % (f + 1)]))
                continue
            if (fd.ndim > 1 and flatten):
                for c2 in range(fd.shape[1]):
                    table.append(fd[:, c2])
            else:
                table.append(fd)

        try:
            if (flatten):
                table = numpy.array(table).T
                logger.debug("Table data: %s" % (str(table.shape)))
            else:
                logger.debug("Table data: %d columns" % (len(table)))
            # np_table = numpy.array(table)
            # print table
        except:
            podi_logging.log_exception()
            pass

        # print table.shape

        # print table[1:3]

        # print ext.header
        return_tables.append(table)

    if (type(extension) is not list):
        return return_tables[0]

    return return_tables
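read_fits_catalog mirrors its input types: a single extension name yields a single table, a list of names yields a list (with None entries for missing extensions), and with flatten=True multi-component columns are split up and the result is transposed into a (rows, fields) ndarray. A short usage sketch with placeholder file and extension names:

# Hypothetical usage; file and extension names are placeholders
cat = read_fits_catalog("image.fits", "CAT.ODI")   # -> 2-D ndarray, rows x fields
print(cat.shape)

tables = read_fits_catalog("image.fits", ["CAT.ODI", "CAT.ODI+2MASS"])
for tab in tables:                                 # list in, list out
    if tab is not None:
        print(tab.shape)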
Exemplo n.º 6
def worker_slave(queue):
    """

    This function handles all work, either running collectcells locally or 
    remotely via ssh. Files to reduce are read from a queue.

    """

    logger = logging.getLogger("SAMPWorker")

    logger.info("Worker process started, ready for action...")

    if (not setup.use_ssh):
        # If we reduce frames locally, prepare the QR logging.
        options['clobber'] = False

    while (True):
        try:
            # print "\n\nWaiting for stuff to do\n\n"
            task = queue.get()
        except (KeyboardInterrupt, SystemExit) as e:
            # print "worker received termination notice"
            # Ignore the shut-down command here, and wait for the official 
            # shutdown command from main task
            continue

        if (task is None):
            logger.info("Shutting down worker")
            queue.task_done()
            break

        filename, object_name, obsid = task

        logger.info("starting work on file %s" % (filename))

        ccopts = ""
        if (len(sys.argv) > 2):
            # There are some parameters to be forwarded to collectcells
            ccopts = " ".join(sys.argv[1:])
        # print "ccopts=",ccopts

        if (cmdline_arg_isset("-dryrun")):
            logger.info("DRYRUN: Sending off file %s for reduction" % (filename))
            # print "task done!"
            queue.task_done()
            continue


        if (object_name.lower().find("focus") >= 0):
            #
            # This is most likely a focus exposure
            #
            n_stars = int(cmdline_arg_set_or_default("-nstars", 7))
            logger.info("New focus exposure to analyze (with %d stars)" % (n_stars))

            if (setup.use_ssh):

                remote_inputfile = setup.translate_filename_local2remote(filename)
                kw = {
                    'user': setup.ssh_user,
                    'host': setup.ssh_host,
                    'filename': remote_inputfile,
                    'podidir': setup.remote_podi_dir,
                    'outdir': setup.output_dir,
                    'nstars': n_stars,
                }
                ssh_command = "ssh %(user)s@%(host)s %(podidir)s/podi_focus.py -nstars=%(nstars)d %(filename)s %(outdir)s" % kw

                logger.info("Out-sourcing work to %(user)s@%(host)s via ssh" % kw)
                process = subprocess.Popen(ssh_command.split(), 
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
                _stdout, _stderr = process.communicate()
                process.stdout.close()
                process.stderr.close()
                try:
                    process.terminate()
                except:
                    pass

                #print "*"*80
                if (not _stdout == "" and _stdout is not None):
                    logger.info("Received STDOUT:\n%s" % (_stdout))
                    #print "*"*80
                if (not _stderr == "" and _stderr is not None):
                    logger.info("Received STDERR:\n%s" % (_stderr))
                    #print _stderr
                    #print "*"*80
                
            else:
                # Run locally
                logger.info("Analyzing focus sequence (%s) locally" % (filename))
                podi_focus.get_focus_measurement(filename, n_stars=n_stars, output_dir=setup.output_dir)

            logger.info("Done with analysis")

            # Now check if we are supposed to open/display the focus plot
            if (setup.focus_display is not None):
                
                remote_filename = "%s/%s_focus.png" % (setup.output_dir, obsid)
                local_filename = setup.translate_filename_remote2local(filename, remote_filename)

                cmd = "%s %s &" % (setup.focus_display, local_filename)
                logger.info("Opening and displaying plot")
                os.system(cmd)

        else:
            #
            # This is NOT a focus exposure
            #

            if (setup.use_ssh):

                # This is not a focus exposure, so treat it as a normal science exposure
                remote_inputfile = setup.translate_filename_local2remote(filename)
                kw = {
                    'user': setup.ssh_user,
                    'host': setup.ssh_host,
                    'collectcells': setup.ssh_executable,
                    'options': ccopts,
                    'filename': remote_inputfile,
                    'outputfile': setup.output_format, 
                }

                ssh_command = "ssh %(user)s@%(host)s %(collectcells)s %(filename)s %(outputfile)s %(options)s -noclobber" % kw
                
                process = subprocess.Popen(ssh_command.split(), 
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
                _stdout, _stderr = process.communicate()
                process.stdout.close()
                process.stderr.close()
                try:
                    process.terminate()
                except:
                    pass

                #print "*"*80
                if (not _stdout == "" and _stdout is not None):
                    logger.info("Received STDOUT:\n%s" % (_stdout))
                    #print "*"*80
                if (not _stderr == "" and _stderr is not None):
                    logger.info("Received STDERR:\n%s" % (_stderr))
                    #print _stderr
                    #print "*"*80

            else:
                logger.info("Running collectcells (%s)" % (filename))
                podi_collectcells.collectcells_with_timeout(input=filename, 
                                                            outputfile=setup.output_format,
                                                            options=options,
                                                            timeout=300,
                                                            process_tracker=process_tracker)

            #
            # If requested, also send the command to ds9
            #
            local_filename = setup.translate_filename_remote2local(filename, setup.output_format)
            if (cmdline_arg_isset("-forward2ds9")):
                forward2ds9_option = cmdline_arg_set_or_default("-forward2ds9", "image")
                if (forward2ds9_option == "irafmosaic"):
                    cmd = "mosaicimage iraf %s" % (local_filename)
                else:
                    cmd = "fits %s" % (local_filename)

                logger.info("Forwarding file to ds9")
                logger.debug("filename: %s" % (filename))
                logger.debug("remote file: %s" % (remote_inputfile))
                logger.debug("local file: %s" % (local_filename))

                try:
                    cli1 = sampy.SAMPIntegratedClient(metadata = metadata)
                    cli1.connect()
                    cli1.enotify_all(mtype='ds9.set', cmd='frame 2')
                    cli1.enotify_all(mtype='ds9.set', cmd='scale scope global')
                    cli1.enotify_all(mtype='ds9.set', cmd=cmd)
                    cli1.disconnect()
                except Exception as err:
                    logger.error("Problems sending message to ds9: %s" % err)
                    podi_logging.log_exception()
                    pass

            # By default, also open the psf diagnostic plot, if available
            psf_plot_fn = local_filename[:-5]+".psf.png"
            if (os.path.isfile(psf_plot_fn)):
                cmd = "%s %s &" % (setup.focus_display, psf_plot_fn)
                logger.info("Opening and displaying PSF diagnostic plot (%s)" % (psf_plot_fn))
                os.system(cmd)

        #
        # Once the file is reduced, mark the current task as done.
        #
        logger.info("task done!")
        queue.task_done()

    print("Terminating worker process...")

    return
Exemplo n.º 7
def get_focus_measurement(filename, n_stars=5, output_dir="./", mp=False):
    """

    Parameters
    ----------
    filename : string
    
        One file of the exposure. This file is mainly used to obtain the
        necessary information to create all the other filenames for this
        exposure.

    n_stars : int

        Number of steps in the focus sweep, i.e. the number of star images in
        each focus sequence.

    output_dir : string

        Directory to hold the output focus plot.

    Returns
    -------

    """

    logger = logging.getLogger("MeasureFocus")
    logger.info("Starting focus measurement for %s (%d *)..." %
                (filename, n_stars))

    if (os.path.isfile(filename)):
        # This is one of the OTA fits files
        # extract the necessary information to generate the
        # names of all the other filenames
        try:
            hdulist = pyfits.open(filename)
        except IOError:
            logger.warning("\rProblem opening file %s...\n" % (filename))
            return
        except:
            podi_logging.log_exception()

        hdr_filename = hdulist[0].header['FILENAME']
        hdr_items = hdr_filename.split('.')
        basename = "%s.%s" % (hdr_items[0], hdr_items[1])
        hdulist.close()

        # Split the input filename to extract the directory part
        directory, dummy = os.path.split(filename)

    elif (os.path.isdir(filename)):
        # As a safety precaution, if the first parameter is the directory containing
        # the files, extract just the ID string to be used for this script
        if (filename[-1] == "/"):
            filename = filename[:-1]

        basedir, basename = os.path.split(filename)
        directory = filename

    else:
        logger.error("Input %s is neither a file nor a directory!" % (filename))
        return

    # Setup parallel processing
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.JoinableQueue()

    number_jobs_queued = 0
    obsid = None
    for (ota_x, ota_y) in itertools.product(range(8), repeat=2):
        ota = ota_x * 10 + ota_y

        filename = "%s/%s.%02d.fits" % (directory, basename, ota)

        if (not os.path.isfile(filename)):
            filename = "%s/%s.%02d.fits.fz" % (directory, basename, ota)
            if (not os.path.isfile(filename)):
                continue

        logger.debug("Adding file %s to task list" % (filename))
        if (obsid is None):
            hdulist = pyfits.open(filename)
            obsid = hdulist[0].header['OBSID']
            hdulist.close()

        queue.put((filename, n_stars))
        number_jobs_queued += 1
        # break

    # Now start all the workers
    logger.debug("Starting worker processes")
    processes = []
    for i in range(sitesetup.number_cpus):
        p = multiprocessing.Process(target=mp_measure_focus,
                                    args=(queue, return_queue, False))
        p.start()
        processes.append(p)
        time.sleep(0.01)

        # Tell each worker to shut down once no more data is left to work on
        queue.put(None)

    logger.info("Collecting catalogs for each OTA")
    all_foci = None
    real_numbers = False
    for i in range(number_jobs_queued):
        returned = return_queue.get()
        if (returned is None):
            continue

        focus_positions, found_real_numbers = returned
        if (found_real_numbers):
            real_numbers = True

        logger.debug("Received %d focus positions" %
                     (focus_positions.shape[0]))

        all_foci = focus_positions if all_foci is None else numpy.append(
            all_foci, focus_positions, axis=0)
        #print cat_name

        return_queue.task_done()

    # Join each process to make sure they terminated correctly
    logger.debug("Joining processes to ensure proper termination")
    for p in processes:
        p.join()

    #print all_foci, all_foci.ndim, all_foci.shape
    if (all_foci is None or all_foci.ndim < 2 or all_foci.shape[0] <= 0):
        logger.error("Couldn't find any star patterns!")
        return

    logger.info("Found a grand total of %d focus positions" %
                (all_foci.shape[0]))
    # print all_foci.shape

    with open("allfoci", "w") as f:
        for s in range(all_foci.shape[0]):
            numpy.savetxt(f, all_foci[s, :, :])
            print("\n", file=f)
    # numpy.savetxt("allfoci", all_foci.reshape((-1,all_foci.shape[2])))

    #
    # Eliminate all focus measurements that could be affected by the low-light
    # CTE problem
    #
    min_background = numpy.min(all_foci[:, :, SXFocusColumn['background']],
                               axis=1)
    detector_lots = all_foci[:, 0, SXFocusColumn['ota_lot']]
    bad = (detector_lots < 7) & (min_background < 100)
    logger.info("Excluding %d focus samples that might have CTE issues" %
                (numpy.sum(bad)))
    all_foci = all_foci[~bad]

    stats = get_mean_focuscurve(all_foci)
    pfit, uncert, fwhm_median, fwhm_std, fwhm_cleaned, best_focus_position, best_focus = stats

    plotfilename = "%s/%s_focus.png" % (output_dir, obsid)
    create_focus_plot(all_foci, stats, basename, plotfilename, real_numbers)

    logger.debug("all done!")
    return
Exemplo n.º 8
def imcombine(input_filelist, outputfile, operation, return_hdu=False,
              subtract=None, scale=None, gather_all_otas=True,
              verbose=False):  # verbose added: used below but undefined in the original snippet

    logger = logging.getLogger("ImCombine")

    # First loop over all filenames and make sure all files exist
    filelist = []
    for file in input_filelist:
        if (os.path.isfile(file)):
            filelist.append(file)

    if (len(filelist) <= 0):
        logger.error("No existing files found in input list, hence nothing to do!\n")
        return None

    logger.debug("Stacking the following files:\n -- %s" % ("\n -- ".join(filelist)))

    # elif (len(filelist) == 1):
    #     # stdout_write("Only 1 file to combine, save the hassle and copy the file!\n")
    #     hdulist = pyfits.open(filelist[0])
    #     if (return_hdu):
    #         return hdulist
    #     hdulist.writeto(outputfile, clobber=True)
    #     return
    
    # Read the input parameters
    # Note that file headers are copied from the first file
    reference_filename = filelist[0]
    ref_hdulist = pyfits.open(reference_filename)

    # Create the primary extension of the output file
    # Copy all headers from the reference HDU
    primhdu = pyfits.PrimaryHDU(header=ref_hdulist[0].header)

    # Add PrimaryHDU to list of OTAs that go into the output file
    out_hdulist = [primhdu]

    ref_hdulist.close()
    del ref_hdulist

    #
    # Compile a list of OTAs that will be in the output frame
    #
    otas_found = []
    ota_sizes = {}
    ref_header = {}
    if (gather_all_otas):
        logger.info("Checking all files to compile comprehensive list of available OTAs")
    master_associations = None
    for fn in filelist:
        hdulist = pyfits.open(fn)
        for ext in hdulist:
            if (is_image_extension(ext)):
                try:
                    otas_found.append(ext.name)
                    if (ext.name not in ota_sizes):
                        ota_sizes[ext.name] = ext.data.shape
                        ref_header[ext.name] = ext.header
                except:
                    podi_logging.log_exception()

        #
        # Collect all individual association tables to create 
        # a master association table combining all input data
        #
        assoc_table = podi_associations.read_associations(hdulist)
        if (assoc_table is None):
            logger.info("No association data available")
            assoc_table = {'input_simple': [fn]}
        if (master_associations is None):
            master_associations = assoc_table
        else:
            master_associations = podi_associations.collect_reduction_files_used(
                master_associations, assoc_table)

        hdulist.close()
        del hdulist
        if (not gather_all_otas):
            break

    otas_to_combine = set(otas_found)
    ota_counter = collections.Counter(otas_found)
    #print otas_to_combine
    #print ota_counter


    #
    # Now loop over all extensions and compute the mean
    #
    # for cur_ext in range(0, len(ref_hdulist)):
    for cur_ext, extname in enumerate(otas_to_combine):


        data_blocks = []
        # Check what OTA we are dealing with
        # if (not is_image_extension(ref_hdulist[cur_ext])):
        #     continue
        # ref_fppos = ref_hdulist[cur_ext].name #header['EXTNAME']

        #stdout_write("\rCombining frames for OTA %s (#% 2d/% 2d) ..." % (ref_fppos, cur_ext+1, len(ref_hdulist)))
        #logger.debug("Combining frames for OTA %s (#% 2d/% 2d) ..." % (ref_fppos, cur_ext+1, len(ref_hdulist)))
        logger.info("Combining frames for OTA %s (#% 2d/% 2d) ..." % (extname, cur_ext+1, len(otas_to_combine)))

        #
        # Use a somewhat weird-looking construct to move the memory allocation
        # and the actual imcombine into a separate process. This has to do with
        # if/how/when python releases memory (or not), which otherwise causes a
        # massive short-term memory leak. With the additional process, the
        # memory is owned by the other process and is freed once we destroy
        # this helper process (see the standalone sketch after this example).
        #
        return_queue = multiprocessing.JoinableQueue()
        #worker_args=(ref_fppos, filelist, ref_hdulist[cur_ext].data.shape, operation, return_queue, verbose)

        kw_args = {
            'extension': extname,
            'filelist':  filelist, 
            'shape':     (ota_sizes[extname][0], ota_sizes[extname][1], ota_counter[extname]), #ref_hdulist[cur_ext].data.shape,
            'operation': operation, 
            'queue':     return_queue, 
            'verbose':   verbose,
            'subtract':  subtract,
            'scale':     scale,
        }
        # p = multiprocessing.Process(target=imcombine_subprocess, args=worker_args)
        p = multiprocessing.Process(target=imcombine_subprocess, kwargs=kw_args)
        p.start()
        combined = return_queue.get()
        p.terminate()
        del p

        if (verbose): stdout_write(" done, creating fits extension ...")
        logger.debug("done with computing, creating fits extension ...")
        # Create new ImageHDU, insert the imcombined's data and copy the 
        # header from the reference frame
        hdu = pyfits.ImageHDU(data=combined, header=ref_header[extname])

        # Append the new HDU to the list of result HDUs
        out_hdulist.append(hdu)

        if (verbose): stdout_write(" done\n")

    #
    # At this point, add a fits table listing all filenames that went into this combination
    #
    filenames_only = []
    for file in filelist:
        dirname, filename = os.path.split(file)
        filenames_only.append(filename)

    logger.debug("Adding association data to output file")

    out_hdulist[0].header["NCOMBINE"] = (len(filenames_only), "number of combined files")

    columns = [
        pyfits.Column(name='filenumber', format="I4", array=range(1,len(filenames_only)+1)),
        pyfits.Column(name='filename', format="A100", array=filenames_only),
        ]
    coldefs = pyfits.ColDefs(columns)
    tablehdu = pyfits.BinTableHDU.from_columns(coldefs)
    tablehdu.header["EXTNAME"] = "FILELIST"
    out_hdulist.append(tablehdu)

    assoc_tbhdu = podi_associations.create_association_table(
        master_associations)
    out_hdulist.append(assoc_tbhdu)

    #
    # All work done now, prepare to return the data or write it to disk
    #
    out_hdu = pyfits.HDUList(out_hdulist)
    if (not return_hdu and outputfile is not None):
        logger.debug(" writing results to file %s ..." % (outputfile))
        clobberfile(outputfile)
        try:
            out_hdu.writeto(outputfile, clobber=True, checksum=True)
        except TypeError:
            # this most likely is this error:
            # TypeError: object of type 'NoneType' has no len()
            # related to the checksum calculation
            clobberfile(outputfile)
            out_hdu.writeto(outputfile, clobber=True)
        except pyfits.VerifyError:
            logger.warning("Encountered FITS verification error, writing anyway")
            try:
                out_hdu.writeto(outputfile, clobber=True, checksum=False, output_verify='ignore')
            except:
                raise
        except:
            raise
        out_hdu.close()
        del out_hdu
        del out_hdulist
        stdout_write(" done!\n")
    elif (return_hdu):
        logger.debug(" returning HDU for further processing ...")
        return out_hdu
    else:
        logger.debug(" couldn't write output file, no filename given!")

    return None
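The comment inside the combine loop above explains why each OTA is stacked in a short-lived helper process: the big pixel stack is allocated in the child, only the (much smaller) combined image crosses the queue, and terminating the child reliably returns its memory to the OS. A self-contained sketch of just that pattern, with a made-up worker in place of imcombine_subprocess:

import multiprocessing
import numpy

def _stack_worker(shape, queue):
    # All large allocations live in the child process only
    stack = numpy.random.random(shape)     # stand-in for the pixel stack
    queue.put(numpy.mean(stack, axis=2))   # only the result is sent back

def combine_in_child(shape=(256, 256, 10)):
    queue = multiprocessing.JoinableQueue()
    p = multiprocessing.Process(target=_stack_worker, args=(shape, queue))
    p.start()
    combined = queue.get()   # blocks until the worker delivers its result
    p.terminate()            # child exits; its memory goes back to the OS
    return combined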
Exemplo n.º 9
            hdr = hdulist[ota].header.copy()
            hdr['NAXIS'] = 2
            hdr['NAXIS1'] = 4096
            hdr['NAXIS2'] = 4096
            hdr['CRVAL1'] = math.fmod(hdr['CRVAL1'] + wcs_offset[0], 360.0)
            hdr['CRVAL2'] += wcs_offset[1]
            wcs = astWCS.WCS(hdr, mode='pyfits')
            wcs.updateFromHeader()
            x, y = corners[ota]
            print(numpy.array(wcs.pix2wcs(x, y)))
            radec_ref[idx, :] = numpy.array(wcs.pix2wcs(x, y))
        except:
            # ignore OTAs that are missing
            # this implies we need at least ONE of the 4 central OTAs to
            # make this correction
            podi_logging.log_exception()
            pass

    ref_point = bottleneck.nanmean(radec_ref, axis=0)
    # ref_point += wcs_offset

    print "REF:", ref_point

    crval = numpy.array(
        [hdulist[1].header['CRVAL1'], hdulist[1].header['CRVAL2']])

    d_crval = ref_point - crval
    print "current CRVAL:", hdulist[1].header['CRVAL1'], hdulist[1].header[
        'CRVAL2']

    #
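This example is only a fragment: the indentation and the dangling except imply an enclosing loop over the (four central) OTAs with a try around each per-OTA block. A hedged reconstruction of the missing scaffolding, with names inferred from the fragment rather than taken from the original code:

    # Assumed context, inferred from the fragment above (not original code):
    radec_ref = numpy.full((len(corners), 2), numpy.nan)
    for idx, ota in enumerate(corners):
        try:
            # ... per-OTA block as shown above: copy the header, apply
            # wcs_offset to CRVAL1/2, convert the corner pixel to RA/Dec ...
            pass
        except:
            # ignore OTAs that are missing
            podi_logging.log_exception()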
Exemplo n.º 10
def measure_focus_ota(filename, n_stars=5):
    """

    Obtain a focus measurement from the specified filename. To do so,

    1) run source extractor to get FWHM measurements and positions for all sources

    2) group sources into vertical sequences as expected from the function 
       of the focus tool

    3) assign physical focus positions to each measurement

    4) return final catalog back to master so a final, combined focus curve
       can be assembled.

    """


    # print"\n\n\nworking on file ",filename,"\n\n\n"

    # "logger" is needed by the except branches below; it gets re-created
    # with OBSID/OTA details once the header has been read
    logger = logging.getLogger("MeasureFocusOTA")

    try:
        hdulist = pyfits.open(filename)
    except IOError:
        logger.debug("Can't open file %s" % (filename))
        return None
    except:
        podi_logging.log_exception()
        return None

    obsid = hdulist[0].header['OBSID']
    ota = hdulist[1].header['WN_OTAX'] * 10 + hdulist[1].header['WN_OTAY']
    ota_id = hdulist[0].header['OTA_ID']

    logger = logging.getLogger("MeasureFocusOTA: %s(%02d)" % (obsid, ota))
    logger.info("Starting work ...")

    ota = int(hdulist[0].header['FPPOS'][2:4])

    # Check the object name to see if it contains the information about the exposure
    focus_positions = numpy.arange(n_stars)[::-1]+1.
    real_focus_positions = False
    object_name = hdulist[0].header['OBJECT']
    if (object_name.startswith("Focus Center")):
        # This looks like it might be the right format
        try:
            items = object_name.split()
            # Check all items
            if (len(items) == 7 and
                items[0] == "Focus" and
                items[1] == "Center" and 
                items[3] == "NStep" and
                items[5] == "DStep"):
                n_stars = int(items[4])
                focus_center = float(items[2])
                focus_step = float(items[6])
                focus_start = focus_center - (n_stars-1)/2*focus_step
                focus_positions = numpy.arange(n_stars, dtype=numpy.float32)[::-1] * focus_step + focus_start
                logger.debug("Infom from header: N=%d, center=%.0f, step=%.0f, start=%.0f" % (
                    n_stars, focus_center, focus_Step, focus_step, focus_start))
                logger.debug("Focus positions: %s" % (str(focus_positions)))
                real_focus_positions = True
        except:
            pass

    # Run SourceExtractor on the file
    sex_config = "%s/config/focus.sexconf" % (sitesetup.exec_dir)
    sex_param = "%s/config/focus.sexparam" % (sitesetup.exec_dir)
    catfile = "%s/tmp.%s_OTA%02d.cat" % (sitesetup.scratch_dir, obsid, ota)
    sex_cmd = "%(sexcmd)s -c %(sex_config)s -PARAMETERS_NAME %(sex_param)s -CATALOG_NAME %(catfile)s %(filename)s" % {
        "sexcmd": sitesetup.sextractor,
        "sex_config": sex_config,
        "sex_param": sex_param,
        "catfile": catfile,
        "filename": filename,
        "redirect": sitesetup.sex_redirect,
    }
    # print "\n"*10,sex_cmd,"\n"*10
    # Run source extractor
    # catfile = "/tmp//tmp.pid4383.20121008T221836.0_OTA33.cat"

    if (not os.path.isfile(catfile)):
        logger.debug("Running source extractor to search for stars")
        start_time = time.time()
        try:
            ret = subprocess.Popen(sex_cmd.split(), 
                                   stdout=subprocess.PIPE, 
                                   stderr=subprocess.PIPE)
            (sex_stdout, sex_stderr) = ret.communicate()
            if (ret.returncode != 0):
                logger.warning("Sextractor might have a problem, check the log")
                logger.info("Stdout=\n"+sex_stdout)
                logger.info("Stderr=\n"+sex_stderr)
        except OSError as e:
            podi_logging.log_exception()
            print("Execution failed:", e, file=sys.stderr)
        end_time = time.time()
        logger.debug("SourceExtractor finished after %.2f seconds" % (end_time - start_time))
    else:
        logger.debug("Source catalog already exists, re-using the old file")

    #
    # delete the tmp catalog
    #

    #
    # Load the source catalog.
    # Handle cases of non-existing or empty catalogs
    # 
    logger.debug("loading the source catalog from %s" % (catfile))
    try:
        source_cat = numpy.loadtxt(catfile)
    except IOError:
        logger.warning("The Sextractor catalog is empty, ignoring this OTA")
        source_cat = None
        return None
    if (source_cat.shape[0] <= 0):
        # no sources found
        return None

    # print "\n\n total sources in raw file",source_cat.shape,"\n\n"
    logger.debug("Found %d sources in raw SourceExtractor catalog %s" % (source_cat.shape[0], catfile))

    #
    # Now convert all X/Y values to proper OTA X/Y coordinates based on their 
    # extension number
    #
    corr_cat = None
    # print "extensions:", source_cat[:,SXFocusColumn['extension']]

    for i in range(len(hdulist)):
        if (not is_image_extension(hdulist[i])):
            # print "skipping extension",i,", this is not an image extension"
            continue

        # get cell_x, cell_y from this extension
        cell_x = hdulist[i].header['WN_CELLX']
        cell_y = hdulist[i].header['WN_CELLY']
        x1, x2, y1, y2 = cell2ota__get_target_region(cell_x, cell_y, 1)
        
        # Create a mask for all sources in this cell
        in_this_cell = (source_cat[:,SXFocusColumn['extension']] == i)
        if (numpy.sum(in_this_cell) <= 0):
            logger.debug("Couldn't find any sources in cell %d,%d" % (cell_x, cell_y))
            continue

        # print "found",numpy.sum(in_this_cell),"sources for cell",cell_x, cell_y, "  adding", x1, y1
        
        cell_cat = source_cat[in_this_cell]
        cell_cat[:,SXFocusColumn['x']] += x1
        cell_cat[:,SXFocusColumn['y']] += y1

        #
        # get overscan-level data for this cell
        #
        binning = get_binning(hdulist[i].header)
        overscan_data = extract_biassec_from_cell(hdulist[i].data, binning)
        overscan_level = numpy.mean(overscan_data)
        cell_cat[:, SXFocusColumn['background']] -= overscan_level

        corr_cat = cell_cat if corr_cat is None else numpy.append(corr_cat, cell_cat, axis=0)

    # 
    # Attach to each measurement what detector lot it came from
    #
    fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist)
    detector_lot = fpl.get_detector_generation(ota_id)
    corr_cat[:, SXFocusColumn['ota_lot']] = detector_lot

    #
    # Also override the extension number in the source catalog with the 
    # position in the overall focal plane
    #
    corr_cat[:, SXFocusColumn['extension']] = ota


    # print "\n\n\n\ntotal corrected catalog:",corr_cat.shape
    # save the source catalog
    #numpy.savetxt("focus_cat.ota%02d" % (ota), source_cat)
    #numpy.savetxt("focus_cat2.ota%02d" % (ota), corr_cat)
    logger.debug("done fixing the pixel coordinates")

    # only select bright enough sources
    bright_enough = corr_cat[:,SXFocusColumn['mag_auto']] < -10
    corr_cat = corr_cat[bright_enough]
    #numpy.savetxt("focus_cat3.ota%02d" % (ota), corr_cat)

    #dummy_test = open("dummy.test", "w")
    # Now try to match up stars in a sequence
    all_angles, all_distances = [], []
    for s1 in range(corr_cat.shape[0]):
        # Assume this is the middle star in the sequence

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:,SXFocusColumn['x']] - corr_cat[s1,2]
        dy = corr_cat[:,SXFocusColumn['y']] - corr_cat[s1,3]
        d_total  = numpy.hypot(dx, dy)

        in_cone = numpy.fabs(dx/dy) < 0.1

        # Need to be at most n_stars * 10'' and at least 5'' 
        close_enough = (d_total < (n_stars * 10. / 0.11)) & (d_total > 5 / 0.11)
        good_so_far = in_cone & close_enough
        if (numpy.sum(good_so_far) <= 0):
            continue

        candidates = corr_cat[good_so_far]
        # if (numpy.sum(candidates) < n_stars):
        #     # Only use full sequences
        #     continue

        # are the magnitudes comparable
        delta_mag = numpy.fabs(candidates[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']])
        similar_brightness = delta_mag < 1
        #print >>dummy_test, "#", candidates.shape[0], numpy.sum(similar_brightness)
        #print similar_brightness
        if (numpy.sum(similar_brightness) <= 0):
            continue

        good_candidates = candidates[similar_brightness]


        # Now sort the data with increasing y values
        si = numpy.argsort(good_candidates[:,SXFocusColumn['y']])
        sorted_candidates = good_candidates[si]

        #numpy.savetxt(dummy_test, sorted_candidates)
        #print >>dummy_test, "\n\n\n"

        # Now compute the slope and distance between each point and each point 
        # above it
        for p1, p2 in itertools.combinations(range(sorted_candidates.shape[0]), 2):
            angle = numpy.arctan2(sorted_candidates[p1,2] - sorted_candidates[p2,2],
                                  sorted_candidates[p1,3] - sorted_candidates[p2,3])
            distance = numpy.sqrt( (sorted_candidates[p1,2] - sorted_candidates[p2,2])**2
                                   + (sorted_candidates[p1,3] - sorted_candidates[p2,3])**2 )
            all_angles.append(angle)
            all_distances.append(distance)

    #dummy_test.close()

    # Once we are through with the first iteration find the best-fitting angle

    all_angles = numpy.array(all_angles)
    all_distances = numpy.array(all_distances)

    # Find the best or rather most frequently occurring angle
    all_angles[all_angles < 0] += 2*math.pi

    #numpy.savetxt("dummy.angles", all_angles)
    #numpy.savetxt("dummy.distances", all_distances)
 
    filtered_angles = three_sigma_clip(all_angles)
    if (filtered_angles is None or
        filtered_angles.ndim < 1 or
        filtered_angles.shape[0] <= 0):
        return None

    # angle_width = scipy.stats.scoreatpercentile(filtered_angles, [16,84])
    angle_width = scipy.stats.scoreatpercentile(filtered_angles, [5,95])

    logger.debug("Found median angle %f [%f ...%f]" % (
            numpy.degrees(numpy.median(filtered_angles)), 
            numpy.degrees(angle_width[0]), numpy.degrees(angle_width[1])
            )
    )

    #
    # Now we can do another proper search for all stars
    # This time, only search for complete series (#stars as specified)
    #
    #focus_stars = open("focus_stars", "w")
    all_candidates = []
    for s1 in range(corr_cat.shape[0]):

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:,SXFocusColumn['x']] - corr_cat[s1,2]
        dy = corr_cat[:,SXFocusColumn['y']] - corr_cat[s1,3]

        angles = numpy.arctan2(dx, dy)
        angles[angles < 0] += 2*math.pi

        d_total = numpy.hypot(dx, dy)
        #print numpy.degrees(angle_width), numpy.degrees(angles)

        #print angle_width[0], angle_width[1]
        in_cone1 = (angles > angle_width[0]) & (angles < angle_width[1])
        in_cone2 = (angles+math.pi > angle_width[0]) & (angles+math.pi < angle_width[1])
        in_cone = in_cone1 | in_cone2
        # print angles[in_cone]
        #print angles[in_cone][0], angles[in_cone][0] > angle_width[0], angles[in_cone][0] < angle_width[1]
        close_enough = (d_total < ((n_stars+1) * 10. / 0.11)) & (d_total > 5 / 0.11)
        similar_brightness = numpy.fabs(corr_cat[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']]) < 1
        good = in_cone & close_enough & similar_brightness
        good[s1] = True
        # print s1, ":", numpy.sum(in_cone), numpy.sum(close_enough), numpy.sum(similar_brightness), numpy.sum(good)

        if (numpy.sum(good) <= 1):
            continue

        candidates = corr_cat[good]
        # print "# canddates =", candidates.shape[0]

        if (candidates.shape[0] != n_stars):
            # Only use full sequences
            continue

        # print "found match:",s1

        # Now we have a set with the right number of stars, matching the overall 
        # angle, and with similar brightnesses
        # sort them top to bottom
        si = numpy.argsort(candidates[:,3])
        #numpy.savetxt(focus_stars, candidates[:,3])
        #numpy.savetxt(focus_stars, si)
        sorted_candidates = candidates[si]

        # print "XXX", sorted_candidates.shape, sorted_candidates[:,0].shape, focus_positions.shape, n_stars, candidates.shape[0]

        sorted_candidates[:,0] = focus_positions
        #numpy.savetxt(focus_stars, sorted_candidates)
        #numpy.savetxt(focus_stars, numpy.degrees(angles[good]))
        #numpy.savetxt(focus_stars, in_cone[good])
        #numpy.savetxt(focus_stars, d_total[good])
        #numpy.savetxt(focus_stars, (corr_cat[:,10] - corr_cat[s1,10])[good])
        #print >>focus_stars, "\n\n\n\n"

        all_candidates.append(sorted_candidates)
    
    #focus_stars.close()

    all_candidates = numpy.array(all_candidates)
    logger.debug(str(all_candidates.shape))

    #xxx = open("steps", "w")
    # Now compute the distances from each star to the previous

    step_vectors = []

    for i in range(1, n_stars):
        # logger.debug("Candidates: %d %s\n%s" % (all_candidates.ndim, str(all_candidates.shape), str(all_candidates)))
        if (all_candidates.ndim < 1 or
            all_candidates.shape[0] <= 0):
            # We ran out of candidates
            logger.debug("We ran out of viable candidates after %s stars" % (i))
            return None

        steps = all_candidates[:,i,2:4] - all_candidates[:,i-1,2:4]
        #numpy.savetxt(xxx, steps)
        #print >>xxx, "\n\n\n\n"

        logger.debug("Computing average step size, star %d" % (i))
        logger.debug("Steps-X:\n%s" % (str(steps[:,0])))
        logger.debug("Steps-y:\n%s" % (str(steps[:,1])))
        clean_dx = three_sigma_clip(steps[:,0])
        clean_dy = three_sigma_clip(steps[:,1])

        # Check if both clean_dx and clean_dy are not empty
        logger.debug("clean-dx=%s" % (str(clean_dx)))
        logger.debug("clean-dy=%s" % (str(clean_dy)))
        if (clean_dx.ndim < 1 or clean_dx.shape[0] <= 0 or
            clean_dy.ndim < 1 or clean_dy.shape[0] <= 0):
            logger.debug("Can't find a clean dx/dy shift in iteration %d" % (i))
            return None

        dx = numpy.median(clean_dx)
        dy = numpy.median(clean_dy)

        distx = scipy.stats.scoreatpercentile(clean_dx, [16,84])
        disty = scipy.stats.scoreatpercentile(clean_dy, [16,84])
        sigma_x = 0.5 * (distx[1] - distx[0])
        sigma_y = 0.5 * (disty[1] - disty[0])

        step_vectors.append([dx, dy, sigma_x, sigma_y])

        good_steps = (steps[:,0] > (dx - 3*sigma_x)) & (steps[:,0] < (dx + 3*sigma_x)) \
            & (steps[:,1] > (dy - 3*sigma_y)) & (steps[:,1] < (dy + 3*sigma_y))

        logger.debug("before step-matching #%d: %s" % (i, str(all_candidates.shape)))
        all_candidates = all_candidates[good_steps]
        logger.debug("after step-matching: #%d: %s" % (i, str(all_candidates.shape)))

    logger.debug("%s: %s" % (filename, str(step_vectors)))

    # final_focus = open("final_focus", "w")
    # for i in range(all_candidates.shape[0]):
    #     numpy.savetxt(final_focus, all_candidates[i])
    #     print >>final_focus, "\n\n\n\n\n"
    # final_focus.close()

    logger.debug("Found %d focus stars" % (all_candidates.shape[0]))
    return all_candidates, real_focus_positions

    logger.debug("Returning final FITS table catalog")
    return None
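three_sigma_clip is used throughout this example (and again, with return_mask=True, in Example #10) but its implementation is not shown. A plausible minimal version, an assumption rather than the pipeline's actual code, iteratively rejects points more than three standard deviations from the median:

import numpy

def three_sigma_clip(values, return_mask=False, max_iter=10):
    # Iteratively reject points more than 3 sigma from the median
    values = numpy.asarray(values, dtype=numpy.float64)
    good = numpy.isfinite(values)
    for _ in range(max_iter):
        med = numpy.median(values[good])
        sigma = numpy.std(values[good])
        keep = good & (numpy.fabs(values - med) < 3 * sigma)
        if (numpy.sum(keep) == numpy.sum(good)):
            break              # converged, nothing left to reject
        good = keep
    if (return_mask):
        return values[good], good
    return values[good]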
Example #10
def create_psf_profiles(infilename, outdir_base, width):

    # set up logging; "logger" is used throughout but was never created in
    # this snippet, and the logger name here is an assumption
    logger = logging.getLogger("CreatePSFProfiles")

    #
    # Create an output directory to hold output files
    #
    _, output_dir = os.path.split(infilename)
    # output_dir, _ = os.path.split(os.path.abspath(infilename))
    if (infilename.endswith(".fits")):
        output_dir = output_dir[:-5]
    output_dir = "%s/%s" % (outdir_base, output_dir)

    try:
        os.mkdir(output_dir)
    except:
        pass

    # Open the file
    logger.info("Opening input file %s" % (infilename))
    hdulist = pyfits.open(infilename)
    is_odi_frame = ('INSTRUME' in hdulist[0].header and
                    hdulist[0].header['INSTRUME'] == 'podi')

    # columns = ['RA', 'DEC', 'X', 'Y', 'OTA', 'BACKGROUND', 'FLAGS', 'MAG_D60']
    columns = ['RA', 'DEC', 'X', 'Y', 'OTA', 'FWHM_IMAGE', 'BACKGROUND', 'FLAGS', 'MAG_D60']
    raw_cols = ['ra', 'dec', 'x', 'y', 'ota', 'fwhm_image', 'background', 'flags', 'mag_aper_6.0']
    col_format = ['%11.6f', '%11.6f', '%8.2f', '%8.2f', '%3d', '%6.2f', '%8.2f', '%6x', '%7.3f']
    if (is_odi_frame):
        logger.info("This frame IS a ODI frame!")
        # Catalog of ODI sources
        table = hdulist['CAT.ODI']
        # convert table into a n-D numpy catalog, with ra/dec, x/y, and magnitudes 
        n_sources = table.data.field(0).shape[0]
        #logger.info("Found %d stars in catalog" %(n_sources))

        starcat = numpy.empty((n_sources, len(columns)))
        for idx, colname in enumerate(columns):
            starcat[:,idx] = table.data.field(colname)

    else:
        # If it's not an ODI frame, we need to run source extractor first
        logger.info("This is not an ODI frame, running source extractor")
        fitsfile = infilename 
        catfile = infilename[:-5]+".cat" 
        sex_config_file = "%s/config/wcsfix.sex" % (sitesetup.exec_dir)
        parameters_file = "%s/config/wcsfix.sexparam" % (sitesetup.exec_dir)
        sexcmd = "%s -c %s -PARAMETERS_NAME %s -CATALOG_NAME %s %s" % (
            sitesetup.sextractor, sex_config_file, parameters_file, catfile, 
            fitsfile)
        logger.debug("Sextractor command:\n%s" % (sexcmd))

        if (not os.path.isfile(catfile)):
            logger.debug("Running SourceExtractor")
            start_time = time.time()
            try:
                ret = subprocess.Popen(sexcmd.split(), 
                                       stdout=subprocess.PIPE, 
                                       stderr=subprocess.PIPE)
                (sex_stdout, sex_stderr) = ret.communicate()
                #os.system(sexcmd)
                if (ret.returncode != 0):
                    logger.warning("Sextractor might have a problem, check the log")
                    logger.debug("Stdout=\n"+sex_stdout)
                    logger.debug("Stderr=\n"+sex_stderr)
            except OSError as e:
                podi_logging.log_exception()
                print("Execution failed:", e, file=sys.stderr)
            end_time = time.time()
            logger.debug("SourceExtractor returned after %.3f seconds" % (end_time - start_time))
        else:
            logger.debug("Reusing existing source catalog!")

        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                source_cat = numpy.loadtxt(catfile)
        except IOError:
            logger.error("The Sextractor catalog is empty, ignoring this OTA")
            sys.exit(0)
            
        # Now translate the raw sextractor catalog into the format we need here
        n_sources = source_cat.shape[0]
        starcat = numpy.empty((n_sources, len(raw_cols)))
        for idx, colname in enumerate(raw_cols):
            starcat[:,idx] = source_cat[:, SXcolumn[colname]]

        # Write a fake photometric zeropoint to the header: find the
        # brightest star in the catalog and anchor the magnitude scale on it
        hdulist[0].header['PHOTZP_X'] = 10. - numpy.min(starcat[:,-1])
        print(hdulist[0].header['PHOTZP_X'])
        #
        # Also assign ODI-compatible extension names
        #
        for ext in range(1, len(hdulist)):
            hdulist[ext].name = "OTA%02d.SCI" % (ext)
        hdulist.info()


    # Convert all instrumental magnitudes into calibrated ones
    starcat[:,-1] += hdulist[0].header['PHOTZP_X'] # no correction for exptime etc.
    logger.info("Found a total of %4d stars" % (starcat.shape[0]))

    #
    # Eliminate all stars with nearby sources
    #
    pixelscale_center = 0.11 # arcsec per pixel; fallback, replaced by the WCS value below
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        wcs = astWCS.WCS(hdulist[1].header, mode='pyfits')
        pixelscale_center = wcs.getPixelSizeDeg() * 3600. # this is arcsec per pixel
    within_range_pixels = 5.0 / pixelscale_center
    logger.debug("nearby stars: if within %.2f pixels" % (within_range_pixels))
    kdtree = scipy.spatial.cKDTree(starcat[:,2:4]) # use x/y 
    # need to look for at least 2 neighbors, since every source also shows up as its own nearest neighbor
    nearest_neighbor, i = kdtree.query(x=starcat[:,2:4], k=2, p=2, 
                                       distance_upper_bound=within_range_pixels)
    numpy.savetxt("nn.dump", nearest_neighbor)
    neighbor_count = numpy.sum(numpy.isfinite(nearest_neighbor), axis=1) - 1.0
    numpy.savetxt("nnc.dump", neighbor_count, '%d')
    
    # now only pick stars with no neighbor star within 5 arcsec
    starcat = starcat[neighbor_count < 1]
    logger.info("Eliminating % 4d stars due to nearby neighbors" % (numpy.sum(neighbor_count >= 1)))

    #print nearest_neighbor

    

    # Now exclude all stars with flags, indicating something might be wrong
    starcat = starcat[starcat[:,-2] == 0]
    logger.info("Found %d good stars with no flags" % (starcat.shape[0]))

    # Print the top of the catalog just for illustrative purposes
    numpy.savetxt(sys.stdout, starcat[:5,:], col_format)

    #
    # try to isolate non-stellar sources.
    # this likely only works if most sources are stars, i.e. not in galaxy clusters
    #
    fwhms = starcat[:, columns.index('FWHM_IMAGE')]
    filtered_fwhm, is_star = three_sigma_clip(fwhms, return_mask=True)
    #print "All sources:\n",starcat[:,columns.index('FWHM_IMAGE')]
    #print "\n\nFWHM like stars:\n",starcat[:,columns.index('FWHM_IMAGE')][is_star]
    #print "\n\nFWHM not stars:\n",starcat[:,columns.index('FWHM_IMAGE')][~is_star]
    # Apply star-only selection
    starcat = starcat[is_star]

    #
    # Now select average profiles for each of the magnitude ranges
    #
    mag_ranges = [(14,15), (15,16), (16,17)] #(18,18.5), (20,20.5)]

    exclude_outlying_otas = is_odi_frame
    if (exclude_outlying_otas):
        exclude = (starcat[:,columns.index('OTA')] < 22) | \
                  (starcat[:,columns.index('OTA')] > 44)
        starcat = starcat[~exclude]

    readme_file = "%s/README" % (output_dir)
    readme = write_readme_file(hdulist, readme_file, os.path.abspath(infilename), starcat)    

    for mag_min, mag_max in mag_ranges:
        #print mag_min, mag_max

        # Select only stars in the right magnitude range
        in_mag_range = (starcat[:,-1] >= mag_min) & (starcat[:,-1] <= mag_max)
        n_stars_in_mag_range = numpy.sum(in_mag_range)

        if (n_stars_in_mag_range <= 0):
            logger.warning("too few stars (%d) in magnitude range %.2f -- %.2f, skipping" % (
                n_stars_in_mag_range, mag_min, mag_max))
            continue

        # Also select only isolated stars
        logger.info("Found %5d stars in magnitude range %.2f -- %.2f, searching for isolated ones" % (
            n_stars_in_mag_range, mag_min, mag_max))

        #
        # Cherry-pick stars with the right magnitudes
        #
        selected_cat = starcat[in_mag_range]
        #numpy.savetxt(sys.stdout, selected_cat, col_format)
        print "---"

        print >>readme, """
  ==> Magnitude range %6.2f to %.2f mag (source count: %d):
  ---------------------------------------------------------------------------------
          Ra          Dec          X          Y   OTA  FWHM Background  Flags  Magnitude
  ---------------------------------------------------------------------------------\
""" % (
      mag_min, mag_max, selected_cat.shape[0])

        numpy.savetxt(readme, selected_cat, [
            '%12.6f', '%12.6f', '%10.3f', '%10.3f', '%5d', '%6.2f', '%11.3f', '%6x', '%10.4f'])
        print >>readme, "  ================================================================================="

        #
        # Now create the average star profile
        #

        all_r = numpy.array([])
        all_data = numpy.array([])

        # Loop over OTAs
        otas = set(list(selected_cat[:,columns.index('OTA')]))

        profile_file = open("%s/profile__%5.2f--%5.2f.dat" % (output_dir, mag_min, mag_max), "w")
        star_id = 0
        for ota in otas:

            in_this_ota = selected_cat[:,columns.index('OTA')] == ota
            ota_cat = selected_cat[in_this_ota]
            
            ota_extname = "OTA%02d.SCI" % (ota)
            data = hdulist[ota_extname].data.T

            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                wcs = astWCS.WCS(hdulist[ota_extname].header, mode='pyfits')
                pixelscale = wcs.getPixelSizeDeg() * 3600 # this is arcsec per pixel

            for star in range(ota_cat.shape[0]):
                star_id += 1
                fx = ota_cat[star,columns.index('X')] - 1.
                fy = ota_cat[star,columns.index('Y')] - 1.

                r, cutout = get_profile(data, center_x=fx, center_y=fy, 
                                        mx=0,  my=0, width=width, 
                                        mode='radial', 
                                        normalize=False,
                                    )
                # Subtract background as determined by Sextractor
                cutout -= ota_cat[star, columns.index('BACKGROUND')]

                # convert magnitude into raw flux, and scale star by flux
                inst_mag = ota_cat[star, -1] - hdulist[0].header['PHOTZP_X'] 
                flux = math.pow(10, -0.4*inst_mag)
                cutout /= flux

                print >>profile_file
                print >>profile_file, "# Source % 4d of % 4d:" % (star_id, selected_cat.shape[0])
                print >>profile_file, "# Ra/Dec: %13.7f %13.7f" % (
                    ota_cat[star,columns.index('RA')], ota_cat[star,columns.index('DEC')])
                print >>profile_file, "# X/Y:     %10.5f     %10.5f   @   OTA %d" % (
                    ota_cat[star,columns.index('X')], ota_cat[star,columns.index('Y')], ota_cat[star,columns.index('OTA')])
                
                within_width = r < width
                good_r = r[within_width]
                good_data = cutout[within_width]
                good_arcsec = good_r * pixelscale
                buffer = numpy.empty((good_r.shape[0], 3))
                buffer[:,0] = good_r
                buffer[:,1] = good_data
                buffer[:,2] = good_arcsec
                numpy.savetxt(profile_file, buffer)
                              # numpy.append(good_r, good_data, axis=1))
                              #numpy.append([within_width], cutout.reshape(-1,1)[within_width], axis=1))
                print >>profile_file

        profile_file.close()

    readme.close()
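
A minimal, self-contained sketch of the neighbor-rejection step above: scipy's cKDTree is queried with k=2 because every source matches itself first, so the second hit is the true nearest neighbor. Only the (N, 2) x/y layout and the 5-arcsec search radius are taken from the code; the function and variable names are illustrative.

import numpy
import scipy.spatial

def keep_isolated(positions, radius_pixels):
    # positions: (N, 2) array of x/y pixel coordinates
    kdtree = scipy.spatial.cKDTree(positions)
    # k=2: the first hit is the point itself, the second its real neighbor
    distances, _ = kdtree.query(positions, k=2,
                                distance_upper_bound=radius_pixels)
    # query() reports inf where no neighbor lies within the radius
    has_neighbor = numpy.isfinite(distances[:, 1])
    return positions[~has_neighbor]

if __name__ == "__main__":
    xy = numpy.random.uniform(0, 4096, size=(500, 2))
    isolated = keep_isolated(xy, radius_pixels=5.0 / 0.11)
    print("kept %d of %d sources" % (isolated.shape[0], xy.shape[0]))
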
Example no. 12
def worker_slave(queue):
    """

    This function handles all work, either running collectcells locally or 
    remotely via ssh. Files to reduce are read from a queue.

    """

    logger = logging.getLogger("SAMPWorker")

    logger.info("Worker process started, ready for action...")

    if (not setup.use_ssh):
        # If we reduce frames locally, don't clobber any existing output files.
        options['clobber'] = False

    while (True):
        try:
            # print "\n\nWaiting for stuff to do\n\n"
            task = queue.get()
        except (KeyboardInterrupt, SystemExit) as e:
            # print "worker received termination notice"
            # Ignore the shut-down command here, and wait for the official
            # shutdown command from main task
            continue

        if (task is None):
            logger.info("Shutting down worker")
            queue.task_done()
            break

        filename, object_name, obsid = task

        logger.info("starting work on file %s" % (filename))

        ccopts = ""
        if (len(sys.argv) > 2):
            # There are some parameters to be forwarded to collectcells
            ccopts = " ".join(sys.argv[1:])
        # print "ccopts=",ccopts

        if (cmdline_arg_isset("-dryrun")):
            logger.info("DRYRUN: Sending off file %s for reduction" %
                        (filename))
            # print "task done!"
            queue.task_done()
            continue

        if (object_name.lower().find("focus") >= 0):
            #
            # This is most likely a focus exposure
            #
            n_stars = int(cmdline_arg_set_or_default("-nstars", 7))
            logger.info("New focus exposure to analyze (with %d stars)" %
                        (n_stars))

            if (setup.use_ssh):

                remote_inputfile = setup.translate_filename_local2remote(
                    filename)
                kw = {
                    'user': setup.ssh_user,
                    'host': setup.ssh_host,
                    'filename': remote_inputfile,
                    'podidir': setup.remote_podi_dir,
                    'outdir': setup.output_dir,
                    'nstars': n_stars,
                }
                ssh_command = "ssh %(user)s@%(host)s %(podidir)s/podi_focus.py -nstars=%(nstars)d %(filename)s %(outdir)s" % kw

                logger.info("Out-sourcing work to %(user)s@%(host)s via ssh" %
                            kw)
                process = subprocess.Popen(ssh_command.split(),
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
                _stdout, _stderr = process.communicate()
                process.stdout.close()
                process.stderr.close()
                try:
                    process.terminate()
                except:
                    pass

                #print "*"*80
                if (not _stdout == "" and _stdout is not None):
                    logger.info("Received STDOUT:\n%s" % (_stdout))
                    #print "*"*80
                if (not _stderr == "" and _stderr is not None):
                    logger.info("Received STDERR:\n%s" % (_stderr))
                    #print _stderr
                    #print "*"*80

            else:
                # Run locally
                logger.info("Analyzing focus sequence (%s) locally" %
                            (filename))
                podi_focus.get_focus_measurement(filename,
                                                 n_stars=n_stars,
                                                 output_dir=setup.output_dir)

            logger.info("Done with analysis")

            # Now check if we are supposed to open/display the focus plot
            if (setup.focus_display is not None):

                remote_filename = "%s/%s_focus.png" % (setup.output_dir, obsid)
                local_filename = setup.translate_filename_remote2local(
                    filename, remote_filename)

                cmd = "%s %s &" % (setup.focus_display, local_filename)
                logger.info("Opening and displaying plot")
                os.system(cmd)

        else:
            #
            # This is NOT a focus exposure
            #

            if (setup.use_ssh):

                # This is not a focus exposure, so treat it as a normal science exposure
                remote_inputfile = setup.translate_filename_local2remote(
                    filename)
                kw = {
                    'user': setup.ssh_user,
                    'host': setup.ssh_host,
                    'collectcells': setup.ssh_executable,
                    'options': ccopts,
                    'filename': remote_inputfile,
                    'outputfile': setup.output_format,
                }

                ssh_command = "ssh %(user)s@%(host)s %(collectcells)s %(filename)s %(outputfile)s %(options)s -noclobber" % kw

                process = subprocess.Popen(ssh_command.split(),
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE,
                                           close_fds=True)
                _stdout, _stderr = process.communicate()
                process.stdout.close()
                process.stderr.close()
                try:
                    process.terminate()
                except:
                    pass

                #print "*"*80
                if (not _stdout == "" and _stdout is not None):
                    logger.info("Received STDOUT:\n%s" % (_stdout))
                    #print "*"*80
                if (not _stderr == "" and _stderr is not None):
                    logger.info("Received STDERR:\n%s" % (_stderr))
                    #print _stderr
                    #print "*"*80

            else:
                logger.info("Running collectcells (%s)" % (filename))
                podi_collectcells.collectcells_with_timeout(
                    input=filename,
                    outputfile=setup.output_format,
                    options=options,
                    timeout=300,
                    process_tracker=process_tracker)

            #
            # If requested, also send the command to ds9
            #
            local_filename = setup.translate_filename_remote2local(
                filename, setup.output_format)
            if (cmdline_arg_isset("-forward2ds9")):
                forward2ds9_option = cmdline_arg_set_or_default(
                    "-forward2ds9", "image")
                if (forward2ds9_option == "irafmosaic"):
                    cmd = "mosaicimage iraf %s" % (local_filename)
                else:
                    cmd = "fits %s" % (local_filename)

                logger.info("Forwarding file to ds9")
                logger.debug("filename: %s" % (filename))
                logger.debug("remote file: %s" % (remote_inputfile))
                logger.debug("local file: %s" % (local_filename))

                try:
                    cli1 = sampy.SAMPIntegratedClient(metadata=metadata)
                    cli1.connect()
                    cli1.enotify_all(mtype='ds9.set', cmd='frame 2')
                    cli1.enotify_all(mtype='ds9.set', cmd='scale scope global')
                    cli1.enotify_all(mtype='ds9.set', cmd=cmd)
                    cli1.disconnect()
                except Exception as err:
                    logger.error("Problems sending message to ds9: %s" % err)
                    podi_logging.log_exception()
                    pass

            # By default, also open the psf diagnostic plot, if available
            psf_plot_fn = local_filename[:-5] + ".psf.png"
            if (os.path.isfile(psf_plot_fn)):
                cmd = "%s %s &" % (setup.focus_display, psf_plot_fn)
                logger.info("Opening and displaying PSF diagnostic plot (%s)" %
                            (psf_plot_fn))
                os.system(cmd)

        #
        # Once the file is reduced, mark the current task as done.
        #
        logger.info("task done!")
        queue.task_done()

    print("Terminating worker process...")

    return
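
The worker above follows the usual JoinableQueue protocol: tasks arrive as tuples, a None sentinel requests shutdown, and task_done() acknowledges every item (including the sentinel) so that join() can return. A stripped-down, hypothetical driver for such a worker (all names here are illustrative, not from this module):

import multiprocessing

def worker(queue):
    while True:
        task = queue.get()
        if task is None:           # sentinel: shut down
            queue.task_done()
            break
        filename, object_name, obsid = task
        # ... reduce the file here ...
        queue.task_done()          # acknowledge so join() can return

if __name__ == "__main__":
    queue = multiprocessing.JoinableQueue()
    p = multiprocessing.Process(target=worker, args=(queue,))
    p.start()
    queue.put(("frame.fits", "M51", "20121008T221836.0"))
    queue.put(None)                # one sentinel per worker
    queue.join()                   # blocks until every task_done()
    p.join()
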
Example no. 13
def combine_pupilghost_slices(out_filename, filelist, op='sigclipmean'):

    logger = logging.getLogger("CombinePG")

    print filelist
    
    # 
    # Gather information about rotation angles and center positions
    # Also collect the association data.
    #
    rotangles = numpy.ones(len(filelist)) * -9999
    headers = [None] * len(filelist)

    #
    # Prepare the association data
    #
    assoc_table = {}
    logger.info("Reading data from input files")

    for idx, fn in enumerate(filelist):
        hdulist = pyfits.open(fn)
        
        # Read the center and angle keywords from the "COMBINED" extension
        comb_hdu = hdulist['COMBINED']
        _ra = comb_hdu.header['ROTANGLE']
        rotangles[idx] = _ra if _ra > 0 else _ra + 360.
        headers[idx] = comb_hdu.header
        logger.debug("%s: %.2f" % (fn, _ra))
        
        # comb_hdu.header['CNTRF%03d' % (norm_angle)] = (centerf_str[:-1], "PG center, fixed r [px]")
        # comb_hdu.header['CNTRV%03d' % (norm_angle)] = (centerv_str[:-1], "PG center, var. r [px]")
        # comb_hdu.header['ALPHA%03d' % (norm_angle)] = (d_angle_str[:-1], "OTA angle [arcmin]")
        # comb_hdu.header['OTAORDER'] = ota_str[:-1]

        this_assoc = {'pupilghost-slice': fn }
        assoc_table = podi_associations.collect_reduction_files_used(assoc_table, this_assoc)

        # Read the association table of this frame
        in_assoc  = podi_associations.read_associations(hdulist)
        if (in_assoc is not None):
            logger.debug("Found assocations:\n%s" % (str(in_assoc)))
            assoc_table = podi_associations.collect_reduction_files_used(assoc_table, in_assoc)

#    return

    #
    # Now sort the rotator angles from smallest to largest
    #
    angle_sort = numpy.argsort(rotangles)

    #
    # Combine all frames
    #
    logger.info("Stacking all slices into master pupilghost template")
    combined_hdulist = podi_imcombine.imcombine(
        filelist, 
        outputfile=None,
        operation=op,
        return_hdu=True,
        subtract=None, scale=None)

    print combined_hdulist
    combined = combined_hdulist['COMBINED']

    combined.header['STACK_OP'] = op

    #
    # Add the sorted keywords back into the resulting file
    #
    logger.info("Adding metadata")
    primhdu = pyfits.PrimaryHDU()
    
    try:
        prev_hdr = None
        for header, label in [
                ('CNTRF%03d', "center, fixed radius"), 
                ('CNTRV%03d', "center, var. radius"),
                ('ALPHA%03d', "angle mismatch [arcmin]"),
                ('NORM_%03d', "sector normalizations"),
                
        ]:
            first_hdr = None
            for i in range(rotangles.shape[0]):
                idx = angle_sort[i]
                rotangle = rotangles[idx]
                round_angle = headers[idx]['RNDANGLE']
                keyname = header % round_angle
                #logger.info("Adding key: %s" % (keyname))
                combined.header[keyname] = headers[idx][keyname]
                #primhdu.header[keyname] = headers[idx][keyname]
                first_hdr = keyname if first_hdr is None else first_hdr
                if (prev_hdr is None):
                    print "adding header",keyname," somewhere"
                    primhdu.header.append((keyname, headers[idx][keyname]))
                    prev_hdr = keyname
                else:
                    print "adding header",keyname,"after",prev_hdr
                    primhdu.header.insert(prev_hdr, (keyname, headers[idx][keyname]), after=True)
                prev_hdr = keyname

            add_fits_header_title(primhdu.header, label, first_hdr)

        combined.header['OTAORDER'] = headers[0]['OTAORDER']
    except:
        podi_logging.log_exception()
        pass

    print assoc_table
    assoc_hdu = podi_associations.create_association_table(assoc_table)

    out_hdulist = [primhdu, combined, assoc_hdu]
    for name in ['PROFILE', 'RAWPROFILE']:
        try:
            logger.info("Adding in extension %s" % (name))
            out_hdulist.append(combined_hdulist[name])
        except:
            logger.warning("Unable to find extension %s" % (name))
            pass

    combined_hdulist.writeto("dummy.fits", clobber=True)

    logger.info("Writing output to %s" % (out_filename))
    out_hdulist = pyfits.HDUList(out_hdulist) #[primhdu, combined, assoc_hdu])
    out_hdulist.writeto(out_filename, clobber=True)

    logger.info("Work complete!")
Example no. 14
def imcombine(input_filelist,
              outputfile,
              operation,
              return_hdu=False,
              subtract=None,
              scale=None,
              gather_all_otas=True):

    logger = logging.getLogger("ImCombine")

    # First loop over all filenames and make sure all files exist
    filelist = []
    for file in input_filelist:
        if (os.path.isfile(file)):
            filelist.append(file)

    if (len(filelist) <= 0):
        logger.error(
            "No existing files found in input list, hence nothing to do!\n")
        return None

    logger.debug("Stacking the following files:\n -- %s" %
                 ("\n -- ".join(filelist)))

    # elif (len(filelist) == 1):
    #     # stdout_write("Only 1 file to combine, save the hassle and copy the file!\n")
    #     hdulist = pyfits.open(filelist[0])
    #     if (return_hdu):
    #         return hdulist
    #     hdulist.writeto(outputfile, clobber=True)
    #     return

    # Read the input parameters
    # Note that file headers are copied from the first file
    reference_filename = filelist[0]
    ref_hdulist = pyfits.open(reference_filename)

    # Create the primary extension of the output file
    # Copy all headers from the reference HDU
    primhdu = pyfits.PrimaryHDU(header=ref_hdulist[0].header)

    # Add PrimaryHDU to list of OTAs that go into the output file
    out_hdulist = [primhdu]

    ref_hdulist.close()
    del ref_hdulist

    #
    # Compile a list of OTAs that will be in the output frame
    #
    otas_found = []
    ota_sizes = {}
    ref_header = {}
    if (gather_all_otas):
        logger.info(
            "Checking all files to compile comprehensive list of available OTAs"
        )
    master_associations = None
    for fn in filelist:
        hdulist = pyfits.open(fn)
        for ext in hdulist:
            if (is_image_extension(ext)):
                try:
                    otas_found.append(ext.name)
                    if (ext.name not in ota_sizes):
                        ota_sizes[ext.name] = ext.data.shape
                        ref_header[ext.name] = ext.header
                except:
                    podi_logging.log_exception()

        #
        # Collect all individual association tables to create
        # a master association table combining all input data
        #
        assoc_table = podi_associations.read_associations(hdulist)
        if (assoc_table is None):
            logger.info("No association data available")
            assoc_table = {'input_simple': [fn]}
        if (master_associations is None):
            master_associations = assoc_table
        else:
            master_associations = podi_associations.collect_reduction_files_used(
                master_associations, assoc_table)

        hdulist.close()
        del hdulist
        if (not gather_all_otas):
            break

    otas_to_combine = set(otas_found)
    ota_counter = collections.Counter(otas_found)
    #print otas_to_combine
    #print ota_counter

    #
    # Now loop over all extensions and compute the mean
    #
    # for cur_ext in range(0, len(ref_hdulist)):
    for cur_ext, extname in enumerate(otas_to_combine):

        data_blocks = []
        # Check what OTA we are dealing with
        # if (not is_image_extension(ref_hdulist[cur_ext])):
        #     continue
        # ref_fppos = ref_hdulist[cur_ext].name #header['EXTNAME']

        #stdout_write("\rCombining frames for OTA %s (#% 2d/% 2d) ..." % (ref_fppos, cur_ext+1, len(ref_hdulist)))
        #logger.debug("Combining frames for OTA %s (#% 2d/% 2d) ..." % (ref_fppos, cur_ext+1, len(ref_hdulist)))
        logger.info("Combining frames for OTA %s (#% 2d/% 2d) ..." %
                    (extname, cur_ext + 1, len(otas_to_combine)))

        #
        # Add a weird-looking construct to move the memory allocation and the
        # actual imcombine into a separate process. This has to do with if/how/when
        # python releases memory (or not), causing a massive short-term memory leak.
        # With the additional process, the memory is owned by the other process and
        # is freed once we destroy this helper process.
        #
        return_queue = multiprocessing.JoinableQueue()
        #worker_args=(ref_fppos, filelist, ref_hdulist[cur_ext].data.shape, operation, return_queue, verbose)

        kw_args = {
            'extension': extname,
            'filelist': filelist,
            'shape': (ota_sizes[extname][0], ota_sizes[extname][1],
                      ota_counter[extname]),
            'operation': operation,
            'queue': return_queue,
            'verbose': verbose,
            'subtract': subtract,
            'scale': scale,
        }
        # p = multiprocessing.Process(target=imcombine_subprocess, args=worker_args)
        p = multiprocessing.Process(target=imcombine_subprocess,
                                    kwargs=kw_args)
        p.start()
        combined = return_queue.get()
        p.terminate()
        del p

        if (verbose): stdout_write(" done, creating fits extension ...")
        logger.debug("done with computing, creating fits extension ...")
        # Create new ImageHDU, insert the imcombined's data and copy the
        # header from the reference frame
        hdu = pyfits.ImageHDU(data=combined, header=ref_header[extname])

        # Append the new HDU to the list of result HDUs
        out_hdulist.append(hdu)

        if (verbose): stdout_write(" done\n")

    #
    # At this point, add a fits table listing all filenames that went into this combination
    #
    filenames_only = []
    for file in filelist:
        dirname, filename = os.path.split(file)
        filenames_only.append(filename)

    logger.debug("Adding association data to output file")

    out_hdulist[0].header["NCOMBINE"] = (len(filenames_only),
                                         "number of combined files")

    columns = [
        pyfits.Column(name='filenumber',
                      format="I4",
                      array=range(1,
                                  len(filenames_only) + 1)),
        pyfits.Column(name='filename', format="A100", array=filenames_only),
    ]
    coldefs = pyfits.ColDefs(columns)
    tablehdu = pyfits.BinTableHDU.from_columns(coldefs)
    tablehdu.header["EXTNAME"] = "FILELIST"
    out_hdulist.append(tablehdu)

    assoc_tbhdu = podi_associations.create_association_table(
        master_associations)
    out_hdulist.append(assoc_tbhdu)

    #
    # All work done now, prepare to return the data or write it to disk
    #
    out_hdu = pyfits.HDUList(out_hdulist)
    if (not return_hdu and outputfile != None):
        logger.debug(" writing results to file %s ..." % (outputfile))
        clobberfile(outputfile)
        try:
            out_hdu.writeto(outputfile, clobber=True, checksum=True)
        except TypeError:
            # this most likely is this error:
            # TypeError: object of type 'NoneType' has no len()
            # related to the checksum calculation
            clobberfile(outputfile)
            out_hdu.writeto(outputfile, clobber=True)
        except pyfits.VerifyError:
            logger.warning(
                "Encountered FITS verification error, writing anyway")
            try:
                out_hdu.writeto(outputfile,
                                clobber=True,
                                checksum=False,
                                output_verify='ignore')
            except:
                raise
        except:
            raise
        out_hdu.close()
        del out_hdu
        del out_hdulist
        stdout_write(" done!\n")
    elif (return_hdu):
        logger.debug(" returning HDU for further processing ...")
        return out_hdu
    else:
        logger.debug(" couldn't write output file, no filename given!")

    return None
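
For reference, a hedged usage sketch matching how combine_pupilghost_slices above drives this function; the file names are placeholders:

filelist = ["pg_angle000.fits", "pg_angle090.fits", "pg_angle180.fits"]
combined_hdulist = imcombine(filelist,
                             outputfile=None,        # skip writing to disk ...
                             operation="sigclipmean",
                             return_hdu=True)        # ... return the HDUList instead
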
Example no. 15
def imcombine_sharedmem_data(shmem_buffer, operation, sizes):

    size_x, size_y, n_frames = sizes
    shmem_results = SharedMemory(ctypes.c_float, (size_x, size_y))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y)

    logger = logging.getLogger("CombineMgr")

    #
    # Set up the parallel processing environment
    #
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.Queue()

    # Now compute median/average/sum/etc
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()
    for line in range(buffer.shape[0]):
        #print "Adding line",line,"to queue"
        queue.put(line)

    lines_done = numpy.zeros((buffer.shape[0]), dtype=numpy.bool)
    lines_read = 0

    #result_buffer = numpy.zeros(shape=(buffer.shape[0], buffer.shape[1]), dtype=numpy.float32)
    processes = []
    for i in range(number_cpus):
        worker_args = (queue, return_queue, shmem_buffer, shmem_results,
                       size_x, size_y, n_frames, operation)
        p = multiprocessing.Process(target=parallel_compute, args=worker_args)
        p.start()
        processes.append(p)

    while (lines_read < buffer.shape[0]
           and numpy.sum(lines_done) < buffer.shape[0]):
        try:
            line = return_queue.get(timeout=5)
            lines_read += 1
            try:
                lines_done[line] = True
            except:
                pass
        except Queue.Empty:
            logger.error("Encountered timeout while combinging data")
            # something bad has happened to one of the workers
            # find one of the lines that has not been processed yet
            missing_lines = (numpy.arange(buffer.shape[0]))[~lines_done]
            logger.info("Re-queuing %d lines for processing" %
                        (missing_lines.shape[0]))
            for line in missing_lines:
                queue.put(line)
        except:
            podi_logging.log_exception()

    # Tell all workers to shut down when no more data is left to work on
    logger.debug("telling all workers to shut down!")
    for i in range(number_cpus):
        logger.debug("telling worker %d to shut down!" % (i))
        queue.put((None))

    # Once all commands are sent out to the workers, join them to speed things up
    logger.debug("Terminating workers!")
    for p in processes:
        p.terminate()
        p.join(timeout=1)

    results = numpy.copy(
        shmem_results.to_ndarray())  #).reshape((size_x, size_y)))
    shmem_results.free()

    del shmem_results
    del queue
    del buffer

    return results
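
The worker side (parallel_compute) is not shown in this snippet. Its contract with the manager above can be sketched as follows, with the shared-memory buffers replaced by plain ndarrays for clarity; all names here are hypothetical:

import numpy

def compute_rows(task_queue, done_queue, cube, out):
    # cube: (size_x, size_y, n_frames) stack; out: (size_x, size_y) result
    while True:
        line = task_queue.get()
        if line is None:              # sentinel from the manager
            task_queue.task_done()
            break
        # combine all frames along the stacking axis for this row
        out[line, :] = numpy.nanmean(cube[line, :, :], axis=-1)
        task_queue.task_done()
        done_queue.put(line)          # lets the manager track progress
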
Example no. 16
def create_saturation_catalog(filename,
                              output_dir,
                              verbose=True,
                              mp=False,
                              redo=False,
                              saturation_limit=65535):
    """
    Create catalogs listing all saturated pixels to enable handling saturation
    and persistency effects later on.

    The main purpose of this function is to call create_saturation_catalog_ota,
    possibly wrapped by mp_create_saturation_catalog for parallel
    processing.

    Parameters
    ----------
    filename : string
    
        One file of the exposure. This file is mainly used to obtain the
        necessary information to create all the other filenames for this
        exposure.

    output_dir : string

        Directory to hold all the saturation catalogs. This is the directory
        that will be fed into collectcells via the -persistency command line
        flag.

    mp : bool - not used

    redo : bool

        Recreate the saturation catalog if it already exists

    Returns
    -------

    """

    logger = logging.getLogger("CreateSaturationCatalog")
    logger.info("Creating saturation mask for %s ..." % (filename))

    if (os.path.isfile(filename)):
        # This is one of the OTA fits files
        # extract the necessary information to generate the
        # names of all the other filenames
        try:
            hdulist = pyfits.open(filename)
        except IOError:
            logger.warning("\rProblem opening file %s...\n" % (filename))
            return
        except:
            podi_logging.log_exception()
            return

        hdr_filename = hdulist[0].header['FILENAME']
        hdr_items = hdr_filename.split('.')
        basename = "%s.%s" % (hdr_items[0], hdr_items[1])
        hdulist.close()

        # Split the input filename to extract the directory part
        directory, dummy = os.path.split(filename)

    elif (os.path.isdir(filename)):
        # As a safety precaution, if the first parameter is the directory containing
        # the files, extract just the ID string to be used for this script
        if (filename[-1] == "/"):
            filename = filename[:-1]

        basedir, basename = os.path.split(filename)
        directory = filename

    else:
        logger.error("Input %s is neither a file nor a directory!" %
                     (filename))
        logger.error("Aborting operation due to illegal input.")
        return

    output_filename = "%s/%s.saturated.fits" % (output_dir, basename)
    logger.debug("Output saturation catalog: %s" % (output_filename))

    if (os.path.isfile(output_filename) and not redo):
        logger.debug("File (%s) exists, skipping!" % (output_filename))
        return

    # Setup parallel processing
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.JoinableQueue()
    #return_queue = multiprocessing.Queue()

    number_jobs_queued = 0
    first_fits_file = None
    ota_list = []

    for (ota_x, ota_y) in itertools.product(range(8), repeat=2):
        ota = ota_x * 10 + ota_y

        filename = "%s/%s.%02d.fits" % (directory, basename, ota)
        if (not os.path.isfile(filename)):
            filename = "%s/%s.%02d.fits.fz" % (directory, basename, ota)
            if (not os.path.isfile(filename)):
                continue

        queue.put((filename, saturation_limit))
        number_jobs_queued += 1

        # Remember the very first FITS file we find. This will serve as the primary HDU
        if (first_fits_file is None):
            # Create a primary HDU from the first found fits-file
            try:
                firsthdu = pyfits.open(filename)
            except IOError:
                logger.warning("Problem opening FITS file %s" % (filename))
                continue
            logger.debug("Copying general information from file %s" %
                         (filename))
            ota_list.append(pyfits.PrimaryHDU(header=firsthdu[0].header))
            firsthdu.close()
            firsthdu = None
            first_fits_file = filename

    if (first_fits_file is None):
        logger.warning("Couldn't find a valid FITS file, thus nothing to do")
        return

    # Now start all the workers
    logger.debug("Starting worker processes")
    processes = []
    for i in range(sitesetup.number_cpus):
        p = multiprocessing.Process(target=mp_create_saturation_catalog,
                                    args=(queue, return_queue, False))
        p.start()
        processes.append(p)
        time.sleep(0.01)

    # Tell all workers to shut down when no more data is left to work on
    logger.debug("Sending shutdown command to worker processes")
    for i in range(len(processes)):
        if (verbose): stdout_write("Sending quit command!\n")
        queue.put((None))

    logger.debug("Collecting catalogs for each OTA")
    for i in range(number_jobs_queued):
        if (verbose): print("reading return ", i)

        cat_name = return_queue.get()
        if (cat_name is not None):
            final_cat, extension_name = cat_name

            columns = [
                pyfits.Column(name='CELL_X', format='I', array=final_cat[:, 0]),
                pyfits.Column(name='CELL_Y', format='I', array=final_cat[:, 1]),
                pyfits.Column(name='X',      format='I', array=final_cat[:, 2]),
                pyfits.Column(name='Y',      format='I', array=final_cat[:, 3]),
            ]
            # Create the table extension
            coldefs = pyfits.ColDefs(columns)
            tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
            tbhdu.name = extension_name
            ota_list.append(tbhdu)

        return_queue.task_done()

    # Join each process to make sure they terminate(d) correctly
    logger.debug("Joining process to ensure proper termination")
    for p in processes:
        p.join()

    hdulist = pyfits.HDUList(ota_list)
    output_filename = "%s/%s.saturated.fits" % (output_dir, basename)
    clobberfile(output_filename)
    logger.debug("Writing output file %s" % (output_filename))
    hdulist.writeto(output_filename, overwrite=True)

    logger.debug("all done!")
    return
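
A hedged usage sketch, mirroring the docstring: build the saturation catalogs for one exposure into the directory that is later handed to collectcells via the -persistency flag (paths are placeholders):

create_saturation_catalog("raw/o20121008T221836.0.33.fits",
                          output_dir="persistency/",
                          redo=False)
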
Example no. 17
def create_saturation_catalog_ota(filename,
                                  output_dir,
                                  verbose=True,
                                  return_numpy_catalog=False,
                                  saturation_limit=65535):
    """
    Create a saturation table for a given OTA exposure.

    Parameters
    ----------
    filename : string
    
        Filename of the OTA FITS file.

    output_dir : string

        If return_numpy_catalog is not set, write the saturation catalog into
        this directory.

    return_numpy_catalog : bool

        If set, return the results as numpy array instead of writing individual
        files to disk.
    

    Returns
    -------
    None - if no saturated pixels are found in this frame
    
    ndarray, extname - if return_numpy_catalog is set

    ndarray (the full saturation catalog) - if return_numpy_catalog is not set

    """

    logger = logging.getLogger("OTASatCat")

    # Open filename
    logger.debug("Input filename: %s" % (filename))

    try:
        hdulist = pyfits.open(filename)
    except IOError:
        logger.debug("Can't open file %s" % (filename))
        return None
    except:
        podi_logging.log_exception()
        return None

    mjd = hdulist[0].header['MJD-OBS']
    obsid = hdulist[0].header['OBSID']
    ota = int(hdulist[0].header['FPPOS'][2:4])
    datatype = hdulist[0].header['FILENAME'][0]

    logger = logging.getLogger("CreateSatCat: %s, OTA %02d" % (obsid, ota))
    logger.debug("Starting work")

    full_coords = numpy.zeros(shape=(0, 4))  #, dtype=numpy.int16)
    saturated_pixels_total = 0

    for ext in range(1, len(hdulist)):
        if (not is_image_extension(hdulist[ext])):
            continue

        # Find all saturated pixels (values >= 65K)
        data = hdulist[ext].data
        saturated = (data >= saturation_limit)
        # print hdulist[ext].header['EXTNAME'], data.shape, numpy.max(data)

        # Skip this cell if no pixels are saturated
        number_saturated_pixels = numpy.sum(saturated)
        if (number_saturated_pixels <= 0):
            continue

        saturated_pixels_total += number_saturated_pixels

        wn_cellx = hdulist[ext].header['WN_CELLX']
        wn_celly = hdulist[ext].header['WN_CELLY']

        # logger.debug("number of saturated pixels in cell %d,%d: %d" % (wn_cellx, wn_celly, number_saturated_pixels))

        # Do some book-keeping preparing for the masking
        rows, cols = numpy.indices(data.shape)

        saturated_rows = rows[saturated]
        saturated_cols = cols[saturated]

        #print saturated_rows.shape, saturated_cols.shape

        coordinates = numpy.zeros(shape=(number_saturated_pixels, 4))
        coordinates[:, 0] = wn_cellx
        coordinates[:, 1] = wn_celly
        coordinates[:, 2] = saturated_cols[:]
        coordinates[:, 3] = saturated_rows[:]

        full_coords = numpy.append(full_coords, coordinates, axis=0)

    final_cat = numpy.array(full_coords, dtype=numpy.dtype('int16'))

    if (saturated_pixels_total <= 0):
        logger.debug("No saturated pixels found, well done!")
        return None

    logger.debug("Found %d saturated pixels, preparing catalog" %
                 (saturated_pixels_total))
    # Now define the columns for the table
    columns = [\
        pyfits.Column(name='CELL_X', format='I', array=final_cat[:, 0]),
        pyfits.Column(name='CELL_Y', format='I', array=final_cat[:, 1]),
        pyfits.Column(name='X',      format='I', array=final_cat[:, 2]),
        pyfits.Column(name='Y',      format='I', array=final_cat[:, 3])
        ]
    # Create the table extension
    coldefs = pyfits.ColDefs(columns)
    tbhdu = pyfits.BinTableHDU.from_columns(coldefs)
    extension_name = "OTA%02d.SATPIX" % (ota)
    tbhdu.name = extension_name

    if (return_numpy_catalog):
        logger.debug("Returning results as numpy catalog")
        return final_cat, extension_name

    # Also copy the primary header into the new catalog
    primhdu = pyfits.PrimaryHDU(header=hdulist[0].header)

    # Create a HDUList for output
    out_hdulist = pyfits.HDUList([primhdu, tbhdu])

    # And create the output file
    output_filename = "%s/%s%s.%02d.saturated.fits" % (output_dir, datatype,
                                                       obsid, ota)
    stdout_write("Writing output: %s\n" % (output_filename))

    clobberfile(output_filename)
    out_hdulist.writeto(output_filename, overwrite=True)

    if (verbose):
        print("some of the saturated pixels:\n", final_cat[0:10, :])

    #numpy.savetxt("test", final_cat)
    #print full_coords.shape

    logger.debug("Retuning final FITS table catalog")
    return final_cat
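
The coordinate bookkeeping above (numpy.indices plus a boolean mask) is the standard way to turn a pixel mask into an explicit coordinate list; a self-contained illustration with made-up data:

import numpy

data = numpy.zeros((5, 5))
data[1, 3] = data[4, 0] = 70000.           # two "saturated" pixels
saturated = (data >= 65535)

rows, cols = numpy.indices(data.shape)     # per-pixel row/column numbers
coords = numpy.column_stack((cols[saturated], rows[saturated]))
print(coords)                              # [[3 1]
                                           #  [0 4]]  -> x/y pairs
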
Example no. 18
def get_focus_measurement(filename, n_stars=5, output_dir="./", mp=False):

    """

    Parameters
    ----------
    filename : string
    
        One file of the exposure. This file is mainly used to obtain the
        necessary information to create all the other filenames for this
        exposure.

    output_dir : string

        Directory to hold the output focus plot.

    Returns
    -------

    """

    logger = logging.getLogger("MeasureFocus")
    logger.info("Starting focus measurement for %s (%d *)..." % (filename, n_stars))

    if (os.path.isfile(filename)):
        # This is one of the OTA fits files
        # extract the necessary information to generate the 
        # names of all the other filenames
        try:
            hdulist = pyfits.open(filename)
        except IOError:
            logger.warning("\rProblem opening file %s...\n" % (filename))
            return
        except:
            podi_logging.log_exception()
            return


        hdr_filename = hdulist[0].header['FILENAME']
        hdr_items = hdr_filename.split('.')
        basename = "%s.%s" % (hdr_items[0], hdr_items[1])
        hdulist.close()

        # Split the input filename to extract the directory part
        directory, dummy = os.path.split(filename)

    elif (os.path.isdir(filename)):
        # As a safety precaution, if the first parameter is the directory containing 
        # the files, extract just the ID string to be used for this script
        if (filename[-1] == "/"):
            filename = filename[:-1]

        basedir, basename = os.path.split(filename)
        directory = filename

    else:
        logger.error("Input %s is neither a file nor a directory!" % (filename))
        return

    # Setup parallel processing
    queue        = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.JoinableQueue()
        
    number_jobs_queued = 0
    obsid = None
    for (ota_x, ota_y) in itertools.product(range(8), repeat=2):
        ota = ota_x * 10 + ota_y

        filename = "%s/%s.%02d.fits" % (directory, basename, ota)

        if (not os.path.isfile(filename)):
            filename = "%s/%s.%02d.fits.fz" % (directory, basename, ota)
            if (not os.path.isfile(filename)):
                continue

        logger.debug("Adding file %s to task list" % (filename))
        if (obsid is None):
            hdulist = pyfits.open(filename)
            obsid = hdulist[0].header['OBSID']
            hdulist.close()

        queue.put( (filename, n_stars) )
        number_jobs_queued += 1
        # break

    # Now start all the workers
    logger.debug("Starting worker processes")
    processes = []
    for i in range(sitesetup.number_cpus):
        p = multiprocessing.Process(target=mp_measure_focus, args=(queue, return_queue, False))
        p.start()
        processes.append(p)
        time.sleep(0.01)
        
    # Tell all workers to shut down when no more data is left to work on
    for i in range(len(processes)):
        queue.put(None)

    logger.info("Collecting catalogs for each OTA")
    all_foci = None
    real_numbers = False
    for i in range(number_jobs_queued):
        returned = return_queue.get()
        if (returned is None):
            continue

        focus_positions, found_real_numbers = returned
        if (found_real_numbers):
            real_numbers = True

        logger.debug("Received %d focus positions" % (focus_positions.shape[0]))

        all_foci = focus_positions if all_foci is None else numpy.append(all_foci, focus_positions, axis=0)
         #print cat_name

        return_queue.task_done()

    # Join each process to make sure they terminate(d) correctly
    logger.debug("Joining process to ensure proper termination")
    for p in processes:
        p.join()

    #print all_foci, all_foci.ndim, all_foci.shape
    if (all_foci is None or
        all_foci.ndim < 2 or
        all_foci.shape[0] <= 0):
        logger.error("Couldn't find any star patterns!")
        return

    logger.info("Found a grand total of %d focus positions" % (all_foci.shape[0]))
    # print all_foci.shape

    with open("allfoci", "w") as f:
        for s in range(all_foci.shape[0]):
            numpy.savetxt(f, all_foci[s,:,:])
            print >>f, "\n"
    # numpy.savetxt("allfoci", all_foci.reshape((-1,all_foci.shape[2])))

    #
    # Eliminate all focus measurements that could be affected by the low-light 
    # CTE problem
    #
    min_background = numpy.min(all_foci[:,:,SXFocusColumn['background']], axis=1)
    detector_lots = all_foci[:,0,SXFocusColumn['ota_lot']]
    bad = (detector_lots < 7) & (min_background < 100)
    logger.info("Excluding %d focus samples that might have CTE issues" % (numpy.sum(bad)))
    all_foci = all_foci[~bad]
    
    stats = get_mean_focuscurve(all_foci)
    pfit, uncert, fwhm_median, fwhm_std, fwhm_cleaned, best_focus_position, best_focus = stats

    plotfilename = "%s/%s_focus.png" % (output_dir, obsid)
    create_focus_plot(all_foci, stats, basename, plotfilename, real_numbers)

    logger.debug("all done!")
    return
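
A hedged usage sketch, mirroring the call made from the SAMP worker earlier in this document (paths are placeholders):

get_focus_measurement("raw/o20121008T221836.0.33.fits",
                      n_stars=7,
                      output_dir="/scratch/qr")
# -> writes /scratch/qr/<OBSID>_focus.png with the combined focus curve
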
Example no. 19
def imcombine_sharedmem_data(shmem_buffer, operation, sizes):

    size_x, size_y, n_frames = sizes
    shmem_results = SharedMemory(ctypes.c_float, (size_x,size_y))
    # multiprocessing.RawArray(ctypes.c_float, size_x*size_y)

    logger = logging.getLogger("CombineMgr")

    #
    # Set up the parallel processing environment
    #
    queue = multiprocessing.JoinableQueue()
    return_queue = multiprocessing.Queue()

    # Now compute median/average/sum/etc
    # buffer = shmem_as_ndarray(shmem_buffer).reshape((size_x, size_y, n_frames))
    buffer = shmem_buffer.to_ndarray()
    for line in range(buffer.shape[0]):
        #print "Adding line",line,"to queue"
        queue.put(line)
        
    lines_done = numpy.zeros((buffer.shape[0]), dtype=numpy.bool)
    lines_read = 0

    #result_buffer = numpy.zeros(shape=(buffer.shape[0], buffer.shape[1]), dtype=numpy.float32)
    processes = []
    for i in range(number_cpus):
        worker_args = (queue, return_queue,
                       shmem_buffer, shmem_results,
                       size_x, size_y, n_frames, operation)
        p = multiprocessing.Process(target=parallel_compute, args=worker_args)
        p.start()
        processes.append(p)

    while (lines_read < buffer.shape[0] and numpy.sum(lines_done) < buffer.shape[0]):
        try:
            line = return_queue.get(timeout=5)
            lines_read += 1
            try:
                lines_done[line] = True
            except:
                pass
        except Queue.Empty:
            logger.error("Encountered timeout while combinging data")
            # something bad has happened to one of the workers
            # find one of the lines that has not been processed yet
            missing_lines = (numpy.arange(buffer.shape[0]))[~lines_done]
            logger.info("Re-queuing %d lines for processing" % (missing_lines.shape[0]))
            for line in missing_lines:
                queue.put(line)
        except:
            podi_logging.log_exception()

       
        
    # Tell all workers to shut down when no more data is left to work on
    logger.debug("telling all workers to shut down!")
    for i in range(number_cpus):
        logger.debug("telling worker %d to shut down!" % (i))
        queue.put((None))

    # Once all commands are sent out to the workers, join them to speed things up
    logger.debug("Terminating workers!")
    for p in processes:
        p.terminate()
        p.join(timeout=1)

    results = numpy.copy(shmem_results.to_ndarray()) #).reshape((size_x, size_y)))
    shmem_results.free()

    del shmem_results
    del queue
    del buffer

    return results
Example no. 20
    #
    grpids = {}
    print "Gathering exposure groupings"
    for ext in hdulist[1:]:
        grpid = ext.header['COGRPID']
        extname = ext.name

        if not grpid in grpids:
            grpids[grpid] = []

        grpids[grpid].append(extname)
        
    print "Found %d groups" % (len(grpids))


    # Now go through each GRPID, and extract the relevant extensions to a separate file
    print "Extracting and stacking individual objects"
    for idx, grpid in enumerate(grpids):
        try:
            do_work(hdulist, grpids, grpid)
        except:
            podi_logging.log_exception()
            logger.error("There was a problem processing %d" % (grpid))
            pass

        # debug limiter: stop after the first group
        break
        # if (idx > 5):
        #     break

    podi_logging.shutdown_logging(options)
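
The grouping loop above is the standard dictionary-bucketing idiom; with collections.defaultdict it reads as follows (a sketch with stand-in data, since the fragment's hdulist is not shown):

import collections

# (extname, grpid) pairs stand in for real FITS extensions
cards = [("XTN1", 7), ("XTN2", 7), ("XTN3", 9)]

grpids = collections.defaultdict(list)
for extname, grpid in cards:
    grpids[grpid].append(extname)

print(dict(grpids))   # {7: ['XTN1', 'XTN2'], 9: ['XTN3']}
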
Example no. 21
def measure_focus_ota(filename, n_stars=5):
    """

    Obtain a focus measurement from the specified filename. To do so,

    1) run source extractor to get FWHM measurements and positions for all sources

    2) group sources into vertical sequences as expected from the function 
       of the focus tool

    3) assign physical focus positions to each measurement

    4) return final catalog back to master so a final, combined focus curve
       can be assembled.

    """

    # print"\n\n\nworking on file ",filename,"\n\n\n"

    logger = logging.getLogger("MeasureFocusOTA")

    try:
        hdulist = pyfits.open(filename)
    except IOError:
        logger.debug("Can't open file %s" % (filename))
        return None
    except:
        podi_logging.log_exception()
        return None

    obsid = hdulist[0].header['OBSID']
    ota = hdulist[1].header['WN_OTAX'] * 10 + hdulist[1].header['WN_OTAY']
    ota_id = hdulist[0].header['OTA_ID']

    logger = logging.getLogger("MeasureFocusOTA: %s(%02d)" % (obsid, ota))
    logger.info("Starting work ...")

    obsid = hdulist[0].header['OBSID']
    ota = int(hdulist[0].header['FPPOS'][2:4])

    # Check the object name to see if if contains the information about the exposure
    focus_positions = numpy.arange(n_stars)[::-1] + 1.
    real_focus_positions = False
    object_name = hdulist[0].header['OBJECT']
    if (object_name.startswith("Focus Center")):
        # This looks like it might be the right format
        try:
            items = object_name.split()
            # Check all items
            if (len(items) == 7 and items[0] == "Focus"
                    and items[1] == "Center" and items[3] == "NStep"
                    and items[5] == "DStep"):
                n_stars = int(items[4])
                focus_center = float(items[2])
                focus_step = float(items[6])
                focus_start = focus_center - (n_stars - 1) / 2. * focus_step
                focus_positions = numpy.arange(
                    n_stars,
                    dtype=numpy.float32)[::-1] * focus_step + focus_start
                logger.debug(
                    "Info from header: N=%d, center=%.0f, step=%.0f, start=%.0f"
                    % (n_stars, focus_center, focus_step, focus_start))
                logger.debug("Focus positions: %s" % (str(focus_positions)))
                real_focus_positions = True
        except:
            pass

    # Run SourceExtractor on the file
    sex_config = "%s/config/focus.sexconf" % (sitesetup.exec_dir)
    sex_param = "%s/config/focus.sexparam" % (sitesetup.exec_dir)
    catfile = "%s/tmp.%s_OTA%02d.cat" % (sitesetup.scratch_dir, obsid, ota)
    sex_cmd = "%(sexcmd)s -c %(sex_config)s -PARAMETERS_NAME %(sex_param)s -CATALOG_NAME %(catfile)s %(filename)s" % {
        "sexcmd": sitesetup.sextractor,
        "sex_config": sex_config,
        "sex_param": sex_param,
        "catfile": catfile,
        "filename": filename,
        "redirect": sitesetup.sex_redirect,
    }
    # print "\n"*10,sex_cmd,"\n"*10
    # Run source extractor
    # catfile = "/tmp//tmp.pid4383.20121008T221836.0_OTA33.cat"

    if (not os.path.isfile(catfile)):
        logger.debug("Running source extractor to search for stars")
        start_time = time.time()
        try:
            ret = subprocess.Popen(sex_cmd.split(),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            (sex_stdout, sex_stderr) = ret.communicate()
            if (ret.returncode != 0):
                logger.warning(
                    "Sextractor might have a problem, check the log")
                logger.info("Stdout=\n" + sex_stdout)
                logger.info("Stderr=\n" + sex_stderr)
        except OSError as e:
            podi_logging.log_exception()
            print >> sys.stderr, "Execution failed:", e
        end_time = time.time()
        logger.debug("SourceExtractor finished after %.2f seconds" %
                     (end_time - start_time))
    else:
        logger.debug("Source catalog already exists, re-using the old file")

    #
    # delete the tmp catalog
    #

    #
    # Load the source catalog.
    # Handle cases of non-existing or empty catalogs
    #
    logger.debug("loading the source catalog from %s" % (catfile))
    try:
        source_cat = numpy.loadtxt(catfile)
    except IOError:
        logger.warning("The Sextractor catalog is empty, ignoring this OTA")
        source_cat = None
        return None
    if (source_cat.shape[0] <= 0):
        # no sources found
        return None

    # print "\n\n total sources in raw file",source_cat.shape,"\n\n"
    logger.debug("Found %d sources in raw SourceExtractor catalog %s" %
                 (source_cat.shape[0], catfile))

    #
    # Now convert all X/Y values to proper OTA X/Y coordinates based on their
    # extension number
    #
    corr_cat = None
    # print "extensions:", source_cat[:,SXFocusColumn['extension']]

    for i in range(len(hdulist)):
        if (not is_image_extension(hdulist[i])):
            # print "skipping extension",i,", this is not an image extension"
            continue

        # get cell_x, cell_y from this extension
        cell_x = hdulist[i].header['WN_CELLX']
        cell_y = hdulist[i].header['WN_CELLY']
        x1, x2, y1, y2 = cell2ota__get_target_region(cell_x, cell_y, 1)

        # Create a mask for all sources in this cell
        in_this_cell = (source_cat[:, SXFocusColumn['extension']] == i)
        if (numpy.sum(in_this_cell) <= 0):
            logger.debug("Couldn't find any sources in cell %d,%d" %
                         (cell_x, cell_y))
            continue

        # print "found",numpy.sum(in_this_cell),"sources for cell",cell_x, cell_y, "  adding", x1, y1

        cell_cat = source_cat[in_this_cell]
        cell_cat[:, SXFocusColumn['x']] += x1
        cell_cat[:, SXFocusColumn['y']] += y1

        #
        # get overscan-level data for this cell
        #
        binning = get_binning(hdulist[i].header)
        overscan_data = extract_biassec_from_cell(hdulist[i].data, binning)
        overscan_level = numpy.mean(overscan_data)
        cell_cat[:, SXFocusColumn['background']] -= overscan_level

        corr_cat = cell_cat if corr_cat is None else numpy.append(
            corr_cat, cell_cat, axis=0)

    #
    # Attach to each measurement what detector lot it came from
    #
    fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist)
    detector_lot = fpl.get_detector_generation(ota_id)
    corr_cat[:, SXFocusColumn['ota_lot']] = detector_lot

    #
    # Also override the extension number in the source catalog with the
    # position in the overall focal plane
    #
    corr_cat[:, SXFocusColumn['extension']] = ota

    # print "\n\n\n\ntotal corrected catalog:",corr_cat.shape
    # save the source catalog
    #numpy.savetxt("focus_cat.ota%02d" % (ota), source_cat)
    #numpy.savetxt("focus_cat2.ota%02d" % (ota), corr_cat)
    logger.debug("done fixing the pixel coordinates")

    # only select bright enough sources
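    # (mag_auto here is an instrumental magnitude, so bright stars have
    # strongly negative values)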
    bright_enough = corr_cat[:, SXFocusColumn['mag_auto']] < -10
    corr_cat = corr_cat[bright_enough]
    #numpy.savetxt("focus_cat3.ota%02d" % (ota), corr_cat)

    #dummy_test = open("dummy.test", "w")
    # Now try to match up stars in a sequence
    all_angles, all_distances = [], []
    for s1 in range(corr_cat.shape[0]):
        # Assume this is the middle star in the sequence

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:, SXFocusColumn['x']] - corr_cat[s1, 2]
        dy = corr_cat[:, SXFocusColumn['y']] - corr_cat[s1, 3]
        d_total = numpy.hypot(dx, dy)

        # |dx/dy| < 0.1 selects stars within ~6 degrees of the vertical axis;
        # suppress the harmless 0/0 warning caused by the reference star itself
        with numpy.errstate(divide='ignore', invalid='ignore'):
            in_cone = numpy.fabs(dx / dy) < 0.1

        # Need to be at most n_stars * 10'' and at least 5''
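        # (0.11 arcsec/pixel is the detector pixel scale used to convert
        # the arcsec limits into pixels)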
        close_enough = (d_total <
                        (n_stars * 10. / 0.11)) & (d_total > 5 / 0.11)
        good_so_far = in_cone & close_enough
        if (numpy.sum(good_so_far) <= 0):
            continue

        candidates = corr_cat[good_so_far]
        # if (numpy.sum(candidates) < n_stars):
        #     # Only use full sequences
        #     continue

        # are the magnitudes comparable
        delta_mag = numpy.fabs(candidates[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']])
        similar_brightness = delta_mag < 1
        #print >>dummy_test, "#", candidates.shape[0], numpy.sum(similar_brightness)
        #print similar_brightness
        if (numpy.sum(similar_brightness) <= 0):
            continue

        good_candidates = candidates[similar_brightness]

        # Now sort the data with increasing y values
        si = numpy.argsort(good_candidates[:, SXFocusColumn['y']])
        sorted_candidates = good_candidates[si]

        #numpy.savetxt(dummy_test, sorted_candidates)
        #print >>dummy_test, "\n\n\n"

        # Now compute the slope and distance between each point and each point
        # above it
        for p1, p2 in itertools.combinations(range(sorted_candidates.shape[0]),
                                             2):
            angle = numpy.arctan2(
                sorted_candidates[p1, 2] - sorted_candidates[p2, 2],
                sorted_candidates[p1, 3] - sorted_candidates[p2, 3])
            distance = numpy.sqrt(
                (sorted_candidates[p1, 2] - sorted_candidates[p2, 2])**2 +
                (sorted_candidates[p1, 3] - sorted_candidates[p2, 3])**2)
            all_angles.append(angle)
            all_distances.append(distance)

    #dummy_test.close()

    # Once we are through with the first iteration find the best-fitting angle

    all_angles = numpy.array(all_angles)
    all_distances = numpy.array(all_distances)

    # Find the best, or rather the most frequently occurring, angle;
    # fold all angles into the range [0, 2*pi)
    all_angles[all_angles < 0] += 2 * math.pi

    #numpy.savetxt("dummy.angles", all_angles)
    #numpy.savetxt("dummy.distances", all_distances)

    filtered_angles = three_sigma_clip(all_angles)
    if (filtered_angles is None or filtered_angles.ndim < 1
            or filtered_angles.shape[0] <= 0):
        return None

    # use the central 90% range (5th to 95th percentile) as the angle width
    angle_width = scipy.stats.scoreatpercentile(filtered_angles, [5, 95])

    logger.debug(
        "Found median angle %f [%f ...%f]" %
        (numpy.degrees(numpy.median(filtered_angles)),
         numpy.degrees(angle_width[0]), numpy.degrees(angle_width[1])))

    #
    # Now we can do another proper search for all stars
    # This time, only search for complete series (#stars as specified)
    #
    #focus_stars = open("focus_stars", "w")
    all_candidates = []
    for s1 in range(corr_cat.shape[0]):

        # Find all stars above and below it in a cone
        # compute the angle to all other stars in the catalog
        dx = corr_cat[:, SXFocusColumn['x']] - corr_cat[s1, 2]
        dy = corr_cat[:, SXFocusColumn['y']] - corr_cat[s1, 3]

        angles = numpy.arctan2(dx, dy)
        angles[angles < 0] += 2 * math.pi

        d_total = numpy.hypot(dx, dy)
        #print numpy.degrees(angle_width), numpy.degrees(angles)

        #print angle_width[0], angle_width[1]
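        # accept both orientations: in_cone2 catches sequences pointing the
        # opposite way (angle offset by 180 degrees)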
        in_cone1 = (angles > angle_width[0]) & (angles < angle_width[1])
        in_cone2 = (angles + math.pi > angle_width[0]) & (angles + math.pi <
                                                          angle_width[1])
        in_cone = in_cone1 | in_cone2
        # print angles[in_cone]
        #print angles[in_cone][0], angles[in_cone][0] > angle_width[0], angles[in_cone][0] < angle_width[1]
        close_enough = (d_total <
                        ((n_stars + 1) * 10. / 0.11)) & (d_total > 5 / 0.11)
        similar_brightness = numpy.fabs(corr_cat[:,SXFocusColumn['mag_auto']] \
                                            - corr_cat[s1,SXFocusColumn['mag_auto']]) < 1
        good = in_cone & close_enough & similar_brightness
        good[s1] = True
        # print s1, ":", numpy.sum(in_cone), numpy.sum(close_enough), numpy.sum(similar_brightness), numpy.sum(good)

        if (numpy.sum(good) <= 1):
            continue

        candidates = corr_cat[good]
        # print "# canddates =", candidates.shape[0]

        if (candidates.shape[0] != n_stars):
            # Only use complete, full-length sequences
            continue

        # print "found match:",s1

        # Now we have a set with the right number of stars, matching the overall
        # angle, and with similar brightnesses
        # sort them top to bottom
        si = numpy.argsort(candidates[:, 3])
        #numpy.savetxt(focus_stars, candidates[:,3])
        #numpy.savetxt(focus_stars, si)
        sorted_candidates = candidates[si]

        # print "XXX", sorted_candidates.shape, sorted_candidates[:,0].shape, focus_positions.shape, n_stars, candidates.shape[0]

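        # re-purpose column 0 to hold the focus position associated with
        # each star in the (top-to-bottom sorted) sequence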
        sorted_candidates[:, 0] = focus_positions
        #numpy.savetxt(focus_stars, sorted_candidates)
        #numpy.savetxt(focus_stars, numpy.degrees(angles[good]))
        #numpy.savetxt(focus_stars, in_cone[good])
        #numpy.savetxt(focus_stars, d_total[good])
        #numpy.savetxt(focus_stars, (corr_cat[:,10] - corr_cat[s1,10])[good])
        #print >>focus_stars, "\n\n\n\n"

        all_candidates.append(sorted_candidates)

    #focus_stars.close()

    all_candidates = numpy.array(all_candidates)
    logger.debug(str(all_candidates.shape))

    #xxx = open("steps", "w")
    # Now compute the distances from each star to the previous

    step_vectors = []

    for i in range(1, n_stars):
        # logger.debug("Candidates: %d %s\n%s" % (all_candidates.ndim, str(all_candidates.shape), str(all_candidates)))
        if (all_candidates.ndim < 1 or all_candidates.shape[0] <= 0):
            # We ran out of candidates
            logger.debug("We ran out of viable candidates after %s stars" %
                         (i))
            return None

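        # all_candidates has shape (n_sequences, n_stars, n_columns); the
        # steps are the x/y offsets between consecutive stars in each sequence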
        steps = all_candidates[:, i, 2:4] - all_candidates[:, i - 1, 2:4]
        #numpy.savetxt(xxx, steps)
        #print >>xxx, "\n\n\n\n"

        logger.debug("Computing average step size, star %d" % (i))
        logger.debug("Steps-X:\n%s" % (str(steps[:, 0])))
        logger.debug("Steps-y:\n%s" % (str(steps[:, 1])))
        clean_dx = three_sigma_clip(steps[:, 0])
        clean_dy = three_sigma_clip(steps[:, 1])

        # Check if both clean_dx and clean_dy are not empty
        logger.debug("clean-dx=%s" % (str(clean_dx)))
        logger.debug("clean-dy=%s" % (str(clean_dy)))
        if (clean_dx.ndim < 1 or clean_dx.shape[0] <= 0 or clean_dy.ndim < 1
                or clean_dy.shape[0] <= 0):
            logger.debug("Can't find a clean dx/dy shift in iteration %d" %
                         (i))
            return None

        dx = numpy.median(clean_dx)
        dy = numpy.median(clean_dy)

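        # half of the central 68% interval (16th-84th percentile) is a
        # robust estimate of the 1-sigma scatter of the steps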
        distx = scipy.stats.scoreatpercentile(clean_dx, [16, 84])
        disty = scipy.stats.scoreatpercentile(clean_dy, [16, 84])
        sigma_x = 0.5 * (distx[1] - distx[0])
        sigma_y = 0.5 * (disty[1] - disty[0])

        step_vectors.append([dx, dy, sigma_x, sigma_y])

        good_steps = (steps[:,0] > (dx - 3*sigma_x)) & (steps[:,0] < (dx + 3*sigma_x)) \
            & (steps[:,1] > (dy - 3*sigma_y)) & (steps[:,1] < (dy + 3*sigma_y))

        logger.debug("before step-matching #%d: %s" %
                     (i, str(all_candidates.shape)))
        all_candidates = all_candidates[good_steps]
        logger.debug("after step-matching: #%d: %s" %
                     (i, str(all_candidates.shape)))

    logger.debug("%s: %s" % (filename, str(step_vectors)))

    # final_focus = open("final_focus", "w")
    # for i in range(all_candidates.shape[0]):
    #     numpy.savetxt(final_focus, all_candidates[i])
    #     print >>final_focus, "\n\n\n\n\n"
    # final_focus.close()

    logger.debug("Found %d focus stars" % (all_candidates.shape[0]))
    return all_candidates, real_focus_positions

    logger.debug("Returning final FITS table catalog")
    return None
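
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original pipeline: given the matched
# focus-star sequences and their focus positions, the best focus is commonly
# estimated by fitting a parabola to FWHM vs. focus position. The function
# name and the example values below are hypothetical placeholders.
# ---------------------------------------------------------------------------
import numpy

def estimate_best_focus(focus_positions, fwhm):
    # fit fwhm = a*f**2 + b*f + c and return the vertex -b/(2a),
    # i.e. the focus value that minimizes the FWHM
    a, b, c = numpy.polyfit(focus_positions, fwhm, 2)
    return -b / (2. * a)

# example: a through-focus sweep with the sharpest image near focus = 0.2
# best = estimate_best_focus([-0.4, -0.2, 0.0, 0.2, 0.4],
#                            [2.1, 1.6, 1.3, 1.2, 1.5])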
Exemplo n.º 22
0
def run_swarp(input_list, outputimage, combine="MEDIAN"):
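    """
    Stack a list of input frames into a single output image by running
    swarp, using the QuickReduce default.swarp configuration as a basis.

    combine selects the swarp COMBINE_TYPE (e.g. MEDIAN or AVERAGE).
    """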

    logger = logging.getLogger("Swarp")

    #
    #
    # Now run swarp to create the stack
    # Use the default.swarp from QR as the fallback swarp option
    #
    #

    # make sure the input is a list and not just a single filename
    if (not isinstance(input_list, list)):
        input_list = [input_list]

    # trim the .fits from the output filename (if present); we'll add it back later
    if (outputimage.endswith(".fits")):
        outputimage = outputimage[:-5]

    dic = {
        'swarp_default': "%s/.config/swarp.default" % (qr_dir),
        'img_out': "%s.fits" % (outputimage),
        'img_w_out': "%s.weight.fits" % (outputimage),
        'resample_dir': sitesetup.scratch_dir,
        'inputfile': " ".join(input_list),
        'combinetype': combine.upper(),
    }
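    # FSCALE_KEYWORD deliberately points at a keyword that does not exist in
    # the input headers, so swarp falls back to FSCALE_DEFAULT=1.0 and no
    # flux re-scaling is applied during the stack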
    swarp_opts = """
             -c %(swarp_default)s 
             -IMAGEOUT_NAME %(img_out)s 
             -WEIGHTOUT_NAME %(img_w_out)s
             -PIXEL_SCALE 0
             -PIXELSCALE_TYPE MEDIAN
             -COMBINE Y 
             -COMBINE_TYPE %(combinetype)s
             -CENTER_TYPE ALL
             -RESAMPLE_DIR %(resample_dir)s 
             -SUBTRACT_BACK Y
             -FSCALE_KEYWORD XXXXXXXX
             -FSCALE_DEFAULT 1.0
             -WEIGHT_TYPE NONE
             -WEIGHT_SUFFIX .weight.fits 
             -RESCALE_WEIGHTS N
             -DELETE_TMPFILES Y
             %(inputfile)s 
             """ % dic
    swarp_cmd = "%s %s" % (sitesetup.swarp_exec, swarp_opts)

    logger.info("Creating %s" % (dic['img_out']))
    logger.debug(" ".join(swarp_cmd.split()))

    try:
        ret = subprocess.Popen(swarp_cmd.split(), 
                                   stdout=subprocess.PIPE, 
                                   stderr=subprocess.PIPE)
        (swarp_stdout, swarp_stderr) = ret.communicate()

        # logger.debug("swarp stdout:\n"+swarp_stdout)
        # if (len(swarp_stderr) > 0 and ret.returncode != 0):
        #     logger.warning("swarp stderr:\n"+swarp_stderr)
        # else:
        #     logger.debug("swarp stderr:\n"+swarp_stderr)
        #print "\n".join(swarp_stderr)
        logger.info("done, swarp returned (ret-code: %d)!" % ret.returncode)
    except OSError as e:
        podi_logging.log_exception()
        print >>sys.stderr, "Execution failed:", e


    return
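
# Usage sketch (illustrative; the file names below are hypothetical):
#
#   run_swarp(["qr_ota33_1.fits", "qr_ota33_2.fits"],
#             outputimage="stack.fits", combine="MEDIAN")
#
# creates stack.fits and the matching stack.weight.fits weight map.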
Exemplo n.º 23
0
def combine_pupilghost_slices(out_filename, filelist, op='sigclipmean'):
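    """
    Combine a set of pupilghost slices, ordered by rotator angle, into a
    single master pupilghost template. Per-angle metadata keywords and the
    association tables of all inputs are propagated into the output file.
    """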

    logger = logging.getLogger("CombinePG")

    logger.debug("Input files:\n%s" % (str(filelist)))

    #
    # Gather information about rotation angles and center positions
    # Also collect the association data.
    #
    rotangles = numpy.ones(len(filelist)) * -9999
    headers = [None] * len(filelist)

    #
    # Prepare the association data
    #
    assoc_table = {}
    logger.info("Reading data from input files")

    for idx, fn in enumerate(filelist):
        hdulist = pyfits.open(fn)

        # Read the center and angle keywords from the "COMBINED" extension
        comb_hdu = hdulist['COMBINED']
        _ra = comb_hdu.header['ROTANGLE']
        rotangles[idx] = _ra if _ra > 0 else _ra + 360.
        headers[idx] = comb_hdu.header
        logger.debug("%s: %.2f" % (fn, _ra))

        # comb_hdu.header['CNTRF%03d' % (norm_angle)] = (centerf_str[:-1], "PG center, fixed r [px]")
        # comb_hdu.header['CNTRV%03d' % (norm_angle)] = (centerv_str[:-1], "PG center, var. r [px]")
        # comb_hdu.header['ALPHA%03d' % (norm_angle)] = (d_angle_str[:-1], "OTA angle [arcmin]")
        # comb_hdu.header['OTAORDER'] = ota_str[:-1]

        this_assoc = {'pupilghost-slice': fn}
        assoc_table = podi_associations.collect_reduction_files_used(
            assoc_table, this_assoc)

        # Read the association table of this frame
        in_assoc = podi_associations.read_associations(hdulist)
        if (in_assoc is not None):
            logger.debug("Found assocations:\n%s" % (str(in_assoc)))
            assoc_table = podi_associations.collect_reduction_files_used(
                assoc_table, in_assoc)

    #
    # Now sort the rotator angles from smallest to largest
    #
    angle_sort = numpy.argsort(rotangles)

    #
    # Combine all frames
    #
    logger.info("Stacking all slices into master pupilghost template")
    combined_hdulist = podi_imcombine.imcombine(
        filelist,
        outputfile=None,
        operation=op,
        return_hdu=True,
        subtract=None,
        scale=None)

    logger.debug(str(combined_hdulist))
    combined = combined_hdulist['COMBINED']

    combined.header['STACK_OP'] = op

    #
    # Add the sorted keywords back into the resulting file
    #
    logger.info("Adding metadata")
    primhdu = pyfits.PrimaryHDU()

    try:
        prev_hdr = None
        for header, label in [
            ('CNTRF%03d', "center, fixed radius"),
            ('CNTRV%03d', "center, var. radius"),
            ('ALPHA%03d', "angle mismatch [arcmin]"),
            ('NORM_%03d', "sector normalizations"),
        ]:
            first_hdr = None
            for i in range(rotangles.shape[0]):
                idx = angle_sort[i]
                rotangle = rotangles[idx]
                round_angle = headers[idx]['RNDANGLE']
                keyname = header % round_angle
                #logger.info("Adding key: %s" % (keyname))
                combined.header[keyname] = headers[idx][keyname]
                #primhdu.header[keyname] = headers[idx][keyname]
                first_hdr = keyname if first_hdr is None else first_hdr
                if (prev_hdr is None):
                    logger.debug("Adding header %s at the end" % (keyname))
                    primhdu.header.append((keyname, headers[idx][keyname]))
                else:
                    logger.debug("Adding header %s after %s" % (keyname, prev_hdr))
                    primhdu.header.insert(prev_hdr,
                                          (keyname, headers[idx][keyname]),
                                          after=True)
                prev_hdr = keyname

            add_fits_header_title(primhdu.header, label, first_hdr)

        combined.header['OTAORDER'] = headers[0]['OTAORDER']
    except:
        podi_logging.log_exception()
        pass

    logger.debug(str(assoc_table))
    assoc_hdu = podi_associations.create_association_table(assoc_table)

    out_hdulist = [primhdu, combined, assoc_hdu]
    for name in ['PROFILE', 'RAWPROFILE']:
        try:
            logger.info("Adding in extension %s" % (name))
            out_hdulist.append(combined_hdulist[name])
        except:
            logger.warning("Unable to find extension %s" % (name))
            pass

    combined_hdulist.writeto("dummy.fits", overwrite=True)

    logger.info("Writing output to %s" % (out_filename))
    out_hdulist = pyfits.HDUList(out_hdulist)  #[primhdu, combined, assoc_hdu])
    out_hdulist.writeto(out_filename, overwrite=True)

    logger.info("Work complete!")
Exemplo n.º 24
0
def read_fits_catalog(fn, extension, flatten=True):
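    """
    Read one or more table extensions from a FITS catalog.

    With flatten=True each table is returned as a 2-D numpy array, with
    vector columns split into individual scalar columns; otherwise a list
    of column arrays is returned. If extension is a list, one table per
    extension is returned.
    """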
    logger = logging.getLogger("ReadFITScat")

    logger.debug("Opening FITS catalog from %s" % (fn))
    if (type(fn) is str):
        hdulist = pyfits.open(fn)
    else:
        hdulist = fn

    if (type(extension) is list):
        ext_list = extension
    else:
        ext_list = [extension]

    return_tables = []

    for ext_id in ext_list:

        try:
            ext = hdulist[ext_id]
        except KeyError:
            logger.warning("Extension %s not found in %s" % (ext_id, fn))
            hdulist.info()
            return_tables.append(None)
            continue

        n_fields = ext.header['TFIELDS']
        n_rows = ext.header['NAXIS2']
        # table = numpy.empty((n_rows, n_fields))
        logger.debug("Reading data for %d fields" % (n_fields))
        # print "Reading data for %d fields" % (n_fields)
        table = []
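        # with flatten=True, 2-D vector columns (e.g. per-aperture
        # magnitudes) are split into one scalar column per component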
        for f in range(n_fields):
            fd = ext.data.field(f)
            if (fd.ndim > 2 and flatten):
                logger.warning("Unable to handle catalog field %s" % (
                    ext.header['TTYPE%d' % (f + 1)]))
                continue
            if (fd.ndim > 1 and flatten):
                for c2 in range(fd.shape[1]):
                    table.append(fd[:, c2])
            else:
                table.append(fd)

        try:
            if (flatten):
                table = numpy.array(table).T
                logger.debug("Table data: %s" % (str(table.shape)))
            else:
                logger.debug("Table data: %d columns" % (len(table)))
            # np_table = numpy.array(table)
            # print table
        except:
            podi_logging.log_exception()
            pass

        # print table.shape

        # print table[1:3]

        # print ext.header
        return_tables.append(table)

    if (type(extension) is not list):
        return return_tables[0]

    return return_tables
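
# Usage sketch (illustrative; file and extension names are hypothetical):
#
#   cat = read_fits_catalog("photcal.fits", "CAT.ODI")
#   # -> 2-D numpy array (n_rows x n_columns) since flatten defaults to True
#
#   tables = read_fits_catalog("photcal.fits", ["CAT.ODI", "CAT.REF"])
#   # -> list with one table per requested extension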