Example #1
    def test_generate_catalog(self,input_filenames, truth_file):
        """ Verify whether sources from astrometric catalogs can be extracted from images.

        Success Criteria
        -----------------
            * Initially, source catalog matches >80% of 'truth' catalog sources

        """
        self.input_loc = 'catalog_tests'
        self.curdir = os.getcwd()
        truth_path = [self.input_repo, self.tree, self.input_loc, *self.ref_loc]

        if not isinstance(input_filenames, list):
            input_filenames = [input_filenames]

        try:
            # Make local copies of input files
            local_files = []
            for infile in input_filenames:
                downloaded_files = self.get_input_file(infile, docopy=True)
                local_files.extend(downloaded_files)
            
            test_image = local_files[0]
            print("Testing with {}".format(test_image))
            imghdu = fits.open(test_image)
            instrume = imghdu[0].header['instrume'].lower()
            detector = imghdu[0].header['detector'].lower()
            instr_pars = detector_specific_params[instrume][detector]
            reference_wcs = amutils.build_reference_wcs(local_files)
            imcat = amutils.generate_sky_catalog(imghdu, reference_wcs, **instr_pars)
            imcat.rename_column('xcentroid', 'x')
            imcat.rename_column('ycentroid', 'y')

            # create FITS WCS corrector object
            wcs_corrector = tweakwcs.FITSWCS(reference_wcs)

            # get reference catalog as 'truth' files
            reference_catalog = get_bigdata(*truth_path, truth_file, docopy=True)
            if os.path.basename(reference_catalog).endswith('ecsv'):
                tab_format = 'ascii.ecsv'
            else:
                tab_format = 'ascii.fast_commented_header'
            reference_table = Table.read(reference_catalog, format=tab_format)
            num_expected = len(reference_table)

            # Perform matching
            match = tweakwcs.TPMatch(searchrad=200, separation=0.1, tolerance=5, use2dhist=True)
            ridx, iidx = match(reference_table, imcat, wcs_corrector)
            nmatches = len(ridx)

        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
            sys.exit()

        assert (nmatches > 0.8*num_expected)
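
A note on how this test is driven: the method takes (input_filenames, truth_file), so it is presumably parameterized by pytest; the decorator itself is not part of the snippet above. The sketch below shows one way such a parameterization could look, with the FLC image and truth-catalog names as placeholders rather than real entries in the test-data repository.

import pytest

class TestGenerateCatalog:  # stand-in for the real test class, which is not shown above
    @pytest.mark.parametrize(
        "input_filenames, truth_file",
        [
            ("ib6v06c4q_flc.fits", "ib6v06c4q_catalog_truth.ecsv"),  # hypothetical pairing
        ],
    )
    def test_generate_catalog(self, input_filenames, truth_file):
        # body as in Example #1 above
        pass
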
Example #2
    def test_astroquery(self):
        """Verify that new astroquery interface will work"""
        self.curdir = os.getcwd()
        self.input_loc = ''

        shift_file, local_files = self.run_align('ib6v06060')
        rms_x = max(shift_file['col6'])
        rms_y = max(shift_file['col7'])

        reference_wcs = amutils.build_reference_wcs(local_files)
        test_limit = self.fit_limit / reference_wcs.pscale
        assert (rms_x <= test_limit and rms_y <= test_limit)
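
The pass/fail threshold above converts an angular tolerance into detector pixels. Below is a minimal sketch of that conversion, assuming self.fit_limit is expressed in arcseconds and reference_wcs.pscale is the plate scale in arcseconds per pixel (neither unit is stated in the snippet), so the shift-file RMS values can be compared in pixels.

def rms_within_limit(rms_x, rms_y, fit_limit_arcsec, pscale_arcsec_per_pix):
    """Return True if both RMS values (pixels) fall within the angular fit limit."""
    test_limit_pix = fit_limit_arcsec / pscale_arcsec_per_pix
    return rms_x <= test_limit_pix and rms_y <= test_limit_pix

# e.g. a 0.2 arcsec limit on a 0.04 arcsec/pixel detector allows up to 5 pixels of RMS
print(rms_within_limit(3.1, 2.7, 0.2, 0.04))  # True
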
Example #3
    def test_align_47tuc(self):
        """ Verify whether 47Tuc exposures can be aligned to an astrometric standard.

        Characteristics of this test:
          * Input exposures include both ACS and WFC3 images of the same general field-of-view
            of 47Tuc suitable for creating a combined mosaic using both instruments.
        """
        self.input_loc = 'mosaic_47tuc'
        input_filenames = [
            'ib6v06c4q_flc.fits', 'ib6v06c7q_flc.fits', 'ib6v25aqq_flc.fits',
            'ib6v25atq_flc.fits', 'jddh02gjq_flc.fits', 'jddh02glq_flc.fits',
            'jddh02goq_flc.fits'
        ]
        self.output_shift_file = 'test_mosaic_47tuc_shifts.txt'
        shift_file, local_files = self.run_align(input_filenames)

        rms_x = max(shift_file['col6'])
        rms_y = max(shift_file['col7'])

        reference_wcs = amutils.build_reference_wcs(local_files)
        test_limit = self.fit_limit / reference_wcs.pscale
        assert (rms_x <= test_limit and rms_y <= test_limit)
Example #4
    def test_align_ngc188(self):
        """ Verify whether NGC188 exposures can be aligned to an astrometric standard.

        Characteristics of this test:
          * Input exposures include both ACS and WFC3 images of the same general field-of-view
            of NGC188 suitable for creating a combined mosaic using both instruments.
        """
        self.input_loc = 'mosaic_ngc188'
        input_filenames = [
            'iaal01hxq_flc.fits', 'iaala3btq_flc.fits', 'iaal01hyq_flc.fits',
            'iaala3bsq_flc.fits', 'j8boa1m8q_flc.fits', 'j8boa1m4q_flc.fits',
            'j8boa1maq_flc.fits', 'j8boa1m6q_flc.fits'
        ]
        self.output_shift_file = 'test_mosaic_ngc188_shifts.txt'
        shift_file, local_files = self.run_align(input_filenames)

        rms_x = max(shift_file['col6'])
        rms_y = max(shift_file['col7'])

        reference_wcs = amutils.build_reference_wcs(local_files)
        test_limit = self.fit_limit / reference_wcs.pscale
        assert (rms_x <= test_limit and rms_y <= test_limit)
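
Examples #3 and #4 differ only in their input file lists and output shift-file names, so the RMS check could be factored into a shared helper on the same test class. The sketch below reuses run_align, amutils, and self.fit_limit exactly as they appear above; the helper name _assert_alignment_rms is hypothetical.

    def _assert_alignment_rms(self, input_filenames, output_shift_file):
        """Run the alignment and assert that the X/Y RMS values stay within the fit limit."""
        self.output_shift_file = output_shift_file
        shift_file, local_files = self.run_align(input_filenames)
        rms_x = max(shift_file['col6'])
        rms_y = max(shift_file['col7'])
        reference_wcs = amutils.build_reference_wcs(local_files)
        test_limit = self.fit_limit / reference_wcs.pscale
        assert rms_x <= test_limit and rms_y <= test_limit
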
Example #5
    def test_align_single_visits(self, input_filenames):
        """ Verify whether single-visit exposures can be aligned to an astrometric standard.

        Characteristics of these tests:
          * Input exposures are drawn from a number of single-visit datasets to explore what impact differing
            observing modes (instruments, detectors, filters, subarray size, etc.) have on astrometry.

        The following datasets are used in these tests:

            * ACS dataset 10048_a1: 2x F344N, 1x F435W, 1x F475W, 2x F502N, 2x F550M, 1x F555W, 1x F606W, 1x F625W,
              2x F658N, 1x F775W, 1x F814W, 1x F850LP, and 2x F892N ACS/HRC images
            * ACS dataset 10265_01: 4x F606W full-frame ACS/WFC images
            * ACS dataset 12580_02: 5x F475W & 6x F814W ACS/WFC images
            * WFC3 dataset 11663_12: 4x F160W full-frame WFC3/IR images
            * WFC3 dataset 12219_01: 8x F160W full-frame WFC3/IR images, 9x F336W full-frame WFC3/UVIS images
            * WFC3 dataset 12379_02: 4x F606W, 4x F502N full-frame WFC3/UVIS images

        """
        self.input_loc = 'base_tests'
        self.curdir = os.getcwd()
        try:
            shift_file, local_files = self.run_align(input_filenames)
            x_shift = numpy.all(numpy.isnan(shift_file['col2']))
            y_shift = numpy.all(numpy.isnan(shift_file['col3']))
            rms_x = max(shift_file['col6'])
            rms_y = max(shift_file['col7'])
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type,
                                      exc_value,
                                      exc_tb,
                                      file=sys.stdout)
            sys.exit()

        reference_wcs = amutils.build_reference_wcs(local_files)
        test_limit = self.fit_limit / reference_wcs.pscale
        assert not x_shift and not y_shift and rms_x <= test_limit and rms_y <= test_limit
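
The shift-file columns above are addressed positionally rather than by name; inferred from usage (not from documented output), col2/col3 hold the X/Y shifts and col6/col7 the X/Y RMS values, which matches the colN naming astropy's ASCII reader assigns to header-less tables. A sketch of reading one of the shift files written by the tests above, under that assumption:

from astropy.table import Table
import numpy

shift_table = Table.read('test_mosaic_47tuc_shifts.txt', format='ascii.no_header')
x_shifts_missing = numpy.all(numpy.isnan(shift_table['col2']))  # True if no valid X shifts were found
rms_x = max(shift_table['col6'])
rms_y = max(shift_table['col7'])
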
Example #6
def perform_align(input_list,
                  archive=False,
                  clobber=False,
                  update_hdr_wcs=False):
    """Main calling function.

    Parameters
    ----------
    input_list : list
        List of one or more IPPSSOOTs (rootnames) to align.

    archive : Boolean
        Retain copies of the downloaded files in the astroquery created sub-directories?

    clobber : Boolean
        Download and overwrite existing local copies of input files?

    update_hdr_wcs : Boolean
        Write newly computed WCS information to image headers?

    Returns
    -------
    int value 0 if successful, int value 1 if unsuccessful

    """

    # Define astrometric catalog list in priority order
    catalogList = ['GAIADR2', 'GSC241']
    numCatalogs = len(catalogList)

    # 0: print git info
    print(
        "-------------------- STEP 0: Display Git revision info  --------------------"
    )
    full_path = os.path.dirname(__file__) + "/utils"
    repo_path = None
    if "hlapipeline/hlapipeline" in full_path:
        repo_path = full_path.split(
            "hlapipeline/hlapipeline")[0] + "hlapipeline"
    elif "hlapipeline" in full_path:
        repo_path = full_path.split("hlapipeline")[0] + "hlapipeline"
    if repo_path and not os.path.exists(repo_path):
        repo_path = None  # protect against non-existent paths
    if repo_path:
        get_git_rev_info.print_rev_id(
            repo_path)  # Display git repository information
    else:
        print(
            "WARNING: Unable to display Git repository revision information.")

    # 1: Interpret input data and optional parameters
    print("-------------------- STEP 1: Get data --------------------")
    imglist = check_and_get_data(input_list, archive=archive, clobber=clobber)
    print("\nSUCCESS")

    # 2: Apply filter to input observations to ensure that they meet the minimum criteria for alignment
    print("-------------------- STEP 2: Filter data --------------------")
    filteredTable = filter.analyze_data(imglist)

    # Check the table to determine if there is any viable data to be aligned.  The
    # 'doProcess' column (bool) indicates the image/file should or should not be used
    # for alignment purposes.
    if filteredTable['doProcess'].sum() == 0:
        print("No viable images in filtered table - no processing done.\n")
        return (1)

    # Get the list of all "good" files to use for the alignment
    processList = filteredTable['imageName'][np.where(filteredTable['doProcess'])]
    processList = list(processList)  # Convert processList from a numpy array to a regular Python list
    print("\nSUCCESS")

    # 3: Build WCS for full set of input observations
    print("-------------------- STEP 3: Build WCS --------------------")
    refwcs = amutils.build_reference_wcs(processList)
    print("\nSUCCESS")

    # 4: Retrieve list of astrometric sources from database
    # While loop to accommodate using multiple catalogs
    doneFitting = False
    catalogIndex = 0
    extracted_sources = None
    while not doneFitting:
        skip_all_other_steps = False
        retry_fit = False
        print(
            "-------------------- STEP 4: Detect astrometric sources --------------------"
        )
        print("Astrometric Catalog: ", catalogList[catalogIndex])
        reference_catalog = generate_astrometric_catalog(
            processList, catalog=catalogList[catalogIndex])
        # The table must have at least MIN_CATALOG_THRESHOLD entries to be useful
        if len(reference_catalog) >= MIN_CATALOG_THRESHOLD:
            print("\nSUCCESS")
        else:
            if catalogIndex < numCatalogs - 1:
                print("Not enough sources found in catalog " +
                      catalogList[catalogIndex])
                print("Try again with the next catalog")
                catalogIndex += 1
                retry_fit = True
                skip_all_other_steps = True
            else:
                print(
                    "Not enough sources found in any catalog - no processing done."
                )
                return (1)
        if not skip_all_other_steps:
            # 5: Extract catalog of observable sources from each input image
            print(
                "-------------------- STEP 5: Source finding --------------------"
            )
            if not extracted_sources:
                extracted_sources = generate_source_catalogs(processList)
                for imgname in extracted_sources.keys():
                    table = extracted_sources[imgname]["catalog_table"]
                    # The catalog of observable sources must have at least MIN_OBSERVABLE_THRESHOLD entries to be useful
                    total_num_sources = 0
                    for chipnum in table.keys():
                        total_num_sources += len(table[chipnum])
                    if total_num_sources < MIN_OBSERVABLE_THRESHOLD:
                        print(
                            "Not enough sources ({}) found in image {}".format(
                                total_num_sources, imgname))
                        return (1)
            # Convert input images to tweakwcs-compatible NDData objects and
            # attach source catalogs to them.
            imglist = []
            for group_id, image in enumerate(processList):
                imglist.extend(
                    amutils.build_nddata(
                        image, group_id,
                        extracted_sources[image]['catalog_table']))
            print("\nSUCCESS")

            # 6: Cross-match the source catalog with the astrometric reference catalog and fit the source catalog to the reference catalog
            print(
                "-------------------- STEP 6: Cross matching and fitting --------------------"
            )
            # Specify matching algorithm to use
            match = tweakwcs.TPMatch(searchrad=250,
                                     separation=0.1,
                                     tolerance=100,
                                     use2dhist=False)
            # Align images and correct WCS
            tweakwcs.tweak_image_wcs(imglist, reference_catalog, match=match)
            # Interpret RMS values from tweakwcs
            interpret_fit_rms(imglist, reference_catalog)

            tweakwcs_info_keys = OrderedDict(
                imglist[0].meta['tweakwcs_info']).keys()
            imgctr = 0
            for item in imglist:
                retry_fit = False
                #Handle fitting failures (no matches found)
                if item.meta['tweakwcs_info']['status'].startswith("FAILED"):
                    if catalogIndex < numCatalogs - 1:
                        print(
                            "No cross matches found between astrometric catalog and sources found in images"
                        )
                        print("Try again with the next catalog")
                        catalogIndex += 1
                        retry_fit = True
                        break
                    else:
                        print(
                            "No cross matches found in any catalog - no processing done."
                        )
                        return (1)
                max_rms_val = item.meta['tweakwcs_info']['TOTAL_RMS']
                num_xmatches = item.meta['tweakwcs_info']['nmatches']
                # print fit params to screen
                print(
                    "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIT PARAMETERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
                )
                if item.meta['chip'] == 1:
                    image_name = processList[imgctr]
                    imgctr += 1
                print("image: {}".format(image_name))
                print("chip: {}".format(item.meta['chip']))
                print("group_id: {}".format(item.meta['group_id']))
                for tweakwcs_info_key in tweakwcs_info_keys:
                    if not tweakwcs_info_key.startswith("matched"):
                        print("{} : {}".format(
                            tweakwcs_info_key,
                            item.meta['tweakwcs_info'][tweakwcs_info_key]))
                # print("Radial shift: {}".format(math.sqrt(item.meta['tweakwcs_info']['shift'][0]**2+item.meta['tweakwcs_info']['shift'][1]**2)))
                print(
                    "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
                )

                if num_xmatches < MIN_CROSS_MATCHES:
                    if catalogIndex < numCatalogs - 1:
                        print(
                            "Not enough cross matches found between astrometric catalog and sources found in images"
                        )
                        print("Try again with the next catalog")
                        catalogIndex += 1
                        retry_fit = True
                        break
                    else:
                        print(
                            "Not enough cross matches found in any catalog - no processing done."
                        )
                        return (1)
                elif max_rms_val > MAX_FIT_RMS:
                    if catalogIndex < numCatalogs - 1:
                        print(
                            "Fit RMS value of {} mas is greater than the maximum threshold value of {} mas."
                            .format(
                                item.meta['tweakwcs_info']['FIT_RMS'].value,
                                MAX_FIT_RMS))
                        print("Try again with the next catalog")
                        catalogIndex += 1
                        retry_fit = True
                        break
                    else:
                        print(
                            "Fit RMS values too large using any catalog - no processing done."
                        )
                        return (1)
                else:
                    print("Fit calculations successful.")
        if not retry_fit:
            print("\nSUCCESS")

            # 7: Write new fit solution to input image headers
            print(
                "-------------------- STEP 7: Update image headers with new WCS information --------------------"
            )
            if update_hdr_wcs:
                update_image_wcs_info(imglist, processList)
                print("\nSUCCESS")
            else:
                print("\n STEP SKIPPED")
            return (0)
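
A minimal usage sketch for this perform_align() variant, which (per its docstring) returns 0 on success and 1 on failure; the rootname is the same IPPSSOOT used in Example #2 and stands in for any valid dataset.

status = perform_align(['ib6v06060'],
                       archive=False,
                       clobber=True,
                       update_hdr_wcs=True)
if status != 0:
    print("Alignment failed - image headers were not updated.")
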
Example #7
def perform_align(input_list,
                  archive=False,
                  clobber=False,
                  update_hdr_wcs=False,
                  print_fit_parameters=True,
                  print_git_info=False):
    """Main calling function.

    Parameters
    ----------
    input_list : list
        List of one or more IPPSSOOTs (rootnames) to align.

    archive : Boolean
        Retain copies of the downloaded files in the astroquery created sub-directories?

    clobber : Boolean
        Download and overwrite existing local copies of input files?

    update_hdr_wcs : Boolean
        Write newly computed WCS information to image headers?

    print_fit_parameters : Boolean
        Specify whether or not to print out FIT results for each chip.

    print_git_info : Boolean
        Display git repository information?

    Returns
    -------
    filteredTable : astropy.table.Table
        Table of the filtered input images, updated with alignment status and fit statistics.

    """

    # Define astrometric catalog list in priority order
    catalogList = ['GAIADR2', 'GSC241']

    # 0: print git info
    if print_git_info:
        print(
            "-------------------- STEP 0: Display Git revision info  --------------------"
        )
        full_path = os.path.dirname(__file__) + "/utils"
        repo_path = None
        if "hlapipeline/hlapipeline" in full_path:
            repo_path = full_path.split(
                "hlapipeline/hlapipeline")[0] + "hlapipeline"
        elif "hlapipeline" in full_path:
            repo_path = full_path.split("hlapipeline")[0] + "hlapipeline"
        if repo_path and not os.path.exists(repo_path):
            repo_path = None  # protect against non-existent paths
        if repo_path:
            get_git_rev_info.print_rev_id(
                repo_path)  # Display git repository information
        else:
            print(
                "WARNING: Unable to display Git repository revision information."
            )

    # 1: Interpret input data and optional parameters
    print("-------------------- STEP 1: Get data --------------------")
    zeroDT = startingDT = datetime.datetime.now()
    print(str(startingDT))
    imglist = check_and_get_data(input_list, archive=archive, clobber=clobber)
    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 1]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 2: Apply filter to input observations to ensure that they meet the minimum criteria for alignment
    print("-------------------- STEP 2: Filter data --------------------")
    filteredTable = filter.analyze_data(imglist)

    # Check the table to determine if there is any viable data to be aligned.  The
    # 'doProcess' column (bool) indicates the image/file should or should not be used
    # for alignment purposes.  For filtered data, 'doProcess=0' and 'status=9999' in the table
    # (the status value by default), so there is no need to update the filteredTable here.
    if filteredTable['doProcess'].sum() == 0:
        print("No viable images in filtered table - no processing done.\n")
        return (filteredTable)

    # Get the list of all "good" files to use for the alignment
    processList = filteredTable['imageName'][np.where(filteredTable['doProcess'])]
    processList = list(processList)  # Convert processList from a numpy array to a regular Python list
    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 2]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 3: Build WCS for full set of input observations
    print("-------------------- STEP 3: Build WCS --------------------")
    refwcs = amutils.build_reference_wcs(processList)
    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 3]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 4: Extract catalog of observable sources from each input image
    print("-------------------- STEP 4: Source finding --------------------")
    extracted_sources = generate_source_catalogs(processList,
                                                 centering_mode='starfind',
                                                 nlargest=MAX_SOURCES_PER_CHIP)

    for imgname in extracted_sources.keys():
        table = extracted_sources[imgname]["catalog_table"]

        # Get the location of the current image in the filtered table
        index = np.where(filteredTable['imageName'] == imgname)[0][0]

        # First ensure sources were found
        if table[1] is None:
            print("No sources found in image {}".format(imgname))
            filteredTable[index]['status'] = 1
            return (filteredTable)

        # The catalog of observable sources must have at least MIN_OBSERVABLE_THRESHOLD entries to be useful
        total_num_sources = 0
        for chipnum in table.keys():
            total_num_sources += len(table[chipnum])

        # Update filtered table with number of found sources
        filteredTable[index]['foundSources'] = total_num_sources

        if total_num_sources < MIN_OBSERVABLE_THRESHOLD:
            print("Not enough sources ({}) found in image {}".format(
                total_num_sources, imgname))
            filteredTable[index]['status'] = 1
            return (filteredTable)

    print("\nSUCCESS")
    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 4]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 5: Retrieve list of astrometric sources from database

    # Convert input images to tweakwcs-compatible NDData objects and
    # attach source catalogs to them.
    imglist = []
    for group_id, image in enumerate(processList):
        img = amutils.build_nddata(image, group_id,
                                   extracted_sources[image]['catalog_table'])
        # add the name of the image to the imglist object
        for im in img:
            im.meta['name'] = image
        imglist.extend(img)

    best_fit_rms = -99999.0
    fit_algorithm_list = [match_2dhist_fit, match_default_fit]
    for catalogIndex in range(
            0, len(catalogList)):  #loop over astrometric catalog
        print(
            "-------------------- STEP 5: Detect astrometric sources --------------------"
        )
        print("Astrometric Catalog: ", catalogList[catalogIndex])
        reference_catalog = generate_astrometric_catalog(
            processList, catalog=catalogList[catalogIndex])

        currentDT = datetime.datetime.now()
        deltaDT = (currentDT - startingDT).total_seconds()
        print('Processing time of [STEP 5]: {} sec'.format(deltaDT))
        startingDT = currentDT

        if len(reference_catalog) < MIN_CATALOG_THRESHOLD:
            print("Not enough sources found in catalog " +
                  catalogList[catalogIndex])
            if catalogIndex < len(catalogList) - 1:
                print("Try again with other catalog")
            else:
                print(
                    "ERROR! No astrometric sources found in any catalog. Exiting..."
                )  # bail out if not enough sources can be found in any of the astrometric catalogs
                filteredTable['status'][:] = 1
                return (filteredTable)
        else:
            print(
                "-------------------- STEP 5b: Cross matching and fitting --------------------"
            )
            for algorithm_name in fit_algorithm_list:  #loop over fit algorithm type
                print(
                    "------------------ Catalog {} matched using {} ------------------ "
                    .format(catalogList[catalogIndex],
                            algorithm_name.__name__))

                #execute the correct fitting/matching algorithm
                try:
                    fit_rms, fit_num = algorithm_name(
                        imglist,
                        reference_catalog,
                        print_fit_parameters=print_fit_parameters)

                    # Potentially update the previously set best fit with improved values
                    if best_fit_rms >= 0.:
                        if fit_rms < best_fit_rms:
                            best_fit_rms = fit_rms
                            best_fit_num = fit_num
                            for item in imglist:
                                item.best_meta = item.meta.copy()
                    # If a reasonable fit has been found, this is the initial setting of the best_fit_xxx variables
                    else:
                        if fit_rms < MAX_FIT_LIMIT:
                            best_fit_rms = fit_rms
                            best_fit_num = fit_num
                            for item in imglist:
                                item.best_meta = item.meta.copy()

                except Exception:
                    print(
                        "WARNING: Catastrophic fitting failure with catalog {} and matching algorithm {}."
                        .format(catalogList[catalogIndex],
                                algorithm_name.__name__))
                    filteredTable['status'][:] = 1
                    # It may be there are additional catalogs and algorithms to try, so keep going
                    continue

                # If this is true, we are done so break out of the catalog loop (inner for loop)
                # THIS AND THE NEXT BREAK ARE FIXES TO THE CURRENT LOGIC.  IT IS ASSUMED THESE
                # FIXES WILL BECOME OBSOLETE ONCE THE ENHANCED LOGIC FOR CHOOSING THE BEST
                # SOLUTION IS IMPLEMENTED.
                if best_fit_rms > 0 and best_fit_rms < MAX_FIT_RMS:
                    break

            # If this is true, we are done so break out of the catalog loop (outer for loop)
            if best_fit_rms > 0 and best_fit_rms < MAX_FIT_RMS:
                break

                #imglist_temp = imglist.copy() # preserve best fit solution so that it can be inserted into a reinitialized imglist next time through.

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 5b]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 6: Populate the filteredTable
    print(
        "-------------------- STEP 6: Collect up information and populate the filtered table --------------------"
    )
    if best_fit_rms > 0 and best_fit_rms < MAX_FIT_RMS:
        print(
            "The fitting process was successful with a best fit total rms of {} mas"
            .format(best_fit_rms))
    else:
        print(
            "The fitting process was unsuccessful with a best fit total rms of {} mas"
            .format(best_fit_rms))

    if best_fit_rms > 0 and best_fit_rms < MAX_FIT_LIMIT:
        # update to the meta information with the lowest rms if it is reasonable
        for item in imglist:
            item.meta = item.best_meta.copy()
        filteredTable['status'][:] = 0

        # Protect the writing of the table within the best_fit_rms
        info_keys = OrderedDict(imglist[0].meta['tweakwcs_info']).keys()
        # Update filtered table with number of matched sources and other information
        for item in imglist:
            imgname = item.meta['name']
            index = np.where(filteredTable['imageName'] == imgname)[0][0]

            if not item.meta['tweakwcs_info']['status'].startswith("FAILED"):
                for tweakwcs_info_key in info_keys:
                    if not tweakwcs_info_key.startswith("matched"):
                        if tweakwcs_info_key.lower() == 'rms':
                            filteredTable[index]['rms_x'] = item.meta[
                                'tweakwcs_info'][tweakwcs_info_key][0]
                            filteredTable[index]['rms_y'] = item.meta[
                                'tweakwcs_info'][tweakwcs_info_key][1]

                filteredTable[index]['catalog'] = item.meta['tweakwcs_info'][
                    'catalog']
                filteredTable[index]['catalogSources'] = len(reference_catalog)
                filteredTable[index]['matchSources'] = item.meta[
                    'tweakwcs_info']['nmatches']
                filteredTable[index]['rms_ra'] = item.meta['tweakwcs_info'][
                    'RMS_RA'].value
                filteredTable[index]['rms_dec'] = item.meta['tweakwcs_info'][
                    'RMS_DEC'].value
                filteredTable[index]['fit_rms'] = item.meta['tweakwcs_info'][
                    'FIT_RMS']
                filteredTable[index]['total_rms'] = item.meta['tweakwcs_info'][
                    'TOTAL_RMS']
                #filteredTable.pprint(max_width=-1)

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 6]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 7: Write new fit solution to input image headers
    print(
        "-------------------- STEP 7: Update image headers with new WCS information --------------------"
    )
    if best_fit_rms > 0 and update_hdr_wcs:
        headerlet_dict = update_image_wcs_info(imglist)
        for tableIndex in range(0, len(filteredTable)):
            filteredTable[tableIndex]['headerletFile'] = headerlet_dict[
                filteredTable[tableIndex]['imageName']]
        print("\nSUCCESS")
    else:
        print("\n STEP SKIPPED")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 7]: {} sec'.format(deltaDT))
    print('TOTAL Processing time of {} sec'.format(
        (currentDT - zeroDT).total_seconds()))
    return (filteredTable)
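
Unlike Example #6, this variant returns the filtered Astropy table rather than an integer. A usage sketch, with the column names taken from the code above and status == 0 used there to mark a successful fit:

result_table = perform_align(['ib6v06060'],
                             print_fit_parameters=False,
                             print_git_info=True)
aligned = result_table[result_table['status'] == 0]
aligned['imageName', 'catalog', 'matchSources', 'total_rms'].pprint(max_width=-1)
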
Example #8
    def align_randomFields(self, randomTable):
        """ Process randomly selected fields (aka datasets) stored in an Astropy table.

            Each field is used as input to determine if it can be aligned to an
            astrometric standard.  The success or fail status for each test is retained
            as the overall success or fail statistic is the necessary output from
            this test.
        """

        numSuccess = 0
        numAllDatasets = 0

        # Read the table and extract a list of each dataset name in IPPSSOOT format
        # which is either an association ID or an individual filename
        dataset_list = get_dataset_list(randomTable)

        numAllDatasets = len(dataset_list)

        # Process the dataset names in the list
        #
        # If the dataset name represents an association ID, all of the images
        # within the association need to be processed.  Otherwise, the dataset
        # is a single image.
        #
        # If the "alignment" of a field/dataset fails for any reason, trap
        # the exception and keep going.
        for dataset in dataset_list:

            print("TEST_ALIGN. Dataset: ", dataset)
            currentDT = datetime.datetime.now()
            print(str(currentDT))
            try:
                shift_file, local_files = self.run_align([dataset])
                x_shift = numpy.all(numpy.isnan(shift_file['col2']))
                rms_x = max(shift_file['col6'])
                rms_y = max(shift_file['col7'])

                reference_wcs = amutils.build_reference_wcs(local_files)
                test_limit = self.fit_limit / reference_wcs.pscale

                if not x_shift and ((rms_x <= test_limit) and
                                    (rms_y <= test_limit)):
                    numSuccess += 1
                    print("TEST_ALIGN. Successful Dataset: ", dataset, "\n")
                else:
                    print("TEST_ALIGN. Unsuccessful Dataset: ", dataset, "\n")

            # Catch anything that happens as this dataset will be considered a failure, but
            # the processing of datasets should continue.  Generate sufficient output exception
            # information so problems can be addressed.
            except Exception:
                exc_type, exc_value, exc_tb = sys.exc_info()
                traceback.print_exception(exc_type,
                                          exc_value,
                                          exc_tb,
                                          file=sys.stdout)
                print("TEST_ALIGN. Exception Dataset: ", dataset, "\n")
                continue

        # Determine the percent success over all datasets processed
        percentSuccess = numSuccess / numAllDatasets
        print('TEST_ALIGN. Number of successful tests: ', numSuccess,
              ' Total number of tests: ', numAllDatasets, ' Percent success: ',
              percentSuccess * 100.0)

        return percentSuccess
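
align_randomFields() returns the fraction of successfully aligned datasets, so a calling test would typically assert against a minimum acceptable success rate. A sketch of such a caller follows; the 70% threshold and the table-building helper are hypothetical, not taken from the source.

    def test_random_fields(self):
        """Hypothetical caller: require at least 70% of the random fields to align."""
        random_table = get_random_dataset_table()  # hypothetical helper returning an Astropy table
        percent_success = self.align_randomFields(random_table)
        assert percent_success >= 0.7  # illustrative threshold
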
Example #9
def perform_align(input_list, archive=False, clobber=False, update_hdr_wcs=False, print_fit_parameters=True,
                    print_git_info=False):
    """Main calling function.

    Parameters
    ----------
    input_list : list
        List of one or more IPPSSOOTs (rootnames) to align.

    archive : Boolean
        Retain copies of the downloaded files in the astroquery created sub-directories?

    clobber : Boolean
        Download and overwrite existing local copies of input files?

    update_hdr_wcs : Boolean
        Write newly computed WCS information to image headers?

    print_fit_parameters : Boolean
        Specify whether or not to print out FIT results for each chip.

    print_git_info : Boolean
        Display git repository information?

    Returns
    -------
    filteredTable : astropy.table.Table
        Table of the filtered input images, updated with alignment status and fit statistics.

    """

    # Define astrometric catalog list in priority order
    catalogList = ['GAIADR2', 'GSC241']

    # 0: print git info
    if print_git_info:
        print("-------------------- STEP 0: Display Git revision info  --------------------")
        full_path = os.path.dirname(__file__)+"/utils"
        repo_path=None
        if "hlapipeline/hlapipeline" in full_path:
            repo_path = full_path.split("hlapipeline/hlapipeline")[0]+"hlapipeline"
        elif "hlapipeline" in full_path:
            repo_path = full_path.split("hlapipeline")[0]+"hlapipeline"
        if repo_path and not os.path.exists(repo_path):
            repo_path = None  # protect against non-existent paths
        if repo_path:
            get_git_rev_info.print_rev_id(repo_path) # Display git repository information
        else:
            print("WARNING: Unable to display Git repository revision information.")

    # 1: Interpret input data and optional parameters
    print("-------------------- STEP 1: Get data --------------------")
    startingDT = datetime.datetime.now()
    print(str(startingDT))
    imglist = check_and_get_data(input_list, archive=archive, clobber=clobber)
    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 1]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 2: Apply filter to input observations to ensure that they meet the minimum criteria for alignment
    print("-------------------- STEP 2: Filter data --------------------")
    filteredTable = filter.analyze_data(imglist)

    # Check the table to determine if there is any viable data to be aligned.  The
    # 'doProcess' column (bool) indicates the image/file should or should not be used
    # for alignment purposes.  For filtered data, 'doProcess=0' and 'status=9999' in the table.
    if filteredTable['doProcess'].sum() == 0:
        print("No viable images in filtered table - no processing done.\n")
        return(filteredTable)

    # Get the list of all "good" files to use for the alignment
    processList = filteredTable['imageName'][np.where(filteredTable['doProcess'])]
    processList = list(processList)  # Convert processList from a numpy array to a regular Python list
    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 2]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 3: Build WCS for full set of input observations
    print("-------------------- STEP 3: Build WCS --------------------")
    refwcs = amutils.build_reference_wcs(processList)
    print("\nSUCCESS")


    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 3]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 4: Extract catalog of observable sources from each input image
    print("-------------------- STEP 4: Source finding --------------------")
    extracted_sources = generate_source_catalogs(processList,
                                                 centering_mode='starfind',
                                                 nlargest=MAX_SOURCES_PER_CHIP)

    for imgname in extracted_sources.keys():
        table=extracted_sources[imgname]["catalog_table"]
        # The catalog of observable sources must have at least MIN_OBSERVABLE_THRESHOLD entries to be useful
        total_num_sources = 0
        for chipnum in table.keys():
            total_num_sources += len(table[chipnum])

        # Update filtered table with number of found sources
        index = np.where(filteredTable['imageName']==imgname)[0][0]
        filteredTable[index]['foundSources'] = total_num_sources

        if total_num_sources < MIN_OBSERVABLE_THRESHOLD:
            print("Not enough sources ({}) found in image {}".format(total_num_sources,imgname))
            filteredTable[index]['status'] = 1
            return(filteredTable)
    # Convert input images to tweakwcs-compatible NDData objects and
    # attach source catalogs to them.
    imglist = []
    for group_id, image in enumerate(processList):
        img = amutils.build_nddata(image, group_id,
                                   extracted_sources[image]['catalog_table'])
        # add the name of the image to the imglist object
        for im in img:
            im.meta['name'] = image
        imglist.extend(img)

    print("\nSUCCESS")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 4]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 5: Retrieve list of astrometric sources from database
    catalogIndex = 0
    best_fit = MAX_FIT_LIMIT
    print("-------------------- STEP 5: Detect Gaia astrometric sources --------------------")
    print("Astrometric Catalog: ",catalogList[catalogIndex])
    reference_catalog = generate_astrometric_catalog(processList, catalog=catalogList[catalogIndex])

    best_fit_rms = MAX_FIT_RMS
    if len(reference_catalog) < MIN_CATALOG_THRESHOLD:
        print("Not enough sources found in Gaia catalog " + catalogList[catalogIndex])
        print("Try again with other catalog")
        catalogIndex += 1
        retry_fit = True
        skip_all_other_steps = True
    else:

        currentDT = datetime.datetime.now()
        deltaDT = (currentDT - startingDT).total_seconds()
        print('Processing time of [STEP 5]: {} sec'.format(deltaDT))
        startingDT = currentDT
        print("-------------------- STEP 5b: Cross matching and fitting --------------------")
        best_fit_rms, best_fit_num = match_2dhist_fit(imglist, reference_catalog,
                                                      print_fit_parameters=print_fit_parameters)
        # Preserve this fit as the current best solution so that item.best_meta is
        # defined even if no alternate catalog/algorithm is tried below.
        for item in imglist:
            item.best_meta = item.meta.copy()

        info_keys = OrderedDict(imglist[0].meta['tweakwcs_info']).keys()
        # Update filtered table with number of matched sources and other information
        for item in imglist:
            imgname = item.meta['name']
            index = np.where(filteredTable['imageName']==imgname)[0][0]

            if not item.meta['tweakwcs_info']['status'].startswith("FAILED"):
                for tweakwcs_info_key in info_keys:
                    if not tweakwcs_info_key.startswith("matched"):
                        if tweakwcs_info_key.lower() == 'rms':
                            filteredTable[index]['rms_x'] = item.meta['tweakwcs_info'][tweakwcs_info_key][0]
                            filteredTable[index]['rms_y'] = item.meta['tweakwcs_info'][tweakwcs_info_key][1]

                filteredTable[index]['catalog'] = item.meta['tweakwcs_info']['catalog']
                filteredTable[index]['catalogSources'] = len(reference_catalog)
                filteredTable[index]['matchSources'] = item.meta['tweakwcs_info']['nmatches']
                filteredTable[index]['rms_ra'] = item.meta['tweakwcs_info']['RMS_RA'].value
                filteredTable[index]['rms_dec'] = item.meta['tweakwcs_info']['RMS_DEC'].value
                filteredTable[index]['fit_rms'] = item.meta['tweakwcs_info']['FIT_RMS']
                filteredTable[index]['total_rms'] = item.meta['tweakwcs_info']['TOTAL_RMS']
                #filteredTable.pprint(max_width=-1)

        # 6b: If available, the logic tree for fitting with different algorithms
        # would be here.   These would only be invoked if the above step failed.
        # At this time, only one algorithm is being used and there are not
        # currently other cases to run on the images and so this is empty.   This
        # section might be a good area to create as a function if this will be repeated
        # with other catalogs.


    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 5b]: {} sec'.format(deltaDT))
    startingDT = currentDT
    # 6: If available, try the fitting with different catalogs
    if best_fit_rms >= MAX_FIT_RMS:
        for catalogIndex in range(1, len(catalogList)):
            print("-------------------- STEP 6: Detect catalog astrometric sources --------------------")
            print("Astrometric Catalog: ",catalogList[catalogIndex])
            reference_catalog = generate_astrometric_catalog(processList, catalog=catalogList[catalogIndex])

            currentDT = datetime.datetime.now()
            deltaDT = (currentDT - startingDT).total_seconds()
            print('Processing time of [STEP 6]: {} sec'.format(deltaDT))
            startingDT = currentDT

            if len(reference_catalog) < MIN_CATALOG_THRESHOLD:
                print("Not enough sources found in catalog " + catalogList[catalogIndex])
                print("Try again with other catalog")
                catalogIndex += 1
                retry_fit = True
                skip_all_other_steps = True
            else:
                print("-------------------- STEP 6b: Cross matching and fitting --------------------")
                fit_rms, fit_num = match_default_fit(imglist, reference_catalog,
                                     print_fit_parameters=print_fit_parameters)
                # update the best fit
                if fit_rms < best_fit_rms:
                    best_fit_rms = fit_rms
                    best_fit_num = fit_num
                    for item in imglist:
                        item.best_meta = item.meta.copy()

                currentDT = datetime.datetime.now()
                deltaDT = (currentDT - startingDT).total_seconds()
                print('Processing time of [STEP 6b]: {} sec'.format(deltaDT))
                startingDT = currentDT

    if best_fit_rms < MAX_FIT_RMS:
        print("The fitting process was successful with a best fit total rms of {} mas".format(best_fit_rms))
    else:
        print("The fitting process was unsuccessful with a best fit total rms of {} mas".format(best_fit_rms))

    if best_fit_rms < MAX_FIT_LIMIT:
        # update to the meta information with the lowest rms if it is reasonable
        for item in imglist:
            item.meta = item.best_meta


    # 7: Write new fit solution to input image headers
    print("-------------------- STEP 7: Update image headers with new WCS information --------------------")
    if update_hdr_wcs:
        update_image_wcs_info(imglist, processList)
        print("\nSUCCESS")
    else:
        print("\n STEP SKIPPED")

    currentDT = datetime.datetime.now()
    deltaDT = (currentDT - startingDT).total_seconds()
    print('Processing time of [STEP 7]: {} sec'.format(deltaDT))
    startingDT = currentDT
    filteredTable['status'][:] = 0
    return (filteredTable)
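
The module-level thresholds referenced throughout Examples #6-#9 (MIN_CATALOG_THRESHOLD, MIN_OBSERVABLE_THRESHOLD, MIN_CROSS_MATCHES, MAX_FIT_RMS, MAX_FIT_LIMIT, MAX_SOURCES_PER_CHIP) are defined elsewhere in the module and are not part of these snippets. The values below are illustrative placeholders only, shown to make the control flow above easier to follow.

MIN_CATALOG_THRESHOLD = 3        # placeholder: minimum astrometric-catalog sources needed to attempt a fit
MIN_OBSERVABLE_THRESHOLD = 10    # placeholder: minimum sources that must be detected in an image
MIN_CROSS_MATCHES = 3            # placeholder: minimum cross-matches required for a usable fit
MAX_FIT_RMS = 10                 # placeholder, mas: a larger total RMS triggers a retry with another catalog
MAX_FIT_LIMIT = 1000             # placeholder, mas: fits above this are rejected outright
MAX_SOURCES_PER_CHIP = 250       # placeholder: cap on sources kept per chip during source finding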