def random_align(self, randomTable):
    """Align randomly selected fields (datasets) listed in an Astropy table.

    Each entry names a dataset in IPPSSOOT format — either an association ID
    or an individual exposure filename.  Every dataset is passed to
    ``alignimages.perform_align``; a zero result is counted as a success.
    Any exception raised while processing one dataset is printed (with a
    traceback) and counted as a failure so the remaining datasets still run.

    Parameters
    ----------
    randomTable : astropy.table.Table
        Table of randomly selected fields to process.

    Returns
    -------
    float
        Fraction (0.0-1.0) of datasets that aligned successfully.
    """
    numSuccess = 0

    # Extract one dataset name per table row (association ID or filename).
    dataset_list = get_dataset_list(randomTable)
    numAllDatasets = len(dataset_list)

    # If the name is an association ID, perform_align handles the multiple
    # member images; otherwise the dataset is a single image.
    for dataset in dataset_list:
        print("TEST_RANDOM. Dataset: ", dataset)
        try:
            # NOTE(review): this variant assumes perform_align returns an
            # integer status where 0 means success — confirm against the
            # alignimages API in use.
            result = alignimages.perform_align([dataset])
            if result == 0:
                print("TEST_RANDOM. Successful Dataset: ", dataset, "\n")
                numSuccess += 1
            else:
                print("TEST_RANDOM. Unsuccessful Dataset: ", dataset, "\n")
        # Deliberately broad: any failure marks this dataset unsuccessful,
        # but must not stop the remaining datasets from being processed.
        # Emit enough traceback detail for the problem to be diagnosed.
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_tb,
                                      file=sys.stdout)
            print("TEST_RANDOM. Exception Dataset: ", dataset, "\n")
            continue

    # Guard against an empty dataset list — the original divided
    # unconditionally and raised ZeroDivisionError for an empty table.
    percentSuccess = (numSuccess / numAllDatasets) if numAllDatasets else 0.0
    print('TEST_RANDOM. Number of successful tests: ', numSuccess,
          ' Total number of tests: ', numAllDatasets,
          ' Percent success: ', percentSuccess * 100.0)

    return percentSuccess
def random_align(self, randomTable):
    """Align randomly selected fields (datasets) listed in an Astropy table.

    Each entry names a dataset in IPPSSOOT format — either an association ID
    or an individual exposure filename.  Every dataset is passed to
    ``alignimages.perform_align``, which returns a per-image results table.
    Datasets whose images were all filtered out (``doProcess`` sums to 0,
    e.g. zero exposure time) are dropped from the total; otherwise success
    is a zero sum of the ``status`` column over the processed images.  Each
    per-dataset table is written to ``<dataset>.ecsv`` and appended to a
    summary table written to ``resultsBigTest.ecsv``.  Any exception raised
    while processing one dataset is printed (with a traceback) and counted
    as a failure so the remaining datasets still run.

    Parameters
    ----------
    randomTable : astropy.table.Table
        Table of randomly selected fields to process.

    Returns
    -------
    float
        Fraction (0.0-1.0) of non-filtered datasets that aligned
        successfully.
    """
    numSuccess = 0

    # Extract one dataset name per table row (association ID or filename).
    dataset_list = get_dataset_list(randomTable)
    numAllDatasets = len(dataset_list)

    # Accumulates every per-dataset results table for the final summary.
    allDatasetTable = Table()

    # If the name is an association ID, perform_align handles the multiple
    # member images; otherwise the dataset is a single image.
    for datasetKey, dataset in enumerate(dataset_list):
        outputName = dataset + '.ecsv'
        print("TEST_RANDOM. Dataset: ", dataset, ' DatasetKey: ', datasetKey)
        try:
            datasetTable = alignimages.perform_align([dataset])

            # Entirely filtered dataset: nothing was processed, so remove
            # it from the denominator of the success statistic.
            if datasetTable['doProcess'].sum() == 0:
                print("TEST_RANDOM. Filtered Dataset: ", dataset, "\n")
                numAllDatasets -= 1
            else:
                # Some images may have been filtered individually
                # (e.g., expotime = 0) — only sum status over the images
                # actually processed.
                index = np.where(datasetTable['doProcess'] == 1)[0]
                sumOfStatus = datasetTable['status'][index].sum()

                # Tag every row with this dataset's counter and mark done.
                datasetTable['datasetKey'][:] = datasetKey
                datasetTable['completed'][:] = True

                datasetTable.write(outputName, format='ascii.ecsv')
                datasetTable.pprint(max_width=-1)

                if sumOfStatus == 0:
                    print("TEST_RANDOM. Successful Dataset: ", dataset, "\n")
                    numSuccess += 1
                else:
                    print("TEST_RANDOM. Unsuccessful Dataset: ", dataset, "\n")

            # Append the latest dataset table to the summary table.
            allDatasetTable = vstack([allDatasetTable, datasetTable])

            # Clean up intermediate products so the next dataset starts
            # from a fresh working directory.
            if os.path.exists('ref_cat.ecsv'):
                os.remove('ref_cat.ecsv')
            if os.path.exists('refcatalog.cat'):
                os.remove('refcatalog.cat')
            for f in sorted(glob.glob('*fl?.fits')):
                os.remove(f)

        # Deliberately broad: any failure marks this dataset unsuccessful,
        # but must not stop the remaining datasets from being processed.
        # Emit enough traceback detail for the problem to be diagnosed.
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_tb,
                                      file=sys.stdout)
            print("TEST_RANDOM. Exception Dataset: ", dataset, "\n")
            continue

    # Write out the summary table.
    allDatasetTable.write('resultsBigTest.ecsv', format='ascii.ecsv')

    # Guard against division by zero — numAllDatasets is decremented for
    # every filtered dataset, so it can reach 0 even for a non-empty input
    # table; the original divided unconditionally and would crash.
    percentSuccess = (numSuccess / numAllDatasets) if numAllDatasets else 0.0
    print('TEST_RANDOM. Number of successful tests: ', numSuccess,
          ' Total number of tests: ', numAllDatasets,
          ' Percent success: ', percentSuccess * 100.0)

    return percentSuccess