def test_NObsPriority():
    """Repeatedly run the GalplaneFootprintMetric for the combined map and
    log the resulting metric data, so that successive realisations can be
    compared for reproducibility.

    Relies on the module-level TEST_DB_PATH and TEST_MAP_PATH, which should
    point at a test OpSim database and survey footprint map respectively.
    Output is written to ./results/test_data.log.
    """
    runName = os.path.split(TEST_DB_PATH)[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(TEST_DB_PATH)
    map_data_table = compare_survey_footprints.load_map_data(TEST_MAP_PATH)
    mapName = 'combined_map'
    tau_obs = 20.0
    nLoop = 10
    # Context manager guarantees the log is flushed and closed even if a
    # metric calculation raises part-way through the loop.
    with open('./results/test_data.log', 'w') as log:
        for i in range(nLoop):
            log.write('mapName: ' + mapName + '\n')
            log.write('runName: ' + runName + '\n')
            log.write('tau_obs: ' + str(tau_obs) + '\n')
            FoM = compare_survey_footprints.FiguresOfMerit()
            # Recompute the visit metrics from scratch on every iteration:
            bundleDict = compare_survey_footprints.calcNVisits(
                opsim_db, runName, mapName)
            rootName = 'GalplaneFootprintMetric_' + mapName + '_'
            outputName = rootName + 'Tau_' + str(tau_obs).replace('.', '_')
            log.write('OutputName: ' + outputName + '\n')
            metricData = bundleDict[outputName].metricValues
            log.write('metricData: ' + repr(metricData) + '\n')
            # Restrict analysis to the sky Rubin can actually observe:
            rubin_visibility_zone = compare_survey_footprints.calc_rubin_visibility(
                bundleDict, runName)
            log.write('rubin_visibility_zone: ' +
                      repr(rubin_visibility_zone) + '\n')
            map_data = getattr(map_data_table, mapName)
            log.write('map_data: ' + repr(map_data) + '\n')
            desired_healpix = compare_survey_footprints.calc_desired_survey_map(
                mapName, map_data, rubin_visibility_zone)
            log.write('N survey pixels: ' + str(len(desired_healpix)) + '\n')
            FoM = compare_survey_footprints.calcFootprintOverlap(
                runName, mapName, tau_obs, desired_healpix, bundleDict, FoM)
            log.write('Overlap pixels: ' + str(FoM.overlap_healpix) + '\n')
            log.write('Overlap percent: ' + str(FoM.overlap_percent) + '\n')
            log.write('Missing pixels: ' + str(FoM.missing_healpix) + '\n')
            log.write('Missing percent: ' + str(FoM.missing_percent) + '\n')
            # Record any NaN entries, which would indicate HEALpixels the
            # metric did not evaluate:
            idx = np.argwhere(np.isnan(metricData))
            log.write('NaN values: ' + repr(idx) + '\n')
            log.write('metricData entries: ' + repr(metricData[idx]) + '\n')
            log.write('Metric sum values: ' + repr(metricData.sum()) + '\n')
            log.write('-------------------------------------------\n')
def eval_footprint_priority():
    """Deprecated driver for the footprint-priority metric analysis.

    The body below is intentionally unreachable: it is retained only as a
    record of the original analysis.  Use compare_survey_footprints.py
    instead.

    NOTE(review): a 7-argument eval_footprint_priority is also called from
    diff_footprint() in this file -- confirm this zero-argument function is
    not shadowing the intended target.
    """
    # Fail fast before any of the (dead) analysis code runs.  raise
    # SystemExit is preferred over the interactive-only exit() builtin.
    print('''THIS CODE IS DEPRECATED AND MAINTAINED AS A RECORD. PLEASE USE compare_survey_footprints.py INSTEAD''')
    raise SystemExit()
    # --- Everything below is unreachable, kept as a record ---
    params = get_args()
    # Load the current OpSim database
    runName = os.path.split(params['opSim_db_file'])[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(params['opSim_db_file'])
    # Load the Galactic Plane Survey footprint map data
    map_data_table = load_map_data(params['map_file_path'])
    mapName = os.path.basename(params['map_file_path'].replace('.fits', ''))
    print('Total number of pixels in map: ' + str(len(map_data_table)))
    # Assign threshold numbers of visits:
    n_visits_thresholds = calcNVisitsThresholds()
    # Start logging, and loop metric over all science maps:
    logfile = open('footprint_metric_data.txt', 'w')
    logfile.write('runName mapName tau_obs NVisits_threshold nObserved_pixels footprint_priority ideal_footprint_priority %ofPriority %ofNObsPriority\n')
    for column in map_data_table.columns:
        # Calculate the total numbers of visits per HEALpix for this strategy
        bundleDict = calcMetric(opsim_db, runName, column.name,
                                diagnostics=False)
        map_data = getattr(map_data_table, column.name)
        print('Calculating survey region overlap for ' + column.name)
        evalFootprintPriority(opsim_db, map_data, runName, column.name,
                              n_visits_thresholds, bundleDict, logfile)
    logfile.close()
def time_per_filter():
    """Evaluate the per-filter exposure-time metrics for every science map.

    Relies on the module-level SCIENCE_MAPS list and output_dir path.
    Results accumulate in <output_dir>/<runName>_time_per_filter_metric_data.txt.
    """
    params = get_args()
    # Load the current OpSim database
    runName = os.path.split(params['opSim_db_file'])[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(params['opSim_db_file'])
    # Load the Galactic Plane Survey footprint map data
    map_data_table = compare_survey_footprints.load_map_data(
        params['map_file_path'])
    print('Total number of pixels in map: ' + str(len(map_data_table)))
    # Context manager guarantees the logfile is closed on any exception.
    # NOTE(review): the header is written here but the handle is never
    # passed into eval_filters_by_region -- confirm whether that routine
    # opens its own output or whether this logfile should be forwarded.
    with open(
            os.path.join(output_dir,
                         runName + '_time_per_filter_metric_data.txt'),
            'w') as logfile:
        logfile.write(
            'runName mapName filter mean(fexpt_ratio) stddev(fexpt_ratio) npix_obs %desired_pixels\n'
        )
        # Loop over all science maps:
        for mapName in SCIENCE_MAPS:
            map_data = getattr(map_data_table, mapName)
            # Compute metrics
            bundleDict = calc_filter_metric(opsim_db, runName, mapName)
            print(bundleDict.keys())
            # Calculate the Rubin visibility zone:
            rubin_visibility_zone = compare_survey_footprints.calc_rubin_visibility(
                bundleDict, runName)
            # Determine the HEALpix index of the desired science survey
            # region, taking the Rubin visibility zone into account:
            desired_healpix = compare_survey_footprints.calc_desired_survey_map(
                mapName, map_data, rubin_visibility_zone)
            eval_filters_by_region(params, bundleDict, runName, mapName,
                                   map_data, desired_healpix)
def eval_nobs_priority_function():
    """Visualise the number of visits per HEALpixel against scientific
    priority for the combined survey footprint map."""
    args = get_args()
    db_file = args['opSim_db_file']
    # The run name is simply the database filename without its extension
    run_name = os.path.split(db_file)[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(db_file)
    # Load the Galactic Plane Survey footprint maps and pick out the
    # combined map's priority data
    survey_maps = compare_survey_footprints.load_map_data(
        args['map_file_path'])
    map_name = 'combined_map'
    combined_map = getattr(survey_maps, map_name)
    # Compute the metric bundles, then render the 3D spatial/priority plot
    bundles = calcMetrics(opsim_db, run_name, map_name)
    plot_3d_healpix_nvis_priority(run_name, bundles, combined_map)
def test_rubin_visibility():
    """Recompute the Rubin visibility zone repeatedly for the combined map,
    logging the pixel counts from each run so reproducibility can be
    checked.

    Relies on the module-level TEST_DB_PATH; output is written to
    ./results/test_visibility_data.log.
    """
    runName = os.path.split(TEST_DB_PATH)[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(TEST_DB_PATH)
    mapName = 'combined_map'
    # Context manager guarantees the log is closed even if a metric
    # calculation raises part-way through the loop.
    with open('./results/test_visibility_data.log', 'w') as log:
        log.write('# Run NHealpix_vis>1 NHealpix_valid\n')
        for i in range(10):
            bundleDict = compare_survey_footprints.calcNVisits(
                opsim_db, runName, mapName)
            # NOTE(review): calc_rubin_visibility is unpacked into two zones
            # here, but every other caller in this file uses a single return
            # value -- confirm which signature is current.
            rubin_visibility_zone1, rubin_visibility_zone2 = compare_survey_footprints.calc_rubin_visibility(
                bundleDict, runName)
            log.write(
                str(i) + ' ' + str(len(rubin_visibility_zone1)) + ' ' +
                str(len(rubin_visibility_zone2)) + '\n')
def run_metrics():
    """Evaluate the cadence metrics for every science survey region in the
    footprint map table and tabulate the resulting figures of merit.

    Results are written to <output_dir>/<runName>_survey_cadence_data.txt,
    with diagnostic output in <output_dir>/survey_cadence_debug.log.
    Relies on the module-level tau, tau_var, tau_obs and output_dir values.
    """
    params = get_args()
    # Load the current OpSim database
    runName = os.path.split(params['opSim_db_file'])[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(params['opSim_db_file'])
    # Load the Galactic Plane Survey footprint map data
    map_data_table = load_map_data(params['map_file_path'])
    # Context managers guarantee both files are closed on any exception:
    with open(os.path.join(output_dir, runName + '_survey_cadence_data.txt'),
              'w') as logfile, \
            open(os.path.join(output_dir, 'survey_cadence_debug.log'),
                 'w') as debug:
        logfile.write('# Col 1: runName\n')
        logfile.write('# Col 2: mapName\n')
        logfile.write('# Col 3: tau\n')
        logfile.write('# Col 4: tau_var\n')
        logfile.write('# Col 5: Sum(VisitIntervalMetric)\n')
        logfile.write('# Col 6: Sum(SeasonVisibilityGapsMetric)\n')
        logfile.write('# Col 7: Sum(Priority*VIM)\n')
        logfile.write('# Col 8: %OfIdeal(VisitIntervalMetric)\n')
        logfile.write('# Col 9: %OfIdeal(SeasonVisibilityGapsMetric)\n')
        logfile.write('# Col 10: %OfIdeal(Priority*VIM)\n')
        debug.write('OpSim runName = ' + runName + '\n')
        # Loop over all science survey regions:
        for column in map_data_table.columns:
            mapName = column.name
            map_data = getattr(map_data_table, mapName)
            # Bug fix: the original omitted this newline, so the map name
            # ran into the following debug entry.
            debug.write('Map name = ' + mapName + '\n')
            # Compute the metrics for the current map.  NOTE(review): the
            # output names use the module-level tau/tau_var, while the loop
            # below iterates over tau_obs -- confirm that is intentional.
            bundleDict = calc_cadence_metrics(opsim_db, runName, mapName)
            outputName1 = 'GalPlaneVisitIntervalsTimescales_' + mapName + '_Tau_' + str(
                tau).replace('.', '_')
            outputName2 = 'GalPlaneSeasonGapsTimescales_' + mapName + '_Tau_' + str(
                tau_var).replace('.', '_')
            metric1_data = bundleDict[outputName1].metricValues
            metric2_data = bundleDict[outputName2].metricValues
            debug.write('Metric 1 data: ' + repr(metric1_data[-10:]) + '\n')
            debug.write('Metric 2 data: ' + repr(metric2_data[-10:]) + '\n')
            # Calculate the Rubin visibility zone:
            rubin_visibility_zone = compare_survey_footprints.calc_rubin_visibility(
                bundleDict, runName)
            debug.write('Npix Rubin visibility zone: ' +
                        str(len(rubin_visibility_zone)) + '\n')
            debug.write('Rubin visibility zone data: ' +
                        repr(rubin_visibility_zone[0:10]) + '\n')
            # Determine the HEALpix index of the desired science survey
            # region, taking the Rubin visibility zone into account:
            desired_healpix = compare_survey_footprints.calc_desired_survey_map(
                mapName, map_data, rubin_visibility_zone)
            debug.write('Npix Desired healpix: ' +
                        str(len(desired_healpix)) + '\n')
            debug.write('Desired healpix data: ' +
                        repr(desired_healpix[0:10]) + '\n')
            # Loop over each cadence category:
            for i in range(len(tau_obs)):
                FoM = eval_metrics_by_region(bundleDict, map_data, runName,
                                             mapName, tau_obs[i],
                                             rubin_visibility_zone,
                                             desired_healpix)
                logfile.write(runName + ' ' + mapName + ' ' +
                              str(FoM.tau) + ' ' + str(FoM.tau_var) + ' ' +
                              str(FoM.sumVIM) + ' ' + str(FoM.sumSVGM) + ' ' +
                              str(FoM.sumVIP) + ' ' +
                              str(FoM.percent_sumVIM) + ' ' +
                              str(FoM.percent_sumSVGM) + ' ' +
                              str(FoM.percent_sumVIP) + '\n')
            debug.write('\n=================================\n')
plotDict=plotDict)) bundleDict = maf.metricBundles.makeBundlesDictFromList(bundleList) bundleGroup = maf.MetricBundleGroup(bundleDict, opsim_db, outDir='test', resultsDb=None) bundleGroup.runAll() if diagnostics: bundleGroup.plotAll(closefigs=False) return bundleDict runName = os.path.split(TEST_DB_PATH)[-1].replace('.db', '') opsim_db = maf.OpsimDatabase(TEST_DB_PATH) mapName = 'combined_map' tau_obs = 11.0 tau_var = tau_obs * 5.0 old_metric1_data = None old_metrid2_data = None nLoop = 10 for i in range(0, nLoop, 1): bundleDict = calc_cadence_metrics(opsim_db, runName, mapName) outputName1 = 'GalPlaneVisitIntervalsTimescales_' + mapName + '_Tau_' + str( tau_obs).replace('.', '_') outputName2 = 'GalPlaneSeasonGapsTimescales_' + mapName + '_Tau_' + str( tau_var).replace('.', '_')
def test_metric_calculation():
    """Repeatedly run the cadence metrics for the combined map, saving each
    realisation to disk and logging the figures of merit, so that the
    reproducibility of the results can be checked between runs.

    Relies on the module-level TEST_DB_PATH and TEST_MAP_PATH; all output
    is written under ./cadence_results/.
    """
    runName = os.path.split(TEST_DB_PATH)[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(TEST_DB_PATH)
    map_data_table = compare_survey_footprints.load_map_data(TEST_MAP_PATH)
    mapName = 'combined_map'
    tau_obs = 11.0
    # The variability timescale is defined as 5x the observation timescale
    tau_var = tau_obs * 5.0
    nLoop = 10
    # Context manager guarantees the log is closed even if a metric
    # calculation raises part-way through the loop.
    with open('./cadence_results/test_data.log', 'w') as log:
        for i in range(nLoop):
            log.write('mapName: ' + mapName + '\n')
            log.write('runName: ' + runName + '\n')
            log.write('tau_obs: ' + str(tau_obs) + '\n')
            FoM = eval_survey_cadence.TimeFiguresOfMerit()
            bundleDict = eval_survey_cadence.calc_cadence_metrics(
                opsim_db, runName, mapName)
            outputName1 = 'GalPlaneVisitIntervalsTimescales_' + mapName + '_Tau_' + str(
                tau_obs).replace('.', '_')
            outputName2 = 'GalPlaneSeasonGapsTimescales_' + mapName + '_Tau_' + str(
                tau_var).replace('.', '_')
            metric1_data = bundleDict[outputName1].metricValues
            metric2_data = bundleDict[outputName2].metricValues
            # Save each realisation so successive runs can be diffed offline
            np.savetxt(
                os.path.join('./cadence_results/',
                             outputName1 + '_run' + str(i) + '.txt'),
                metric1_data)
            np.savetxt(
                os.path.join('./cadence_results/',
                             outputName2 + '_run' + str(i) + '.txt'),
                metric2_data)
            log.write('OutputName: ' + outputName1 + '\n')
            log.write('OutputName: ' + outputName2 + '\n')
            log.write('metricData 1: ' + repr(metric1_data) + '\n')
            log.write('metricData 2: ' + repr(metric2_data) + '\n')
            # Restrict analysis to the sky Rubin can actually observe:
            rubin_visibility_zone = compare_survey_footprints.calc_rubin_visibility(
                bundleDict, runName)
            log.write('rubin_visibility_zone: ' +
                      repr(rubin_visibility_zone) + '\n')
            log.write('Npix rubin_visibility_zone: ' +
                      str(len(rubin_visibility_zone)) + '\n')
            map_data = getattr(map_data_table, mapName)
            log.write('map_data: ' + repr(map_data) + '\n')
            log.write('Npix map_data: ' + str(len(map_data)) + '\n')
            desired_healpix = compare_survey_footprints.calc_desired_survey_map(
                mapName, map_data, rubin_visibility_zone)
            log.write('N survey pixels: ' + str(len(desired_healpix)) + '\n')
            log.write('Desired survey pixels: ' +
                      repr(desired_healpix[0:10]) + '\n')
            FoM = eval_survey_cadence.eval_metrics_by_region(
                bundleDict, map_data, runName, mapName, tau_obs,
                rubin_visibility_zone, desired_healpix, datalog=log)
            log.write('VIM: ' + str(FoM.sumVIM) + '\n')
            log.write('Percent VIM: ' + str(FoM.percent_sumVIM) + '\n')
            log.write('SVGM: ' + str(FoM.sumSVGM) + '\n')
            log.write('Percent SVGM: ' + str(FoM.percent_sumSVGM) + '\n')
            log.write('VIP: ' + str(FoM.sumVIP) + '\n')
            log.write('Percent VIP: ' + str(FoM.percent_sumVIP) + '\n')
            # Record any NaN entries, which would indicate HEALpixels the
            # metric did not evaluate:
            idx = np.argwhere(np.isnan(metric1_data))
            log.write('NaN values metric 1: ' + repr(idx) + '\n')
            log.write('metricData entries: ' + repr(metric1_data[idx]) + '\n')
            log.write('Metric sum values: ' + repr(metric1_data.sum()) + '\n')
            log.write('-------------------------------------------\n')
def diff_footprint():
    """Compare an OpSim run's footprint against each desired science survey
    region, computing the overlap, priority and nobs-priority figures of
    merit for every cadence category.

    Relies on the module-level tau_obs list and output_dir path.  Results
    are tabulated in <output_dir>/<runName>_footprint_metric_data.txt.
    """
    params = get_args()
    # Load the current OpSim database
    runName = os.path.split(params['opSim_db_file'])[-1].replace('.db', '')
    opsim_db = maf.OpsimDatabase(params['opSim_db_file'])
    # Load the Galactic Plane Survey footprint map data
    map_data_table = load_map_data(params['map_file_path'])
    print('Total number of pixels in map: ' + str(len(map_data_table)))
    # Assign threshold numbers of visits:
    n_visits_thresholds = calcNVisitsThresholds()
    # Start logging, and loop metric over all science maps.  The context
    # manager guarantees the logfile is closed on any exception:
    with open(
            os.path.join(output_dir, runName + '_footprint_metric_data.txt'),
            'w') as logfile:
        logfile.write(
            '# runName mapName tau_obs NVisits_threshold Npix_overlap %overlap Npix_missing %missing footprint_priority ideal_footprint_priority %ofPriority %ofNObsPriority\n'
        )
        for column in map_data_table.columns:
            mapName = column.name
            # Extract the map data for the current science map:
            map_data = getattr(map_data_table, mapName)
            print('Calculating survey region overlap for ' + mapName)
            # Calculate the metrics for this science map
            bundleDict = calcNVisits(opsim_db, runName, mapName,
                                     diagnostics=False)
            print(bundleDict.keys())
            # Calculate the Rubin visibility zone:
            rubin_visibility_zone = calc_rubin_visibility(bundleDict, runName)
            # Determine the HEALpix index of the desired science survey
            # region, taking the Rubin visibility zone into account:
            desired_healpix = calc_desired_survey_map(mapName, map_data,
                                                      rubin_visibility_zone)
            # Loop over each cadence category:
            for i in range(len(tau_obs)):
                # Instantiate FiguresOfMerit object to hold results:
                FoM = FiguresOfMerit()
                # Overlap between the OpSim and the desired survey region
                FoM = calcFootprintOverlap(runName, mapName, tau_obs[i],
                                           desired_healpix, bundleDict, FoM)
                # Summed priority of all surveyed HEALpixels as a percentage
                # of the ideal values expected from the survey region.
                # use_metric is a debugging toggle retained from the original.
                use_metric = True
                if use_metric:
                    # NOTE(review): this calls a 7-argument
                    # eval_footprint_priority, but a zero-argument function
                    # of the same name is also defined in this file --
                    # confirm the intended target is not being shadowed.
                    FoM = eval_footprint_priority(map_data, runName, mapName,
                                                  tau_obs[i], desired_healpix,
                                                  bundleDict, FoM)
                # Sum of nObservations and priority of all surveyed
                # HEALpixels as a percentage of the ideal values
                FoM = eval_footprint_nobs_priority(map_data, runName,
                                                   column.name, tau_obs[i],
                                                   desired_healpix,
                                                   bundleDict, FoM)
                # Record to the log:
                logfile.write(runName + ' ' + mapName + ' ' +
                              str(tau_obs[i]) + ' ' +
                              str(round(n_visits_thresholds[i], 0)) + ' ' +
                              str(FoM.overlap_healpix) + ' ' +
                              str(FoM.overlap_percent) + ' ' +
                              str(FoM.missing_healpix) + ' ' +
                              repr(FoM.missing_percent) + ' ' +
                              repr(FoM.footprint_priority) + ' ' +
                              repr(FoM.ideal_footprint_priority) + ' ' +
                              repr(FoM.region_priority_percent) + ' ' +
                              repr(FoM.nobs_priority_percent) + '\n')
# Check if user passed directory + filename as opsimDb. if len(os.path.dirname(args.opsimDb)) > 0: raise Exception( 'OpsimDB should be just the filename of the sqlite file (not %s). Use --dbDir.' % (args.opsimDb)) opsimName = args.opsimDb.replace('_sqlite.db', '') opsimName = opsimName.replace('.db', '') metadata = args.sqlConstraint.replace('=', '').replace( 'filter', '').replace("'", '').replace('"', '').replace('/', '.') if not args.skipComp: verbose = False # Get db connection info, and connect to database. dbfile = os.path.join(args.dbDir, args.opsimDb) oo = maf.OpsimDatabase(dbfile) sqlconstraint = args.sqlConstraint # Fetch the data from opsim. simdata = getData(oo, sqlconstraint) # Set up the time bins for the movie slicer. start_date = simdata['observationStartMJD'][0] if args.movieStepsize == 0: bins = simdata['observationStartMJD'] else: end_date = simdata['observationStartMJD'].max() bins = np.arange(start_date, end_date + args.movieStepSize / 2.0, args.movieStepSize, float) if args.addPreviousObs: # Go back and grab all the data, including all previous observations. if "night =" in sqlconstraint: sqlconstraint = sqlconstraint.replace("night =", "night <=")