def generate_greedy(nside):
    """Build one greedy survey per filter, each tagged with its region map.

    Parameters
    ----------
    nside : int
        HEALpix nside used for all maps and basis functions.

    Returns
    -------
    list of fs.Greedy_survey_fields
        One survey per filter in g, r, i, z, y (u is intentionally left out).
    """
    target_maps, norm_factor = generate_target_maps(nside)
    cloud_map = fs.generate_cloud_map(target_maps, filtername='i',
                                      wfd_cloud_max=0.7, scp_cloud_max=0.7,
                                      gp_cloud_max=0.7, nes_cloud_max=0.7)
    # u-band is deliberately excluded from this greedy list.
    filters = ['g', 'r', 'i', 'z', 'y']

    surveys = []
    for fname in filters:
        goal_map = target_maps[fname][0]
        basis_funcs = [
            fs.M5_diff_basis_function(filtername=fname, nside=nside),
            fs.Target_map_basis_function(filtername=fname,
                                         target_map=goal_map,
                                         out_of_bounds_val=hp.UNSEEN,
                                         nside=nside,
                                         norm_factor=norm_factor),
            fs.Aggressive_Slewtime_basis_function(filtername=fname,
                                                  nside=nside,
                                                  order=6., hard_max=120.),
            fs.Strict_filter_basis_function(filtername=fname),
            # The three below are masks/vetoes and carry zero weight.
            fs.Zenith_shadow_mask_basis_function(nside=nside,
                                                 shadow_minutes=0.,
                                                 max_alt=76.),
            fs.Bulk_cloud_basis_function(max_cloud_map=cloud_map,
                                         nside=nside),
            fs.Moon_avoidance_basis_function(nside=nside,
                                             moon_distance=40.),
        ]
        # Weight per basis function, in the same order as the list above.
        weights = np.array([3., 1., 3., 3., 0., 0., 0.])
        surveys.append(fs.Greedy_survey_fields(basis_funcs, weights,
                                               block_size=1,
                                               filtername=fname,
                                               dither=True,
                                               nside=nside,
                                               tag_fields=True,
                                               tag_map=target_maps[fname][1],
                                               tag_names=target_maps[fname][2],
                                               ignore_obs='DD'))
    return surveys
def testBaseline(self):
    """
    Set up a baseline survey and run for a few days. A crude way to touch lots of code.

    Builds one greedy survey per filter plus a scripted-pairs survey and the
    deep-drilling surveys, runs a ~2-day simulation, and asserts that pairs,
    DD fields, and multiple filters all appear in the output observations.
    """
    nside = fs.set_default_nside(nside=32)
    survey_length = 2.1  # days
    # Define what we want the final visit ratio map to look like
    target_map = fs.standard_goals(nside=nside)
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    surveys = []
    for filtername in filters:
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside))
        bfs.append(fs.North_south_patch_basis_function(zenith_min_alt=50.,
                                                       nside=nside))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        # One weight per basis function, in the order appended above.
        weights = np.array([3.0, 0.3, 1., 3., 3.])
        surveys.append(fs.Greedy_survey_fields(bfs, weights, block_size=1,
                                               filtername=filtername,
                                               dither=True, nside=nside))
    # Scripted survey that takes the second visit of each pair.
    surveys.append(fs.Pairs_survey_scripted([], [], ignore_obs='DD'))

    # Set up the DD
    dd_surveys = fs.generate_dd_surveys()
    surveys.extend(dd_surveys)

    scheduler = fs.Core_scheduler(surveys, nside=nside)
    observatory = Speed_observatory(nside=nside)
    observatory, scheduler, observations = fs.sim_runner(observatory, scheduler,
                                                         survey_length=survey_length,
                                                         filename=None)

    # Check that a second part of a pair was taken
    assert('scripted' in observations['note'])
    # Check that the COSMOS DD was observed
    assert('DD:COSMOS' in observations['note'])
    # And the u-band
    assert('DD:u,COSMOS' in observations['note'])
    # Make sure a few different filters were observed
    assert(len(np.unique(observations['filter'])) > 3)
    # Make sure lots of observations executed
    assert(observations.size > 1000)
def testsched(self):
    """Exercise the basic Core_scheduler API: update, request, flush, add."""
    target_map = fs.standard_goals()['r']
    basis_funcs = [
        fs.M5_diff_basis_function(),
        fs.Target_map_basis_function(target_map=target_map),
    ]
    weights = np.array([1., 1])
    survey = fs.Simple_greedy_survey_fields(basis_funcs, weights)
    scheduler = fs.Core_scheduler([survey])
    observatory = Speed_observatory()
    # Check that we can update conditions from the observatory telemetry
    scheduler.update_conditions(observatory.return_status())
    # Check that we can get an observation out
    obs = scheduler.request_observation()
    assert obs is not None
    # Check that we can flush the queue and that it empties
    scheduler.flush_queue()
    assert len(scheduler.queue) == 0
    # Check that a completed observation can be recorded back
    scheduler.add_observation(obs)
if __name__ == "__main__":
    # Build a boolean HEALpix mask covering the declination band
    # -35 < dec < -10 (the region to be "rolled" on/off).
    nside = 64
    mask = np.zeros(hp.nside2npix(nside), dtype=bool)
    ra, dec = utils.hpid2RaDec(nside, np.arange(mask.size))
    mregion = np.where((dec > -35) & (dec < -10))
    mask[mregion] = True

    # Run for 1-year and 2-year survey lengths.
    for survey_length in [365.25, 365.25 * 2]:
        year = np.round(survey_length / 365.25)
        # Define what we want the final visit ratio map to look like
        target_map = fs.standard_goals()['r']
        filtername = 'r'
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername))
        bfs.append(fs.Target_map_basis_function(target_map=target_map,
                                                filtername=filtername,
                                                out_of_bounds_val=hp.UNSEEN))
        bfs.append(fs.North_south_patch_basis_function(zenith_min_alt=50.))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername))
        bfs.append(fs.Rolling_mask_basis_function(mask=mask,
                                                  mjd_start=59580.035))
        # One weight per basis function, in the order appended above.
        weights = np.array([1., 0.2, 1., 2., 1.])
        survey = fs.Greedy_survey_fields(bfs, weights, block_size=1,
                                         filtername=filtername, dither=True)
        # NOTE(review): `survey` is not used within this visible chunk --
        # the original script presumably runs it via a scheduler/sim_runner
        # below this excerpt; confirm against the full file.
odd_year_target[filtername][top_half_wfd] *= down odd_year_target[filtername][bottom_half_wfd] *= up even_norm = fs.calc_norm_factor(even_year_target) odd_norm = fs.calc_norm_factor(odd_year_target) surveys = [] mod_year = 2 offset = 1 # Set up observations to be taken in blocks filter1s = ['u', 'g', 'r', 'i', 'z', 'y'] filter2s = [None, 'r', 'i', 'z', None, None] for filtername, filtername2 in zip(filter1s, filter2s): bfs = [] bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside)) if filtername2 is not None: bfs.append( fs.M5_diff_basis_function(filtername=filtername2, nside=nside)) bfs.append( Target_map_modulo_basis_function( filtername=filtername, target_map=even_year_target[filtername], mod_year=mod_year, offset=0, out_of_bounds_val=hp.UNSEEN, nside=nside, norm_factor=even_norm)) if filtername2 is not None: bfs.append( Target_map_modulo_basis_function(
def generate_slair_scheduler():
    """
    Build a Core_scheduler with blob (pair) surveys, greedy fill-in surveys,
    and deep-drilling surveys, ordered so pairs are evaluated first.

    Returns
    -------
    fs.Core_scheduler
    """
    nside = fs.set_default_nside(nside=32)
    # get rid of silly northern strip.
    target_map = fs.standard_goals(nside=nside)
    norm_factor = fs.calc_norm_factor(target_map)
    # List to hold all the surveys (for easy plotting later)
    surveys = []

    # Set up observations to be taken in blocks.
    # filter2s[i] is the second filter of the pair (None = single-filter blob).
    filter1s = ['u', 'g', 'r', 'i', 'z', 'y']
    filter2s = [None, 'g', 'r', 'i', None, None]
    pair_surveys = []
    for filtername, filtername2 in zip(filter1s, filter2s):
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        if filtername2 is not None:
            bfs.append(fs.M5_diff_basis_function(filtername=filtername2,
                                                 nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside,
                                                norm_factor=norm_factor))
        if filtername2 is not None:
            bfs.append(fs.Target_map_basis_function(filtername=filtername2,
                                                    target_map=target_map[filtername2],
                                                    out_of_bounds_val=hp.UNSEEN,
                                                    nside=nside,
                                                    norm_factor=norm_factor))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        bfs.append(fs.Zenith_shadow_mask_basis_function(nside=nside,
                                                        shadow_minutes=60.,
                                                        max_alt=76.))
        # 7 weights: M5, M5(f2), target, target(f2), slew, strict, zenith-mask.
        weights = np.array([3.0, 3.0, .3, .3, 3., 3., 0.])
        if filtername2 is None:
            # Need to scale weights up so filter balancing still works properly.
            # 5 weights: M5, target, slew, strict, zenith-mask.
            weights = np.array([6.0, 0.6, 3., 3., 0.])
        # XXX-
        # This is where we could add a look-ahead basis function to include
        # m5_diff in the future. Actually, having a near-future m5 would also
        # help prevent switching to u or g right at twilight? Maybe just need
        # a "filter future" basis function?
        if filtername2 is None:
            survey_name = 'blob, %s' % filtername
        else:
            survey_name = 'blob, %s%s' % (filtername, filtername2)
        surveys.append(fs.Blob_survey(bfs, weights, filtername=filtername,
                                      filter2=filtername2,
                                      survey_note=survey_name))
        pair_surveys.append(surveys[-1])

    # Let's set up some standard surveys as well to fill in the gaps. This is
    # my old silly masked version. It would be good to put in Tiago's version
    # and lift nearly all the masking. That way this can also chase sucker
    # holes.
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    #filters = ['i', 'z', 'y']
    greedy_surveys = []
    for filtername in filters:
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(fs.North_south_patch_basis_function(zenith_min_alt=50.,
                                                       nside=nside))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        bfs.append(fs.Zenith_shadow_mask_basis_function(nside=nside,
                                                        shadow_minutes=60.,
                                                        max_alt=76.))
        # 6 weights, one per basis function appended above.
        weights = np.array([3.0, 0.3, 1., 3., 3., 0.])
        # Might want to try ignoring DD observations here, so the DD area gets covered normally--DONE
        surveys.append(fs.Greedy_survey_fields(bfs, weights, block_size=1,
                                               filtername=filtername,
                                               dither=True, nside=nside,
                                               ignore_obs='DD'))
        greedy_surveys.append(surveys[-1])

    # Set up the DD surveys
    dd_surveys = fs.generate_dd_surveys()
    surveys.extend(dd_surveys)

    # put in as list-of-lists so pairs get evaluated first.
    survey_list_o_lists = [dd_surveys, pair_surveys, greedy_surveys]
    scheduler = fs.Core_scheduler(survey_list_o_lists, nside=nside)
    return scheduler
def generate_blob_surveys(nside):
    """
    Build the blob (pair) surveys, one per filter/filter-pair combination.

    Parameters
    ----------
    nside : int
        HEALpix nside used for all maps and basis functions.

    Returns
    -------
    list of fs.Blob_survey
    """
    # Define what we want the final visit ratio map to look like
    target_maps, norm_factor = generate_target_maps(nside)

    # set up a cloud map: flat 0.7 cloud limit over the whole sky
    cloud_map = target_maps['r'][0] * 0 + 0.7

    # Set up observations to be taken in blocks.
    # filter2s[i] is the second filter of the pair (None = single-filter blob).
    filter1s = ['u', 'g', 'r', 'i', 'z', 'y']
    filter2s = [None, 'g', 'r', 'i', None, None]
    pair_surveys = []
    for filtername, filtername2 in zip(filter1s, filter2s):
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        if filtername2 is not None:
            bfs.append(fs.M5_diff_basis_function(filtername=filtername2,
                                                 nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_maps[filtername][0],
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside,
                                                norm_factor=norm_factor))
        if filtername2 is not None:
            bfs.append(fs.Target_map_basis_function(filtername=filtername2,
                                                    target_map=target_maps[filtername2][0],
                                                    out_of_bounds_val=hp.UNSEEN,
                                                    nside=nside,
                                                    norm_factor=norm_factor))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(fs.Zenith_shadow_mask_basis_function(nside=nside,
                                                        shadow_minutes=60.,
                                                        max_alt=76.))
        bfs.append(fs.Moon_avoidance_basis_function(nside=nside,
                                                    moon_distance=40.))
        bfs.append(fs.Bulk_cloud_basis_function(max_cloud_map=cloud_map,
                                                nside=nside))
        # 9 weights: M5, M5(f2), target, target(f2), slew, strict, 3 masks.
        weights = np.array([3.0, 3.0, .3, .3, 3., 3., 0., 0., 0.])
        if filtername2 is None:
            # Need to scale weights up so filter balancing still works properly.
            # 7 weights: M5, target, slew, strict, 3 masks.
            weights = np.array([6.0, 0.6, 3., 3., 0., 0., 0.])
        if filtername2 is None:
            survey_name = 'blob, %s' % filtername
        else:
            survey_name = 'blob, %s%s' % (filtername, filtername2)
        pair_surveys.append(fs.Blob_survey(bfs, weights,
                                           filtername=filtername,
                                           filter2=filtername2,
                                           survey_note=survey_name,
                                           ignore_obs='DD',
                                           tag_fields=True,
                                           tag_map=target_maps[filtername][1],
                                           tag_names=target_maps[filtername][2]))
    return pair_surveys
if __name__ == '__main__':
    nside = fs.set_default_nside(nside=32)
    survey_length = 365.25 * 10  # days
    # Define what we want the final visit ratio map to look like
    years = np.round(survey_length / 365.25)
    target_map = fs.standard_goals(nside=nside)
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    surveys = []
    for filtername in filters:
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside))
        bfs.append(fs.North_south_patch_basis_function(zenith_min_alt=50.,
                                                       nside=nside))
        #bfs.append(fs.Zenith_mask_basis_function(maxAlt=78., penalty=-100, nside=nside))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        # One weight per basis function, in the order appended above.
        weights = np.array([3.0, 0.2, 1., 3., 3.])
        # NOTE(review): this chunk appears truncated -- `weights`/`bfs` are
        # built but no survey is created within the visible excerpt; the
        # original script presumably appends to `surveys` and runs a
        # scheduler below. Confirm against the full file.
import numpy as np
import lsst.sims.featureScheduler as fs
from lsst.sims.speedObservatory import Speed_observatory
import healpy as hp

if __name__ == "__main__":
    survey_length = 365.25  # days
    # Define what we want the final visit ratio map to look like
    target_map = fs.standard_goals()['r']
    filtername = 'r'
    bfs = []
    # teff=False: use raw m5 difference rather than effective exposure time.
    # NOTE(review): semantics of `teff` inferred from the keyword name only --
    # confirm against the M5_diff_basis_function documentation.
    bfs.append(fs.M5_diff_basis_function(filtername=filtername, teff=False))
    bfs.append(fs.Target_map_basis_function(target_map=target_map,
                                            filtername=filtername,
                                            out_of_bounds_val=hp.UNSEEN))
    bfs.append(fs.North_south_patch_basis_function(zenith_min_alt=50.))
    bfs.append(fs.Slewtime_basis_function(filtername=filtername))
    # One weight per basis function, in the order appended above.
    weights = np.array([1., 0.2, 1., 2.])
    surveys = []
    surveys.append(fs.Greedy_survey_fields(bfs, weights, block_size=1,
                                           filtername=filtername))
    # Scripted survey that takes the second visit of each pair.
    surveys.append(fs.Pairs_survey_scripted([], []))
    scheduler = fs.Core_scheduler(surveys)
    # NOTE(review): `scheduler` is built but not run within this visible
    # chunk -- the original script presumably calls fs.sim_runner below.
def year_1_surveys(nside=32, mjd0=None):
    """
    Generate a list of surveys for executing in year 1.

    Parameters
    ----------
    nside : int (32)
        HEALpix nside used for all maps and basis functions.
    mjd0 : float (None)
        Survey start MJD. NOTE(review): currently unused in this function --
        confirm whether Time_limit_basis_function(day_max=365.25) should be
        anchored to it.

    Returns
    -------
    list of fs.Greedy_survey_fields
        One 'templates' survey per filter, limited to good conditions.
    """
    filters = ['u', 'g', 'r', 'i', 'z', 'y']

    target_map = large_target_map(nside, dec_max=34.3)
    norm_factor = fs.calc_norm_factor({'r': target_map})

    # set up a cloud map: flat 0.7 cloud limit over the whole sky
    cloud_map = target_map * 0 + 0.7

    # Set up per-filter m5-depth limit maps from the 70th-percentile depths.
    m5_limits = {}
    percentile_cut = 0.7
    m52per = sb.M5percentiles()
    for filtername in filters:
        m5_limits[filtername] = m52per.percentile2m5map(percentile_cut,
                                                        filtername=filtername,
                                                        nside=nside)

    surveys = []
    for filtername in filters:
        bfs = []
        bfs.append(fs.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map,
                                                out_of_bounds_val=hp.UNSEEN,
                                                nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(fs.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(fs.Strict_filter_basis_function(filtername=filtername))
        # Masks, zero weight: zenith shadow, moon avoidance, bulk clouds.
        bfs.append(fs.Zenith_shadow_mask_basis_function(nside=nside,
                                                        shadow_minutes=0.,
                                                        max_alt=76.))
        bfs.append(fs.Moon_avoidance_basis_function(nside=nside,
                                                    moon_distance=40.))
        bfs.append(fs.Bulk_cloud_basis_function(max_cloud_map=cloud_map,
                                                nside=nside))
        # One weight per basis function, in the order appended above.
        weights = [3.0, 0.3, 3., 3., 0, 0., 0.]

        # add in some constraints to make sure we only observe in good
        # conditions and shut off after 3 good ones
        bfs.append(Limit_m5_map_basis_function(m5_limits[filtername],
                                               nside=nside,
                                               filtername=filtername))
        bfs.append(Seeing_limit_basis_function(nside=nside,
                                               filtername=filtername))
        bfs.append(Time_limit_basis_function(day_max=365.25))
        # XXX--Do I need a m5-depth limit on here too?
        bfs.append(Nvis_limit_basis_function(nside=nside,
                                             filtername=filtername,
                                             n_limit=3,
                                             seeing_limit=1.2,
                                             time_lag=0.45,
                                             m5_limit_map=m5_limits[filtername]))
        # The four limit functions above are pure constraints: zero weight.
        weights.extend([0, 0, 0, 0])
        weights = np.array(weights)

        # Might want to try ignoring DD observations here, so the DD area gets covered normally--DONE
        surveys.append(fs.Greedy_survey_fields(bfs, weights, block_size=1,
                                               filtername=filtername,
                                               dither=True, nside=nside,
                                               ignore_obs='DD',
                                               survey_name='templates'))
    # Do we want to cover all the potential area LSST could observe? In case
    # a GW goes off in the north not in the NES.
    return surveys