def add_mag_clouds(inmap=None, nside=32):
    if inmap is None:
        result = standard_goals(nside=nside)
    else:
        result = inmap
    ra, dec = utils.ra_dec_hp_map(nside=nside)

    lmc_ra = np.radians(80.893860)
    lmc_dec = np.radians(-69.756126)
    lmc_radius = np.radians(10.)

    smc_ra = np.radians(13.186588)
    smc_dec = np.radians(-72.828599)
    smc_radius = np.radians(5.)

    dist_to_lmc = _angularSeparation(lmc_ra, lmc_dec, ra, dec)
    lmc_pix = np.where(dist_to_lmc < lmc_radius)

    dist_to_smc = _angularSeparation(smc_ra, smc_dec, ra, dec)
    smc_pix = np.where(dist_to_smc < smc_radius)

    # Boost the Magellanic Cloud regions up to the maximum of each filter's map
    for key in result:
        result[key][lmc_pix] = np.max(result[key])
        result[key][smc_pix] = np.max(result[key])
    return result
def gen_greedy_surveys(nside):
    """
    Make a quick set of greedy surveys
    """
    target_map = standard_goals(nside=nside)
    norm_factor = calc_norm_factor(target_map)
    # Let's remove the bluer filters since this should only be near twilight
    filters = ['r', 'i', 'z', 'y']
    surveys = []

    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Clouded_out_basis_function())
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                     dither=True, nside=nside, ignore_obs='DD'))
    return surveys
def generate_third(nside, nexp=1, min_area=100.):
    """A survey to take a third observation in a night
    """
    # OK, I want to see if there has been a successful pair taken in the night
    target_map = standard_goals(nside=nside)
    norm_factor = calc_norm_factor(target_map)
    surveys = []
    to_pair = ['gi', 'rz']
    for pairs in to_pair:
        for filtername in pairs:
            bfs = []
            bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
            bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                    target_map=target_map[pairs[0]],
                                                    out_of_bounds_val=np.nan, nside=nside,
                                                    norm_factor=norm_factor))
            bfs.append(bf.Third_observation_basis_function(filtername1=pairs[0], filtername2=pairs[1],
                                                           gap_min=40., gap_max=120.))
            # Masks, give these 0 weight
            bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
            bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=30.))
            bfs.append(bf.Clouded_out_basis_function())
            filternames = [fn for fn in pairs]
            bfs.append(bf.Filter_loaded_basis_function(filternames=filternames))
            bfs.append(bf.Not_twilight_basis_function())

            weights = [1., 1., 1., 0, 0, 0, 0, 0]
            survey = Blob_survey(bfs, weights, filtername1=pairs[0], filtername2=None,
                                 ignore_obs='DD', nexp=nexp, nside=nside,
                                 min_area=min_area, survey_note='third, %s' % pairs[0])
            surveys.append(survey)
    return surveys
def testsched(self):
    target_map = standard_goals()['r']
    bfs = []
    bfs.append(basis_functions.M5_diff_basis_function())
    bfs.append(basis_functions.Target_map_basis_function(target_map=target_map))
    weights = np.array([1., 1])
    survey = surveys.Greedy_survey(bfs, weights)
    scheduler = Core_scheduler([survey])

    observatory = Model_observatory()

    # Check that we can update conditions
    scheduler.update_conditions(observatory.return_conditions())

    # Check that we can get an observation out
    obs = scheduler.request_observation()
    assert (obs is not None)

    # Check that we can flush the Queue
    scheduler.flush_queue()
    assert (len(scheduler.queue) == 0)

    # Check that we can add an observation
    scheduler.add_observation(obs)
def blob_comcam(nexp=1, nside=256, filters=['g', 'r', 'i']):
    target_map = standard_goals(nside=nside)
    ra, dec = ra_dec_hp_map(nside=nside)
    # out_region = np.where((dec > np.radians(-40)) | (dec < np.radians(-50.)))
    in_region = np.where((dec <= np.radians(-40.)) & (dec >= np.radians(-50.)))
    for key in target_map:
        target_map[key] *= 0.
        target_map[key][in_region] = 1.

    final_tm = {}
    for key in filters:
        final_tm[key] = target_map[key]
    target_map = final_tm
    norm_factor = calc_norm_factor(target_map)

    survey_list = []
    time_needed = 23.
    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Clouded_out_basis_function())
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))
        bfs.append(bf.Time_to_twilight_basis_function(time_needed=time_needed))
        bfs.append(bf.Not_twilight_basis_function())

        weights = np.array([3.0, 0.3, 6., 3., 0., 0., 0., 0, 0, 0])
        # XXX-Note, need a new detailer here!, have to have dither=False until that can get passed through
        sv = surveys.Blob_survey(bfs, weights, filtername1=filtername, filtername2=None,
                                 dither=False, nside=nside, ignore_obs='DD', nexp=nexp,
                                 camera='comcam',
                                 detailers=[fs.detailers.Comcam_90rot_detailer()])
        survey_list.append(sv)

    return survey_list
def gen_greedy_surveys(nside, nexp=1, target_maps=None, mod_year=None, day_offset=None,
                       norm_factor=None, max_season=10):
    """
    Make a quick set of greedy surveys
    """
    # Only fall back to the standard footprint normalization if none was passed in
    if norm_factor is None:
        target_map = standard_goals(nside=nside)
        norm_factor = calc_norm_factor(target_map)
    # Let's remove the bluer filters since this should only be near twilight
    filters = ['r', 'i', 'z', 'y']
    surveys = []
    detailer = detailers.Camera_rot_detailer(min_rot=-87., max_rot=87.)

    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        target_list = [tm[filtername] for tm in target_maps]
        bfs.append(bf.Target_map_modulo_basis_function(filtername=filtername,
                                                       target_maps=target_list,
                                                       season_modulo=mod_year,
                                                       day_offset=day_offset,
                                                       out_of_bounds_val=np.nan,
                                                       nside=nside,
                                                       norm_factor=norm_factor,
                                                       max_season=max_season))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))
        bfs.append(bf.Planet_mask_basis_function(nside=nside))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                     dither=True, nside=nside, ignore_obs='DD',
                                     nexp=nexp, detailers=[detailer]))
    return surveys
def stuck_rolling(nside=32, scale_down_factor=0.2):
    """A bit of a trolling footprint. See what happens if we use a rolling
    footprint, but don't roll it.
    """
    sg = standard_goals()
    footprints = slice_wfd_area(2, sg, scale_down_factor=scale_down_factor)
    # Only take the first set
    footprints = footprints[0]
    return footprints
def ss_footprints(nside=32, dist_to_eclip=25.):
    fp = standard_goals(nside=nside)
    et = ecliptic_target(nside=nside, dist_to_eclip=dist_to_eclip)
    ra, dec = _hpid2RaDec(nside, np.arange(hp.nside2npix(nside)))
    bridge = np.where((et > 0) & (dec < 0) & (fp['r'] != 1))
    for key in fp:
        fp[key][bridge] = np.max(fp[key])
    return fp
def simple_footprint(nside=32):
    """Make a WFD-only footprint
    """
    sg = standard_goals(nside=nside)
    non_wfd_pix = np.where(sg['r'] != 1)
    for key in sg:
        sg[key][non_wfd_pix] = 0
    return sg
def add_mag_clouds(inmap=None, nside=32):
    if inmap is None:
        result = standard_goals(nside=nside)
    else:
        result = inmap
    mag_clouds_hpix = utils.magellanic_clouds_healpixels(nside)
    for key in result:
        result[key][np.where(mag_clouds_hpix == 1)[0]] = np.max(result[key])
    return result
def make_rolling_footprints(mjd_start=59853.5, sun_RA_start=3.27717639, nslice=2, scale=0.8):
    hp_footprints = standard_goals()

    down = 1. - scale
    up = nslice - down * (nslice - 1)
    start = [1., 1., 1.]
    end = [1., 1., 1., 1., 1., 1.]
    if nslice == 2:
        rolling = [up, down, up, down, up, down]
    elif nslice == 3:
        rolling = [up, down, down, up, down, down]
    elif nslice == 6:
        rolling = [up, down, down, down, down, down]
    all_slopes = [start + np.roll(rolling, i).tolist() + end for i in range(nslice)]

    fp_non_wfd = Footprint(mjd_start, sun_RA_start=sun_RA_start)
    rolling_footprints = []
    for i in range(nslice):
        step_func = Step_slopes(rise=all_slopes[i])
        rolling_footprints.append(Footprint(mjd_start, sun_RA_start=sun_RA_start,
                                            step_func=step_func))

    wfd_indx = np.where(hp_footprints['r'] == 1)[0]
    non_wfd_indx = np.where(hp_footprints['r'] != 1)[0]
    wfd = hp_footprints['r'] * 0
    wfd[wfd_indx] = 1
    wfd_accum = np.cumsum(wfd)
    split_wfd_indices = np.floor(np.max(wfd_accum) / nslice * (np.arange(nslice) + 1)).astype(int)
    split_wfd_indices = split_wfd_indices.tolist()
    split_wfd_indices = [0] + split_wfd_indices

    for key in hp_footprints:
        temp = hp_footprints[key] + 0
        temp[wfd_indx] = 0
        fp_non_wfd.set_footprint(key, temp)

        for i, spi in enumerate(split_wfd_indices[0:-1]):
            temp = hp_footprints[key] + 0
            temp[non_wfd_indx] = 0
            indx = wfd_indx[split_wfd_indices[i]:split_wfd_indices[i + 1]]
            temp[indx] = 0
            rolling_footprints[i].set_footprint(key, temp)

    result = Footprints([fp_non_wfd] + rolling_footprints)
    return result
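# Worked example (illustration only, not part of any scheduler configuration):
# the slope lists built inside make_rolling_footprints, evaluated by hand for
# the defaults nslice=2, scale=0.8. Each slice of the WFD alternates between
# an emphasized and a de-emphasized season before settling back to the
# nominal footprint.
nslice, scale = 2, 0.8
down = 1. - scale                  # 0.2
up = nslice - down * (nslice - 1)  # 1.8
rolling = [up, down, up, down, up, down]
# all_slopes[0] = [1, 1, 1] + [1.8, 0.2, 1.8, 0.2, 1.8, 0.2] + [1, 1, 1, 1, 1, 1]
# all_slopes[1] = [1, 1, 1] + [0.2, 1.8, 0.2, 1.8, 0.2, 1.8] + [1, 1, 1, 1, 1, 1]
# i.e. slice 0 ramps up in the first rolling season, slice 1 in the second.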
def generate_blobs(nside):
    target_map = standard_goals(nside=nside)
    norm_factor = calc_norm_factor(target_map)
    # List to hold all the surveys (for easy plotting later)
    surveys = []
    # Set up observations to be taken in blocks
    filter1s = ['u', 'g', 'r', 'i', 'z', 'y']
    filter2s = [None, 'g', 'r', 'i', None, None]
    # Ideal time between taking pairs
    pair_time = 22.
    times_needed = [pair_time, pair_time * 2]
    for filtername, filtername2 in zip(filter1s, filter2s):
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        if filtername2 is not None:
            bfs.append(bf.M5_diff_basis_function(filtername=filtername2, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        if filtername2 is not None:
            bfs.append(bf.Target_map_basis_function(filtername=filtername2,
                                                    target_map=target_map[filtername2],
                                                    out_of_bounds_val=np.nan, nside=nside,
                                                    norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=30.))
        bfs.append(bf.Clouded_out_basis_function())
        filternames = [fn for fn in [filtername, filtername2] if fn is not None]
        bfs.append(bf.Filter_loaded_basis_function(filternames=filternames))
        if filtername2 is None:
            time_needed = times_needed[0]
        else:
            time_needed = times_needed[1]
        bfs.append(bf.Time_to_twilight_basis_function(time_needed=time_needed))
        bfs.append(bf.Not_twilight_basis_function())

        weights = np.array([3.0, 3.0, .3, .3, 3., 3., 0., 0., 0., 0., 0., 0.])
        if filtername2 is None:
            # Need to scale weights up so filter balancing still works properly.
            weights = np.array([6.0, 0.6, 3., 3., 0., 0., 0., 0., 0., 0.])

        if filtername2 is None:
            survey_name = 'blob, %s' % filtername
        else:
            survey_name = 'blob, %s%s' % (filtername, filtername2)
        surveys.append(Blob_survey(bfs, weights, filtername1=filtername, filtername2=filtername2,
                                   ideal_pair_time=pair_time, nside=nside,
                                   survey_note=survey_name, ignore_obs='DD', dither=True))
    return surveys
def generate_high_am(nside, nexp=1, n_high_am=2, template_weight=6.):
    """Let's set this up like the blob, but then give it a little extra weight.
    """
    target_map = standard_goals(nside=nside)['r']
    target_map[np.where(target_map > 0)] = 1.
    filters = ['u', 'g']
    surveys = []
    survey_name = 'high_am'
    blob_time = 22.  # set to something
    for filtername in filters:
        detailer_list = []
        detailer_list.append(detailers.Camera_rot_detailer(min_rot=-87., max_rot=87.))
        detailer_list.append(detailers.Close_alt_detailer())

        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        bfs.append(bf.N_obs_high_am_basis_function(nside=nside, footprint=target_map,
                                                   filtername=filtername, n_obs=n_high_am,
                                                   season=300., out_of_bounds_val=np.nan))
        bfs.append(bf.Constant_basis_function())
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=30.))
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))
        bfs.append(bf.Time_to_twilight_basis_function(time_needed=blob_time))
        bfs.append(bf.Not_twilight_basis_function())
        bfs.append(bf.Planet_mask_basis_function(nside=nside))

        weights = np.array([6., 0.6, 3., template_weight * 2, 1., 0., 0., 0., 0., 0., 0.])
        surveys.append(Blob_survey(bfs, weights, filtername1=filtername, filtername2=None,
                                   ideal_pair_time=blob_time, nside=nside,
                                   survey_note=survey_name, ignore_obs='DD', dither=True,
                                   nexp=nexp, detailers=detailer_list))
    return surveys
def gen_greedy_surveys(nside, nexp=1):
    """
    Make a quick set of greedy surveys
    """
    target_map = standard_goals(nside=nside)
    # Let's just set it to 1 in all filters
    for key in target_map:
        target_map[key][np.where(target_map[key] > 0)] = 1
    norm_factor = calc_norm_factor(target_map)

    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    surveys = []

    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Clouded_out_basis_function())
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                     dither=True, nside=nside, ignore_obs=['DD', 'blob'],
                                     nexp=nexp, exptime=1., survey_name='twilight'))
    return surveys
def make_rolling_footprints(mjd_start=59853.5, sun_RA_start=3.27717639, nslice=2,
                            scale=0.8, nside=32):
    hp_footprints = standard_goals(nside=nside)

    down = 1. - scale
    up = nslice - down * (nslice - 1)
    start = [1., 1., 1.]
    end = [1., 1., 1., 1., 1., 1.]
    if nslice == 2:
        rolling = [up, down, up, down, up, down]
    elif nslice == 3:
        rolling = [up, down, down, up, down, down]
    elif nslice == 6:
        rolling = [up, down, down, down, down, down]
    all_slopes = [start + np.roll(rolling, i).tolist() + end for i in range(nslice)]

    fp_non_wfd = Footprint(mjd_start, sun_RA_start=sun_RA_start)
    rolling_footprints = []
    for i in range(nslice):
        step_func = Step_slopes(rise=all_slopes[i])
        rolling_footprints.append(Footprint(mjd_start, sun_RA_start=sun_RA_start,
                                            step_func=step_func))

    split_wfd_indices = slice_wfd_area_quad(hp_footprints, nslice=nslice)

    wfd = hp_footprints['r'] * 0
    wfd_indx = np.where(hp_footprints['r'] == 1)[0]
    non_wfd_indx = np.where(hp_footprints['r'] != 1)[0]
    wfd[wfd_indx] = 1

    roll = np.zeros(nslice)
    roll[-1] = 1

    for key in hp_footprints:
        temp = hp_footprints[key] + 0
        temp[wfd_indx] = 0
        fp_non_wfd.set_footprint(key, temp)

        for i in range(nslice):
            temp = hp_footprints[key] + 0
            temp[non_wfd_indx] = 0
            for j in range(nslice * 2):
                indx = wfd_indx[split_wfd_indices[j]:split_wfd_indices[j + 1]]
                temp[indx] = temp[indx] * roll[(i + j) % nslice]
            rolling_footprints[i].set_footprint(key, temp)

    result = Footprints([fp_non_wfd] + rolling_footprints)
    return result
def wfd_half(target_map=None):
    """Return two maps that split the WFD in two dec bands
    """
    if target_map is None:
        sg = standard_goals()
        target_map = sg['r'] + 0
    wfd_pix = np.where(target_map == 1)[0]
    wfd_map = target_map * 0
    wfd_map[wfd_pix] = 1
    wfd_halves = slice_wfd_area(2, {'r': wfd_map}, scale_down_factor=0)
    result = [-wfd_halves[0]['r'], -wfd_halves[1]['r']]
    return result
def gen_greedy_surveys(nside, add_DD=True):
    """
    Make a quick set of greedy surveys
    """
    target_map = standard_goals(nside=nside)
    norm_factor = calc_norm_factor(target_map)
    filters = ['u', 'g', 'r', 'i', 'z', 'y']
    surveys = []

    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Clouded_out_basis_function())
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                     dither=True, nside=nside, ignore_obs='DD'))

    surveys.append(Pairs_survey_scripted(None, ignore_obs='DD'))

    if add_DD:
        dd_surveys = generate_dd_surveys(nside=nside)
        surveys.extend(dd_surveys)

    return surveys
def greedy_footprint(nside=32, exclude_lat=17.):
    """Make a footprint just for the greedy algo
    """
    base_footprints = standard_goals(nside=nside)
    ra, dec = ra_dec_hp_map(nside=nside)
    coord = SkyCoord(ra=ra * u.rad, dec=dec * u.rad, frame='icrs')
    eclip_lat = coord.barycentrictrueecliptic.lat.deg
    mask = np.where(np.abs(eclip_lat) <= exclude_lat)
    for key in base_footprints:
        base_footprints[key][mask] = 0
    return base_footprints
def generate_high_am(nside, nexp=1, n_high_am=2, hair_weight=6., pair_time=22.,
                     camera_rot_limits=[-80., 80.], season=300.,
                     season_start_hour=-4., season_end_hour=2.,
                     shadow_minutes=60., max_alt=76., moon_distance=30.,
                     ignore_obs='DD', m5_weight=6., footprint_weight=0.6,
                     slewtime_weight=3., stayfilter_weight=3., template_weight=12.,
                     const_weight=1, min_area=288., mask_east=True, mask_west=False,
                     survey_name='high_am', filters='ug'):
    """Let's set this up like the blob, but then give it a little extra weight.
    """
    target_maps = standard_goals(nside=nside)
    surveys = []
    for filtername in filters:
        survey_name_final = survey_name + ', %s' % filtername
        target_map = target_maps[filtername] * 0
        target_map[np.where(target_maps[filtername] == np.max(target_maps[filtername]))] = 1.

        detailer_list = []
        detailer_list.append(detailers.Spider_rot_detailer())
        detailer_list.append(detailers.Close_alt_detailer())

        bfs = []
        bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight))
        bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside), slewtime_weight))
        bfs.append((bf.Strict_filter_basis_function(filtername=filtername), stayfilter_weight))
        bfs.append((bf.N_obs_high_am_basis_function(nside=nside, footprint=target_map,
                                                    filtername=filtername, n_obs=n_high_am,
                                                    season=season,
                                                    out_of_bounds_val=np.nan), hair_weight))
        bfs.append((bf.Constant_basis_function(), const_weight))
        # Masks, give these 0 weight
        bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes,
                                                         max_alt=max_alt, penalty=np.nan,
                                                         site='LSST'), 0.))
        bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0.))
        bfs.append((bf.Filter_loaded_basis_function(filternames=filtername), 0))
        bfs.append((bf.Time_to_twilight_basis_function(time_needed=pair_time), 0.))
        bfs.append((bf.Not_twilight_basis_function(), 0.))
        bfs.append((bf.Planet_mask_basis_function(nside=nside), 0.))
        if mask_east:
            bfs.append((bf.Mask_azimuth_basis_function(az_min=0., az_max=180.), 0.))
        if mask_west:
            bfs.append((bf.Mask_azimuth_basis_function(az_min=180., az_max=360.), 0.))

        weights = [val[1] for val in bfs]
        basis_functions = [val[0] for val in bfs]
        surveys.append(Blob_survey(basis_functions, weights, filtername1=filtername,
                                   filtername2=None, ideal_pair_time=pair_time, nside=nside,
                                   survey_note=survey_name_final, ignore_obs=ignore_obs,
                                   dither=True, nexp=nexp, detailers=detailer_list,
                                   min_area=min_area))
    return surveys
def generate_greedy_sched(nexp=1, nside=32, filters=['r']):
    # Generate a target map
    target_map = standard_goals(nside=nside)
    norm_factor = calc_norm_factor(target_map)

    greedy_surveys = []
    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                norm_factor=norm_factor))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Clouded_out_basis_function())
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        sv = surveys.Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                   dither=True, nside=nside, ignore_obs='DD', nexp=nexp)
        greedy_surveys.append(sv)

    survey_list = greedy_surveys
    return survey_list
def make_scheduler(scale_down=False, max_dither=0.7, nexp=1):
    nside = 32
    per_night = True  # Dither DDF per night
    camera_ddf_rot_limit = 75.

    extra_info = {}
    exec_command = ''
    for arg in sys.argv:
        exec_command += ' ' + arg
    extra_info['exec command'] = exec_command
    try:
        extra_info['git hash'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    except subprocess.CalledProcessError:
        extra_info['git hash'] = 'Not in git repo'
    extra_info['file executed'] = os.path.realpath(__file__)

    fileroot = 'baseline_nexp%i_' % nexp
    file_end = 'v1.6_'

    if scale_down:
        footprints = nes_light_footprints(nside=nside)
        fileroot = fileroot + 'scaleddown_'
    else:
        footprints = standard_goals(nside=nside)

    # Set up the DDF surveys to dither
    dither_detailer = detailers.Dither_detailer(per_night=per_night, max_dither=max_dither)
    details = [detailers.Camera_rot_detailer(min_rot=-camera_ddf_rot_limit,
                                             max_rot=camera_ddf_rot_limit),
               dither_detailer]
    ddfs = generate_dd_surveys(nside=nside, nexp=nexp, detailers=details)

    greedy = gen_greedy_surveys(nside, nexp=nexp, footprints=footprints)
    blobs = generate_blobs(nside, nexp=nexp, footprints=footprints)
    surveys = [ddfs, blobs, greedy]
    scheduler = Core_scheduler(surveys, nside=nside)
    return scheduler
def gen_greedy_surveys(nside):
    """
    Make a quick set of greedy surveys
    """
    target_map = standard_goals(nside=nside)
    filters = ['g', 'r', 'i', 'z', 'y']
    surveys = []
    cloud_map = target_map['r'][0] * 0 + 0.7

    for filtername in filters:
        bfs = []
        bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Target_map_basis_function(filtername=filtername,
                                                target_map=target_map[filtername],
                                                out_of_bounds_val=np.nan, nside=nside))
        bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))
        bfs.append(bf.Strict_filter_basis_function(filtername=filtername))
        # Masks, give these 0 weight
        bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))
        bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))
        bfs.append(bf.Bulk_cloud_basis_function(max_cloud_map=cloud_map, nside=nside))
        bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))

        weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])
        surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,
                                     dither=True, nside=nside))
    return surveys
def bulge_footprint(nside=32, bulge_frac=1., ll_frac=1., i_heavy=False):
    sg = standard_goals(nside=nside)
    wfd_north = np.radians(12.4)
    wfd_south = np.radians(-72.25)

    ra, dec = _hpid2RaDec(nside, np.arange(hp.nside2npix(nside)))
    wfd_pix = np.where(sg['r'] == 1)

    # Zero out the bulge as it is now
    # bulge_pix = np.where((sg['r'] == 0.15) & (dec > wfd_south))
    # for key in sg:
    #     sg[key][bulge_pix] = 0

    coord = SkyCoord(ra=ra * u.rad, dec=dec * u.rad)
    g_long, g_lat = coord.galactic.l.deg, coord.galactic.b.deg
    bulge_pix = np.where((g_long > -20) & (g_long < 20.) & (g_lat > -10) & (g_lat < 10.))
    lo_lat_pix = np.where((np.abs(g_lat) < 10.) & (dec < wfd_north))

    # scale things as desired
    for key in sg:
        # sg[key][lo_lat_pix] = ll_frac*np.max(sg[key][wfd_pix])
        sg[key][bulge_pix] = bulge_frac * np.max(sg[key][wfd_pix])

    if i_heavy:
        i_val = np.max(sg['i'][bulge_pix])
        total_val = 0
        for key in sg:
            total_val += np.max(sg[key][bulge_pix])
        not_i = total_val - i_val
        scale_down = (not_i - i_val) / (not_i)
        for key in sg:
            if key == 'i':
                sg[key][bulge_pix] = 2. * sg[key][bulge_pix]
            else:
                sg[key][bulge_pix] = scale_down * sg[key][bulge_pix]

    return sg
def wfd_scale(scale=.95, nside=32):
    """
    Let's scale the wfd region up to be 95% of the survey
    """
    sg = standard_goals(nside=nside)
    wfd_pix = np.where(sg['r'] == 1)
    other_pix = np.where((sg['r'] > 0) & (sg['r'] != 1))

    all_weight = 0
    wfd_weight = 0
    for key in sg:
        all_weight += np.sum(sg[key])
        wfd_weight += np.sum(sg[key][wfd_pix])
    other_weight = all_weight - wfd_weight

    scale_other = (1. - scale) * all_weight / other_weight
    for key in sg:
        sg[key][other_pix] = sg[key][other_pix] * scale_other
    return sg
try:
    extra_info['git hash'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
except subprocess.CalledProcessError:
    extra_info['git hash'] = 'Not in git repo'
extra_info['file executed'] = os.path.realpath(__file__)

fileroot = 'weather_%.1f_' % cloud_limit
file_end = 'v1.6_'

if scale_down:
    footprints_hp = nes_light_footprints(nside=nside)
    fileroot = fileroot + 'scaleddown_'
else:
    footprints_hp = standard_goals(nside=nside)

observatory = Model_observatory(nside=nside)
conditions = observatory.return_conditions()

footprints = Footprint(conditions.mjd_start, sun_RA_start=conditions.sun_RA_start, nside=nside)
for i, key in enumerate(footprints_hp):
    footprints.footprints[i, :] = footprints_hp[key]

# Set up the DDF surveys to dither
dither_detailer = detailers.Dither_detailer(per_night=per_night, max_dither=max_dither)
details = [detailers.Camera_rot_detailer(min_rot=-camera_ddf_rot_limit,
                                         max_rot=camera_ddf_rot_limit),
def gen_greedy_surveys(nside=32, nexp=1, exptime=30., filters=['r', 'i', 'z', 'y'],
                       camera_rot_limits=[-80., 80.], shadow_minutes=60., max_alt=76.,
                       moon_distance=30., ignore_obs='DD', m5_weight=3.,
                       footprint_weight=0.3, slewtime_weight=3., stayfilter_weight=3.):
    """
    Make a quick set of greedy surveys

    This is a convenience function to generate a list of survey objects that can be used with
    lsst.sims.featureScheduler.schedulers.Core_scheduler.
    To ensure we are robust against changes in the sims_featureScheduler codebase, all kwargs are
    explicitly set.

    Parameters
    ----------
    nside : int (32)
        The HEALpix nside to use
    nexp : int (1)
        The number of exposures to use in a visit.
    exptime : float (30.)
        The exposure time to use per visit (seconds)
    filters : list of str (['r', 'i', 'z', 'y'])
        Which filters to generate surveys for.
    camera_rot_limits : list of float ([-80., 80.])
        The limits to impose when rotationally dithering the camera (degrees).
    shadow_minutes : float (60.)
        Used to mask regions around zenith (minutes)
    max_alt : float (76.)
        The maximum altitude to use when masking zenith (degrees)
    moon_distance : float (30.)
        The mask radius to apply around the moon (degrees)
    ignore_obs : str or list of str ('DD')
        Ignore observations by surveys that include the given substring(s).
    m5_weight : float (3.)
        The weight for the 5-sigma depth difference basis function
    footprint_weight : float (0.3)
        The weight on the survey footprint basis function.
    slewtime_weight : float (3.)
        The weight on the slewtime basis function
    stayfilter_weight : float (3.)
        The weight on the basis function that tries to avoid filter changes.
    """
    # Define the extra parameters that are used in the greedy survey. I
    # think these are fairly set, so no need to promote to utility func kwargs
    greed_survey_params = {'block_size': 1, 'smoothing_kernel': None,
                           'seed': 42, 'camera': 'LSST', 'dither': True,
                           'survey_name': 'greedy'}

    footprints = standard_goals(nside=nside)
    sum_footprints = 0
    for key in footprints:
        sum_footprints += np.sum(footprints[key])

    surveys = []
    detailer = detailers.Spider_rot_detailer()

    for filtername in filters:
        bfs = []
        bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight))
        bfs.append((bf.Footprint_basis_function(filtername=filtername,
                                                footprint=footprints[filtername],
                                                out_of_bounds_val=np.nan, nside=nside,
                                                all_footprints_sum=sum_footprints),
                    footprint_weight))
        bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside), slewtime_weight))
        bfs.append((bf.Strict_filter_basis_function(filtername=filtername), stayfilter_weight))
        # Masks, give these 0 weight
        bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes,
                                                         max_alt=max_alt), 0))
        bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0))
        bfs.append((bf.Filter_loaded_basis_function(filternames=filtername), 0))
        bfs.append((bf.Planet_mask_basis_function(nside=nside), 0))

        weights = [val[1] for val in bfs]
        basis_functions = [val[0] for val in bfs]
        surveys.append(Greedy_survey(basis_functions, weights, exptime=exptime,
                                     filtername=filtername, nside=nside,
                                     ignore_obs=ignore_obs, nexp=nexp,
                                     detailers=[detailer], **greed_survey_params))
    return surveys
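# Minimal usage sketch for the greedy surveys above: wrap them in a
# Core_scheduler, attach a Model_observatory, and pull one observation.
# This follows the same call pattern as testsched and make_scheduler in this
# collection; the import paths are assumptions based on the v1.x
# lsst.sims.featureScheduler layout and may need adjusting for your install.
from lsst.sims.featureScheduler.schedulers import Core_scheduler
from lsst.sims.featureScheduler.modelObservatory import Model_observatory

greedy = gen_greedy_surveys(nside=32, nexp=1)
scheduler = Core_scheduler([greedy], nside=32)

observatory = Model_observatory(nside=32)
# Feed the current telemetry to the scheduler, then ask for an observation
scheduler.update_conditions(observatory.return_conditions())
obs = scheduler.request_observation()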
def generate_blobs(nside, nexp=1, exptime=30.,
                   filter1s=['u', 'u', 'u', 'g', 'r', 'i', 'z', 'y'],
                   filter2s=['u', 'g', 'r', 'r', 'i', 'z', 'y', 'y'],
                   pair_time=22., camera_rot_limits=[-80., 80.], n_obs_template=3,
                   season=300., season_start_hour=-4., season_end_hour=2.,
                   shadow_minutes=60., max_alt=76., moon_distance=30., ignore_obs='DD',
                   m5_weight=6., footprint_weight=0.6, slewtime_weight=3.,
                   stayfilter_weight=3., template_weight=12.):
    """
    Generate surveys that take observations in blobs.

    Parameters
    ----------
    nside : int (32)
        The HEALpix nside to use
    nexp : int (1)
        The number of exposures to use in a visit.
    exptime : float (30.)
        The exposure time to use per visit (seconds)
    filter1s : list of str
        The filternames for the first set
    filter2s : list of str
        The filter names for the second in the pair (None if unpaired)
    pair_time : float (22)
        The ideal time between pairs (minutes)
    camera_rot_limits : list of float ([-80., 80.])
        The limits to impose when rotationally dithering the camera (degrees).
    n_obs_template : int (3)
        The number of observations to take every season in each filter
    season : float (300)
        The length of season (i.e., how long before templates expire) (days)
    season_start_hour : float (-4.)
        For weighting how strongly a template image needs to be observed (hours)
    season_end_hour : float (2.)
        For weighting how strongly a template image needs to be observed (hours)
    shadow_minutes : float (60.)
        Used to mask regions around zenith (minutes)
    max_alt : float (76.)
        The maximum altitude to use when masking zenith (degrees)
    moon_distance : float (30.)
        The mask radius to apply around the moon (degrees)
    ignore_obs : str or list of str ('DD')
        Ignore observations by surveys that include the given substring(s).
    m5_weight : float (6.)
        The weight for the 5-sigma depth difference basis function
    footprint_weight : float (0.6)
        The weight on the survey footprint basis function.
    slewtime_weight : float (3.)
        The weight on the slewtime basis function
    stayfilter_weight : float (3.)
        The weight on the basis function that tries to avoid filter changes.
    template_weight : float (12.)
        The weight to place on getting image templates every season
    """
    blob_survey_params = {'slew_approx': 7.5, 'filter_change_approx': 140.,
                          'read_approx': 2., 'min_pair_time': 15., 'search_radius': 30.,
                          'alt_max': 85., 'az_range': 90., 'flush_time': 30.,
                          'smoothing_kernel': None, 'nside': nside, 'seed': 42,
                          'dither': True, 'twilight_scale': True}

    footprints = standard_goals(nside=nside)
    sum_footprints = 0
    for key in footprints:
        sum_footprints += np.sum(footprints[key])

    surveys = []
    times_needed = [pair_time, pair_time * 2]
    for filtername, filtername2 in zip(filter1s, filter2s):
        detailer_list = []
        detailer_list.append(detailers.Spider_rot_detailer())
        detailer_list.append(detailers.Close_alt_detailer())

        # List to hold tuples of (basis_function_object, weight)
        bfs = []

        if filtername2 is not None:
            bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside),
                        m5_weight / 2.))
            bfs.append((bf.M5_diff_basis_function(filtername=filtername2, nside=nside),
                        m5_weight / 2.))
        else:
            bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside),
                        m5_weight))

        if filtername2 is not None:
            bfs.append((bf.Footprint_basis_function(filtername=filtername,
                                                    footprint=footprints[filtername],
                                                    out_of_bounds_val=np.nan, nside=nside,
                                                    all_footprints_sum=sum_footprints),
                        footprint_weight / 2.))
            bfs.append((bf.Footprint_basis_function(filtername=filtername2,
                                                    footprint=footprints[filtername2],
                                                    out_of_bounds_val=np.nan, nside=nside,
                                                    all_footprints_sum=sum_footprints),
                        footprint_weight / 2.))
        else:
            bfs.append((bf.Footprint_basis_function(filtername=filtername,
                                                    footprint=footprints[filtername],
                                                    out_of_bounds_val=np.nan, nside=nside,
                                                    all_footprints_sum=sum_footprints),
                        footprint_weight))

        bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside),
                    slewtime_weight))
        bfs.append((bf.Strict_filter_basis_function(filtername=filtername),
                    stayfilter_weight))

        if filtername2 is not None:
            bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername, nside=nside,
                                                         footprint=footprints[filtername],
                                                         n_obs=n_obs_template, season=season,
                                                         season_start_hour=season_start_hour,
                                                         season_end_hour=season_end_hour),
                        template_weight / 2.))
            bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername2, nside=nside,
                                                         footprint=footprints[filtername2],
                                                         n_obs=n_obs_template, season=season,
                                                         season_start_hour=season_start_hour,
                                                         season_end_hour=season_end_hour),
                        template_weight / 2.))
        else:
            bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername, nside=nside,
                                                         footprint=footprints[filtername],
                                                         n_obs=n_obs_template, season=season,
                                                         season_start_hour=season_start_hour,
                                                         season_end_hour=season_end_hour),
                        template_weight))

        # Masks, give these 0 weight
        bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes,
                                                         max_alt=max_alt, penalty=np.nan,
                                                         site='LSST'), 0.))
        bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0.))
        filternames = [fn for fn in [filtername, filtername2] if fn is not None]
        bfs.append((bf.Filter_loaded_basis_function(filternames=filternames), 0))
        if filtername2 is None:
            time_needed = times_needed[0]
        else:
            time_needed = times_needed[1]
        bfs.append((bf.Time_to_twilight_basis_function(time_needed=time_needed), 0.))
        bfs.append((bf.Not_twilight_basis_function(), 0.))
        bfs.append((bf.Planet_mask_basis_function(nside=nside), 0.))

        # unpack the basis functions and weights
        weights = [val[1] for val in bfs]
        basis_functions = [val[0] for val in bfs]
        if filtername2 is None:
            survey_name = 'blob, %s' % filtername
        else:
            survey_name = 'blob, %s%s' % (filtername, filtername2)
        if filtername2 is not None:
            detailer_list.append(detailers.Take_as_pairs_detailer(filtername=filtername2))
        surveys.append(Blob_survey(basis_functions, weights, filtername1=filtername,
                                   filtername2=filtername2, exptime=exptime,
                                   ideal_pair_time=pair_time, survey_note=survey_name,
                                   ignore_obs=ignore_obs, nexp=nexp,
                                   detailers=detailer_list, **blob_survey_params))

    return surveys
nside = 32

extra_info = {}
exec_command = ''
for arg in sys.argv:
    exec_command += ' ' + arg
extra_info['exec command'] = exec_command
extra_info['git hash'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
extra_info['file executed'] = os.path.realpath(__file__)

# Just load up all the potential footprints, then use the arg to grab the right one
target_maps = {}
target_maps['big_sky'] = big_sky(nside=nside)
target_maps['gp_heavy'] = gp_smooth(nside=nside)
target_maps['baseline'] = standard_goals(nside=nside)
target_maps['big_sky_nouiy'] = big_sky_nouiy(nside=nside)
target_maps['newA'] = newA(nside=nside)
target_maps['newB'] = newB(nside=nside)
target_maps['bluer_footprint'] = bluer_footprint(nside=nside)
target_maps['stuck_rolling'] = stuck_rolling(nside=nside)
target_maps['big_sky_dust'] = big_sky_dust(nside=nside)

greedy = gen_greedy_surveys(nside, nexp=nexp, target_map=target_maps[target_name])
ddfs = generate_dd_surveys(nside=nside, nexp=nexp)
blobs = generate_blobs(nside, nexp=nexp, mixed_pairs=True, target_map=target_maps[target_name])
                                            max_dither=max_dither)
details = [detailers.Camera_rot_detailer(min_rot=-camera_ddf_rot_limit,
                                         max_rot=camera_ddf_rot_limit),
           dither_detailer]
ddfs = generate_dd_surveys(nside=nside, nexp=nexp, detailers=details)

observatory = Model_observatory(nside=nside)
conditions = observatory.return_conditions()

# Mark the position of the sun at the start of the survey. Useful for rolling cadence.
sun_ra_0 = conditions.sunRA  # radians
offset = create_season_offset(nside, sun_ra_0)
max_season = 6

sg = standard_goals()
# roll_maps = slice_wfd_area(mod_year, sg, scale_down_factor=scale_down_factor)
# footprints = roll_maps + [sg]
footprints = [sg] * (mod_year + 1)

all_footprints_sum = 0
all_rolling_sum = 0

wfd_indx = np.where(sg['r'] == 1)

for fp in sg:
    all_footprints_sum += np.sum(sg[fp])
    all_rolling_sum += np.sum(sg[fp][wfd_indx])

greedy = gen_greedy_surveys(nside, nexp=nexp, footprints=footprints,
def u_heavy_footprint(nside=32, u_boost=2.):
    target_map = standard_goals(nside=nside)
    # Use the fact that the WFD region is set to exactly 1 in the standard r-band goal map
    wfd_area = np.where(target_map['r'] == 1)[0]
    target_map['u'][wfd_area] *= u_boost
    return target_map
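# Sketch: a modified footprint like u_heavy_footprint can be dropped into any
# of the survey generators in this collection by swapping it in for
# standard_goals and recomputing the normalization. The snippet below builds a
# single u-band greedy survey that way, using the same bf / Greedy_survey /
# calc_norm_factor names (and required imports) as the functions above; the
# mask basis functions used elsewhere are omitted here for brevity.
target_map = u_heavy_footprint(nside=32, u_boost=2.)
norm_factor = calc_norm_factor(target_map)
bfs = [bf.M5_diff_basis_function(filtername='u', nside=32),
       bf.Target_map_basis_function(filtername='u', target_map=target_map['u'],
                                    out_of_bounds_val=np.nan, nside=32,
                                    norm_factor=norm_factor),
       bf.Slewtime_basis_function(filtername='u', nside=32),
       bf.Strict_filter_basis_function(filtername='u')]
weights = np.array([3.0, 0.3, 3., 3.])
u_survey = Greedy_survey(bfs, weights, filtername='u', nside=32, ignore_obs='DD')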