def get_args():
  """Builds the base navigation-task config for hard, rejection-sampled goals.

  Starts from `nec.nav_env_base_config()` and overrides the goal-sampling
  strategy and the camera/image geometry from command-line flags.

  Returns:
    The configured nav-env config object.
  """
  navtask = nec.nav_env_base_config()
  # Hard task: rejection-sample goals (M candidates) at least min_dist away.
  navtask.task_params.type = 'rng_rejection_sampling_many'
  navtask.task_params.rejection_sampling_M = 2000
  navtask.task_params.min_dist = 10
  # Square images at the flag-controlled resolution; keep camera and task
  # image sizes in sync.
  sz = FLAGS.image_size
  navtask.camera_param.fov = FLAGS.fov
  navtask.camera_param.height = sz
  navtask.camera_param.width = sz
  navtask.task_params.img_height = sz
  navtask.task_params.img_width = sz
  # BUG FIX: original called LOGGING.info (undefined name, NameError at
  # runtime); the module's logger is `logging` as used elsewhere in the file.
  logging.info('navtask: %s', navtask)
  return navtask
def get_args():
  """Returns the base nav-env config tuned for hard goal sampling.

  The base config is overridden with the rejection-sampling task type and
  with camera/image dimensions taken from command-line flags.
  """
  navtask = nec.nav_env_base_config()

  # Goal sampling: many rejection-sampled goals, far from the start.
  task = navtask.task_params
  task.type = 'rng_rejection_sampling_many'
  task.rejection_sampling_M = 2000
  task.min_dist = 10

  # Square camera frames at the flag-controlled size; mirror the image
  # geometry onto the task params.
  side = FLAGS.image_size
  cam = navtask.camera_param
  cam.fov = FLAGS.fov
  cam.height = side
  cam.width = side
  task.img_height = side
  task.img_width = side

  logging.info('navtask: %s', navtask)
  return navtask
def process_navtask_str(navtask_str):
  """Builds a nav-env config from an encoded navtask string.

  Starts from `nec.nav_env_base_config()` and clobbers fields with the
  values parsed out of `navtask_str` by `get_navtask_vars`: geometry
  (orientations, distances, step size), auxiliary camera views, data
  augmentation mode, history length, task type, and input modality.

  Args:
    navtask_str: Encoded task-description string understood by
      `get_navtask_vars`.

  Returns:
    The fully configured nav-env config, with `dataset` attached via
    `factory.get_dataset`.
  """
  navtask = nec.nav_env_base_config()

  # Clobber with overrides from strings.
  navtask_vars = get_navtask_vars(navtask_str)
  navtask.task_params.n_ori = int(navtask_vars.n_ori)
  navtask.task_params.max_dist = int(navtask_vars.max_dist)
  navtask.task_params.num_steps = int(navtask_vars.num_steps)
  navtask.task_params.step_size = int(navtask_vars.step_size)
  # Jitter positions by up to half a step during augmentation.
  navtask.task_params.data_augment.delta_xy = int(navtask_vars.step_size) / 2.

  # Auxiliary views: symmetric rotations of +/-1..k camera-FOV increments
  # around the primary view (k encoded at aux_views[2] — presumably a
  # digit in the spec string; TODO confirm against get_navtask_vars).
  n_aux_views_each = int(navtask_vars.aux_views[2])
  aux_delta_thetas = np.concatenate(
      (np.arange(n_aux_views_each) + 1, -1 - np.arange(n_aux_views_each)))
  aux_delta_thetas = aux_delta_thetas * np.deg2rad(navtask.camera_param.fov)
  navtask.task_params.aux_delta_thetas = aux_delta_thetas

  if navtask_vars.data_aug == 'aug':
    navtask.task_params.data_augment.structured = False
  elif navtask_vars.data_aug == 'straug':
    navtask.task_params.data_augment.structured = True
  else:
    logging.fatal('Unknown navtask_vars.data_aug %s.', navtask_vars.data_aug)
    assert (False)

  # History spec like 'h<k>': k previous frames plus the current one.
  navtask.task_params.num_history_frames = int(navtask_vars.history[1:])
  navtask.task_params.n_views = 1 + navtask.task_params.num_history_frames
  navtask.task_params.goal_channels = int(navtask_vars.n_ori)

  if navtask_vars.task == 'hard':
    # Hard task: many rejection-sampled far-away goals.
    navtask.task_params.type = 'rng_rejection_sampling_many'
    navtask.task_params.rejection_sampling_M = 2000
    navtask.task_params.min_dist = 10
  elif navtask_vars.task == 'r2r':
    navtask.task_params.type = 'room_to_room_many'
  elif navtask_vars.task == 'ST':
    # Semantic task at hand: one goal channel per target object class.
    navtask.task_params.goal_channels = \
        len(navtask.task_params.semantic_task.class_map_names)
    navtask.task_params.rel_goal_loc_dim = \
        len(navtask.task_params.semantic_task.class_map_names)
    navtask.task_params.type = 'to_nearest_obj_acc'
  else:
    logging.fatal('navtask_vars.task: should be hard or r2r, ST')
    assert (False)

  if navtask_vars.modality == 'rgb':
    navtask.camera_param.modalities = ['rgb']
    navtask.camera_param.img_channels = 3
  elif navtask_vars.modality == 'd':
    # Depth renders carry 2 channels here (presumably depth + validity
    # mask — TODO confirm against the renderer).
    navtask.camera_param.modalities = ['depth']
    navtask.camera_param.img_channels = 2
  else:
    # BUG FIX: an unknown modality previously fell through silently,
    # leaving the camera at its defaults; fail loudly like the other
    # validation branches above.
    logging.fatal('Unknown navtask_vars.modality %s.', navtask_vars.modality)
    assert (False)

  # Mirror camera geometry/modalities onto the task params.
  navtask.task_params.img_height = navtask.camera_param.height
  navtask.task_params.img_width = navtask.camera_param.width
  navtask.task_params.modalities = navtask.camera_param.modalities
  navtask.task_params.img_channels = navtask.camera_param.img_channels
  navtask.task_params.img_fov = navtask.camera_param.fov

  navtask.dataset = factory.get_dataset(navtask_vars.dataset_name)
  return navtask