def main_with_seed(eval_setup_name, fold_id, use_test_split,
                   max_test_attempts_per_task, output_dir, agent_type,
                   **agent_kwargs):
    """Train an agent on a PHYRE eval setup/fold, evaluate it, and dump results.

    Args:
        eval_setup_name: PHYRE eval setup (e.g. 'ball_cross_template').
        fold_id: fold index passed to the train/test split helper.
        use_test_split: if True, evaluate on the test split; otherwise dev.
        max_test_attempts_per_task: attempt budget used for the target metric.
        output_dir: directory where checkpoints and results.json are written.
        agent_type: key into the registry returned by find_all_agents().
        **agent_kwargs: forwarded to the agent's train() and eval().

    Side effects: creates output_dir if needed and writes
    ``<output_dir>/results.json``.
    """
    train_task_ids, eval_task_ids = get_train_test(eval_setup_name, fold_id,
                                                   use_test_split)
    agent_kwargs['tier'] = phyre.eval_setup_to_action_tier(eval_setup_name)
    agent = find_all_agents()[agent_type]
    # It's fine to use eval_task_ids iff it's dev.
    dev_tasks_ids = None if use_test_split else eval_task_ids
    logging.info('Starting training')
    state = agent.train(train_task_ids,
                        output_dir=output_dir,
                        dev_tasks_ids=dev_tasks_ids,
                        **agent_kwargs)
    logging.info('Starting eval')
    evaluation = agent.eval(state,
                            eval_task_ids,
                            max_test_attempts_per_task,
                            output_dir=output_dir,
                            **agent_kwargs)
    num_tasks = len(eval_task_ids)
    results = {}
    results['num_eval_tasks'] = num_tasks
    results['metrics'] = evaluation.compute_all_metrics()
    results['args'] = sys.argv
    results['parsed_args'] = dict(
        agent_kwargs=agent_kwargs,
        main_kwargs=dict(eval_setup_name=eval_setup_name,
                         fold_id=fold_id,
                         use_test_split=use_test_split,
                         agent_type=agent_type,
                         max_test_attempts_per_task=max_test_attempts_per_task,
                         output_dir=output_dir))
    print(results['parsed_args'])
    results['target_metric'] = (
        results['metrics']['independent_solved_by_aucs']
        [max_test_attempts_per_task])
    logging.info('FINAL: %s', results['target_metric'])
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pattern.
    os.makedirs(output_dir, exist_ok=True)
    out_path = os.path.join(output_dir, 'results.json')
    with open(out_path, 'w') as stream:
        json.dump(results, stream)
def __init__(self, data_root, split, image_ext='.jpg'):
    """Build the (task_id, action) index for a PHYRE video dataset split.

    Args:
        data_root: root directory of the dataset on disk.
        split: 'train' (train + dev tasks) or anything else (test tasks).
        image_ext: file extension of stored frames.
    """
    self.data_root = data_root
    self.split = split
    self.image_ext = image_ext
    self.input_size = C.RPIN.INPUT_SIZE  # number of input images
    # Read the split-specific prediction horizon from the config by attribute
    # name. getattr replaces the original eval() on an f-string: same
    # behavior, no arbitrary-code-execution hazard.
    self.pred_size = getattr(
        C.RPIN, f'PRED_SIZE_{"TRAIN" if split == "train" else "TEST"}')
    self.seq_size = self.input_size + self.pred_size
    self.input_height, self.input_width = C.RPIN.INPUT_HEIGHT, C.RPIN.INPUT_WIDTH
    protocal = C.PHYRE_PROTOCAL
    fold = C.PHYRE_FOLD
    # Per-task sampling budget: 400 solved / 1600 unsolved actions for train,
    # 100 / 400 otherwise.
    num_pos = 400 if split == 'train' else 100
    num_neg = 1600 if split == 'train' else 400
    eval_setup = f'ball_{protocal}_template'
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold)
    tasks = train_tasks + dev_tasks if split == 'train' else test_tasks
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    # all the actions
    cache = phyre.get_default_100k_cache('ball')
    training_data = cache.get_sample(tasks, None)
    actions = training_data['actions']  # (100000 x 3)
    sim_statuses = training_data['simulation_statuses']  # (num_tasks x 100000)
    self.simulator = phyre.initialize_simulator(tasks, action_tier)
    # Each row of video_info is (task_index, action_x, action_y, action_r).
    self.video_info = np.zeros((0, 4))
    for t_id, t in enumerate(tqdm(tasks)):
        sim_status = sim_statuses[t_id]
        # Copy before shuffling so the shared cache arrays stay untouched.
        pos_acts = actions[sim_status == 1].copy()
        neg_acts = actions[sim_status == -1].copy()
        np.random.shuffle(pos_acts)
        np.random.shuffle(neg_acts)
        pos_acts = pos_acts[:num_pos]
        neg_acts = neg_acts[:num_neg]
        acts = np.concatenate([pos_acts, neg_acts])
        video_info = np.zeros((acts.shape[0], 4))
        video_info[:, 0] = t_id
        video_info[:, 1:] = acts
        self.video_info = np.concatenate([self.video_info, video_info])
def simulate_result(chosen_action, chosen_score, model_number, generation_number):
    """Simulate one action on the first dev task and log model vs. sim scores.

    Args:
        chosen_action: action vector to simulate on task 0.
        chosen_score: the model's predicted score for this action.
        model_number: identifier of the scoring model (logged only).
        generation_number: identifier of the generation (logged only).

    Returns:
        (pair, simulation_result): pair is np.array([chosen_action,
        simulation_score]); simulation_result is the raw PHYRE simulation.

    Side effects: saves a timestamped score log as a .npy file.
    """
    eval_setup = 'ball_cross_template'
    fold_id = 0  # For simplicity, we will just use one fold for evaluation.
    # Pass fold_id through (the original hard-coded 0 here, silently ignoring
    # any change to fold_id above).
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold_id)
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    tasks = dev_tasks[0:1]
    simulator = phyre.initialize_simulator(tasks, action_tier)
    evaluator = phyre.Evaluator(tasks)
    # Simulate the given action and add the status from taking the action to the evaluator.
    simulation_result = simulator.simulate_action(0,
                                                  chosen_action,
                                                  need_images=True,
                                                  need_featurized_objects=True)
    simulation_score = sf.ScoreFunctionValue(simulation_result)
    pair = np.array([chosen_action, simulation_score])
    timestr = time.strftime("%Y%m%d-%H%M%S")
    score_pair = [
        chosen_score, simulation_score, model_number, generation_number
    ]
    score_string = "ScoreLog" + timestr
    # NOTE(review): hard-coded, user-specific output directory; consider
    # making this a parameter or config value.
    path = "/home/kyra/Desktop/phyre/agents/Scores"
    np.save(os.path.join(path, score_string), score_pair)
    return pair, simulation_result
def test(self, start_id=0, end_id=25):
    """Evaluate the score model on PHYRE test tasks and report AUCCESS.

    Tasks are restricted to template ids in [start_id, end_id). For every
    candidate action the simulator is run, per-object boxes are extracted
    (or loaded from an on-disk cache), and the score model ranks the
    actions; AUCCESS is accumulated per task and shown in the progress bar.
    """
    random.seed(0)
    np.random.seed(0)
    protocal, fold_id = C.PHYRE_PROTOCAL, C.PHYRE_FOLD
    self.score_model.eval()
    print(f'testing using protocal {protocal} and fold {fold_id}')
    # setup the PHYRE evaluation split
    eval_setup = f'ball_{protocal}_template'
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    _, _, test_tasks = phyre.get_fold(eval_setup, fold_id)  # PHYRE setup
    candidate_list = [f'{i:05d}' for i in range(start_id, end_id)]  # filter tasks
    test_list = [
        task for task in test_tasks if task.split(':')[0] in candidate_list
    ]
    simulator = phyre.initialize_simulator(test_list, action_tier)
    # the action candidates are provided by the author of PHYRE benchmark
    num_actions = 10000
    cache = phyre.get_default_100k_cache('ball')
    acts = cache.action_array[:num_actions]
    training_data = cache.get_sample(test_list, None)
    # some statistics variable when doing the evaluation
    auccess = np.zeros((len(test_list), 100))
    batched_pred = C.SOLVER.BATCH_SIZE
    objs_color = None
    all_data, all_acts, all_rois, all_image = [], [], [], []
    # cache the initial bounding boxes from the simulator
    os.makedirs('cache', exist_ok=True)
    t_list = tqdm(test_list, 'Task')
    for task_id, task in enumerate(t_list):
        sim_statuses = training_data['simulation_statuses'][task_id]
        confs, successes = [], []
        boxes_cache_name = f'cache/{task.replace(":", "_")}.hkl'
        use_cache = os.path.exists(boxes_cache_name)
        all_boxes = hickle.load(boxes_cache_name) if use_cache else []
        # Index into the cached per-action box list; only counts valid actions.
        valid_act_id = 0
        for act_id, act in enumerate(
                tqdm(acts, 'Candidate Action', leave=False)):
            sim = simulator.simulate_action(task_id,
                                            act,
                                            stride=60,
                                            need_images=True,
                                            need_featurized_objects=True)
            # Cached statuses must agree with the live simulator.
            assert sim.status == sim_statuses[
                act_id], 'sanity check not passed'
            if sim.status == phyre.SimulationStatus.INVALID_INPUT:
                # Flush the pending batch if the very last action is invalid,
                # otherwise it would never be scored.
                if act_id == len(acts) - 1 and len(
                        all_data) > 0:  # final action is invalid
                    conf_t = self.batch_score(all_data, all_rois, all_image,
                                              objs_color)
                    confs = confs + conf_t
                    all_data, all_acts, all_rois, all_image = [], [], [], []
                continue
            successes.append(sim.status == phyre.SimulationStatus.SOLVED)
            # parse object, prepare input for network, the logic is the same as tools/gen_phyre.py
            image = cv2.resize(sim.images[0],
                               (self.input_width, self.input_height),
                               interpolation=cv2.INTER_NEAREST)
            # Flip vertically: PHYRE scene origin differs from image origin.
            all_image.append(image[::-1])
            image = phyre.observations_to_float_rgb(image)
            objs_color = sim.featurized_objects.colors
            # Drop static scene objects (BLACK/PURPLE are background/goal).
            objs_valid = [('BLACK' not in obj_color) and
                          ('PURPLE' not in obj_color)
                          for obj_color in objs_color]
            objs = sim.featurized_objects.features[:, objs_valid, :]
            objs_color = np.array(objs_color)[objs_valid]
            num_objs = objs.shape[1]
            if use_cache:
                boxes = all_boxes[valid_act_id]
                valid_act_id += 1
            else:
                # Rasterize each object alone and take its tight bounding box,
                # rescaled from scene coordinates to network input size.
                boxes = np.zeros((1, num_objs, 5))
                for o_id in range(num_objs):
                    mask = phyre.objects_util.featurized_objects_vector_to_raster(
                        objs[0][[o_id]])
                    mask_im = phyre.observations_to_float_rgb(mask)
                    mask_im[mask_im == 1] = 0
                    mask_im = mask_im.sum(-1) > 0
                    [h, w] = np.where(mask_im)
                    x1, x2, y1, y2 = w.min(), w.max(), h.min(), h.max()
                    x1 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                    x2 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                    y1 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                    y2 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                    boxes[0, o_id] = [o_id, x1, y1, x2, y2]
                all_boxes.append(boxes)
            data = image.transpose((2, 0, 1))[None, None, :]
            data = torch.from_numpy(data.astype(np.float32))
            rois = torch.from_numpy(boxes[..., 1:].astype(np.float32))[None, :]
            all_data.append(data)
            all_rois.append(rois)
            # Score in batches of batched_pred (and flush on the last action).
            if len(all_data) % batched_pred == 0 or act_id == len(
                    acts) - 1:
                conf_t = self.batch_score(all_data, all_rois, all_image,
                                          objs_color)
                confs = confs + conf_t
                all_data, all_rois, all_image = [], [], []
        if not use_cache:
            all_boxes = np.stack(all_boxes)
            hickle.dump(all_boxes,
                        boxes_cache_name,
                        mode='w',
                        compression='gzip')
        info = f'current AUCESS: '
        # Rank ground-truth successes by descending predicted confidence.
        top_acc = np.array(successes)[np.argsort(confs)[::-1]]
        for i in range(100):
            # auccess[task_id, i] == 1 iff any of the top-(i+1) actions solves.
            auccess[task_id, i] = int(np.sum(top_acc[:i + 1]) > 0)
        # AUCCESS weighting: log-spaced weights over attempts 1..100.
        # NOTE: `w` shadows the pixel-column array from the box loop above;
        # harmless here since that loop has finished.
        w = np.array([np.log(k + 1) - np.log(k) for k in range(1, 101)])
        s = auccess[:task_id + 1].sum(0) / auccess[:task_id + 1].shape[0]
        info += f'{np.sum(w * s) / np.sum(w) * 100:.2f}'
        t_list.set_description(info)
def gen_proposal(self, start_id=0, end_id=25):
    """Generate proposal rollouts (GT frames + predicted masks) for train/test.

    For each task in templates [start_id, end_id), samples up to 200 solved
    and 800 unsolved actions (cached on disk), simulates them, predicts
    object trajectories with self.generate_trajs, renders them, and dumps
    per-action .hkl files under data/PHYRE_proposal/<proposal_dir>/.
    """
    random.seed(0)
    np.random.seed(0)
    protocal = C.PHYRE_PROTOCAL
    fold_id = C.PHYRE_FOLD
    print(f'generate proposal for {protocal} fold {fold_id}')
    max_p_acts, max_n_acts, max_acts = 200, 800, 100000
    self.proposal_dir = f'{self.output_dir.split("/")[-1]}_' \
                        f'p{max_p_acts}n{max_n_acts}a{max_acts // 1000}'
    eval_setup = f'ball_{protocal}_template'
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(
        eval_setup, fold_id)
    # filter task
    train_tasks = train_tasks + dev_tasks
    candidate_list = [f'{i:05d}' for i in range(start_id, end_id)]
    for split in ['train', 'test']:
        train_list = [
            task for task in train_tasks
            if task.split(':')[0] in candidate_list
        ]
        test_list = [
            task for task in test_tasks
            if task.split(':')[0] in candidate_list
        ]
        # Pick the list for the current split directly (the original used
        # eval(f'{split}_list') repeatedly — same value, no eval()).
        split_list = train_list if split == 'train' else test_list
        if len(split_list) == 0:
            return
        simulator = phyre.initialize_simulator(split_list, action_tier)
        cache = phyre.get_default_100k_cache('ball')
        training_data = cache.get_sample(split_list, None)
        actions = cache.action_array[:max_acts]
        final_list = split_list
        t_list = tqdm(final_list, 'Task')
        for task_id, task in enumerate(t_list):
            box_cache_name = f'data/PHYRE_proposal/cache/{task.replace(":", "_")}_box.hkl'
            act_cache_name = f'data/PHYRE_proposal/cache/{task.replace(":", "_")}_act.hkl'
            use_cache = os.path.exists(box_cache_name) and os.path.exists(
                act_cache_name)
            if use_cache:
                acts = hickle.load(act_cache_name)
                all_boxes = hickle.load(box_cache_name)
            else:
                # Sample a balanced-ish action set from the 100k cache.
                sim_statuses = training_data['simulation_statuses'][
                    task_id]
                pos_acts = actions[sim_statuses == 1]
                neg_acts = actions[sim_statuses == -1]
                np.random.shuffle(pos_acts)
                np.random.shuffle(neg_acts)
                pos_acts = pos_acts[:max_p_acts]
                neg_acts = neg_acts[:max_n_acts]
                acts = np.concatenate([pos_acts, neg_acts])
                hickle.dump(acts,
                            act_cache_name,
                            mode='w',
                            compression='gzip')
                all_boxes = []
            valid_act_id = 0
            for act_id, act in enumerate(
                    tqdm(acts, 'Candidate Action', leave=False)):
                sim = simulator.simulate_action(
                    task_id,
                    act,
                    stride=60,
                    need_images=True,
                    need_featurized_objects=True)
                if not use_cache:
                    # Freshly-sampled actions are ordered pos-then-neg, so the
                    # simulation status is fully determined by the index.
                    if act_id < len(pos_acts):
                        assert sim.status == phyre.SimulationStatus.SOLVED
                    else:
                        assert sim.status == phyre.SimulationStatus.NOT_SOLVED
                assert sim.status != phyre.SimulationStatus.INVALID_INPUT
                raw_images = sim.images
                # Resized (and vertically flipped) ground-truth frames.
                rst_images = np.stack([
                    np.ascontiguousarray(
                        cv2.resize(rst_image,
                                   (self.input_width, self.input_height),
                                   interpolation=cv2.INTER_NEAREST)[::-1])
                    for rst_image in raw_images
                ])
                # prepare input for network:
                image = cv2.resize(raw_images[0],
                                   (self.input_width, self.input_height),
                                   interpolation=cv2.INTER_NEAREST)
                image = phyre.observations_to_float_rgb(image)
                # parse object
                objs_color = sim.featurized_objects.colors
                objs_valid = [('BLACK' not in obj_color) and
                              ('PURPLE' not in obj_color)
                              for obj_color in objs_color]
                objs = sim.featurized_objects.features[:, objs_valid, :]
                objs_color = np.array(objs_color)[objs_valid]
                num_objs = objs.shape[1]
                if use_cache:
                    boxes = all_boxes[valid_act_id]
                    valid_act_id += 1
                else:
                    # Tight per-object boxes from single-object rasterizations,
                    # rescaled from scene coordinates to network input size.
                    boxes = np.zeros((1, num_objs, 5))
                    for o_id in range(num_objs):
                        mask = phyre.objects_util.featurized_objects_vector_to_raster(
                            objs[0][[o_id]])
                        mask_im = phyre.observations_to_float_rgb(mask)
                        mask_im[mask_im == 1] = 0
                        mask_im = mask_im.sum(-1) > 0
                        [h, w] = np.where(mask_im)
                        x1, x2, y1, y2 = w.min(), w.max(), h.min(), h.max()
                        x1 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                        x2 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                        y1 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                        y2 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                        boxes[0, o_id] = [o_id, x1, y1, x2, y2]
                    all_boxes.append(boxes)
                data = image.transpose((2, 0, 1))[None, None, :]
                data = torch.from_numpy(data.astype(np.float32))
                rois = torch.from_numpy(boxes[..., 1:].astype(
                    np.float32))[None, :]
                # Background image: zero out the foreground object ids.
                bg_image = rst_images[0].copy()
                for fg_id in [1, 2, 3, 5]:
                    bg_image[bg_image == fg_id] = 0
                boxes, masks = self.generate_trajs(data, rois)
                rst_masks = np.stack([
                    self.render_mask_to_image(boxes[0, i],
                                              masks[0, i],
                                              images=bg_image.copy(),
                                              color=objs_color).astype(
                                                  np.uint8)
                    for i in range(self.pred_rollout)
                ])
                output_dir = f'data/PHYRE_proposal/{self.proposal_dir}/{split}/'
                output_dir = output_dir + 'pos/' if sim.status == phyre.SimulationStatus.SOLVED else output_dir + 'neg/'
                output_dir = output_dir + f'{task.replace(":", "_")}/'
                os.makedirs(output_dir, exist_ok=True)
                rst_dict = {'gt_im': rst_images, 'pred_im': rst_masks}
                hickle.dump(rst_dict,
                            f'{output_dir}/{act_id}.hkl',
                            mode='w',
                            compression='gzip')
            if not use_cache:
                all_boxes = np.stack(all_boxes)
                hickle.dump(all_boxes,
                            box_cache_name,
                            mode='w',
                            compression='gzip')
import random import numpy as np import phyre from tqdm import tqdm_notebook import animations random.seed(0) # Evaluation Setup eval_setup = 'ball_cross_template' fold_id = 0 # For simplicity, we will just use one fold for evaluation. train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, 0) action_tier = phyre.eval_setup_to_action_tier(eval_setup) tasks = dev_tasks[0:1] print((tasks)) simulator = phyre.initialize_simulator(tasks, action_tier) actions = simulator.build_discrete_action_space(max_actions=1000) def evaluate_random_agent(tasks, tier): # Create a simulator for the task and tier. simulator = phyre.initialize_simulator(tasks, tier) evaluator = phyre.Evaluator(tasks) assert tuple(tasks) == simulator.task_ids images = [] actions = [] for task_index in tqdm_notebook(range(len(tasks)), desc='Evaluate tasks'): while evaluator.get_attempts_for_task( task_index) < phyre.MAX_TEST_ATTEMPTS:
def main(cfg):
    """Run the training and testing.

    `cfg` is the experiment configuration (presumably a hydra/OmegaConf
    object — fields like cfg.train.batch_size are accessed; TODO confirm).
    Trains the configured agent, then evaluates and writes results to
    results.json (or results-vis.json when storing visualizations).
    """
    # Make a copy of overrides/etc files; so that if this code is run
    # again with a different override param (eg to generate vis etc), even if
    # it overwrites the config files and destroy that information, the original
    # info is stored and avlbl when making graphs etc
    if not os.path.exists('.hydra.orig'):
        subprocess.call('cp -r .hydra .hydra.orig', shell=True)
    templates_tasks = None
    if ':' in cfg.eval_setup_name:
        # Means that we only want template IDs defined after the ":"
        # The tasks itself would have "00001:<task_id>", hence splitting only 1
        cfg.eval_setup_name, templates_tasks = cfg.eval_setup_name.split(
            ':', 1)
    train_task_ids, eval_task_ids = get_train_test(cfg.eval_setup_name,
                                                   cfg.fold_id,
                                                   cfg.use_test_split)
    if templates_tasks is not None:
        # Subselect the train/eval task ids to only keep the ones in task_ids
        templates_tasks = templates_tasks.split(';')
        final_templates = []
        for temp_task in templates_tasks:
            # Each spec is "<template>" or "<template>:<task>"; templates may
            # also be given as a range "<lo>-<hi>".
            if ':' in temp_task:
                temp, task = temp_task.split(':')
            else:
                temp = temp_task
                task = ''
            if '-' in temp_task:
                final_templates += [
                    '{:05d}:{}'.format(el, task)
                    for el in range(int(temp.split('-')[0]),
                                    int(temp.split('-')[1]) + 1)
                ]
            else:
                final_templates += ['{:05d}:{}'.format(int(temp), task)]
        templates_tasks = sorted(list(set(final_templates)))
        logging.info('Running on %s templates/tasks', templates_tasks)

        def fits_templates_tasks(task_id):
            # True iff task_id matches any requested template/task prefix.
            for temp_task in templates_tasks:
                if task_id.startswith(temp_task):
                    return True
            return False

        train_task_ids = [
            el for el in train_task_ids if fits_templates_tasks(el)
        ]
        eval_task_ids = [
            el for el in eval_task_ids if fits_templates_tasks(el)
        ]
        assert len(train_task_ids) > 0 or len(eval_task_ids) > 0, (
            'At least one of train or test should have a task in it')
    train_task_ids = sorted(train_task_ids)
    eval_task_ids = sorted(eval_task_ids)
    logging.info('Final train task ids: %s', train_task_ids)
    logging.info('Final eval task ids: %s', eval_task_ids)
    assert 0.0 <= cfg.data_ratio_train <= 1.0, 'Should be within limits'
    assert 0.0 <= cfg.data_ratio_eval <= 1.0, 'Should be within limits'
    train_task_ids = get_subset_tasks(train_task_ids, cfg.data_ratio_train)
    eval_task_ids = get_subset_tasks(eval_task_ids, cfg.data_ratio_eval)
    assert cfg.tier is None, (
        'Do not set this beforehand; will figure from eval_setup')
    cfg.tier = phyre.eval_setup_to_action_tier(cfg.eval_setup_name)
    agent = find_all_agents()[cfg.agent.type]
    # Hydra runs in a per-experiment working directory; use it for outputs.
    output_dir = os.getcwd()
    max_test_attempts_per_task = (cfg.max_test_attempts_per_task
                                  or phyre.MAX_TEST_ATTEMPTS)
    # Validate the config
    # If the following are not true, it gives weird errors, eg missing argument
    # in forward
    assert cfg.num_gpus == 0 or cfg.train.batch_size % cfg.num_gpus == 0
    if cfg.eval.batch_size is not None:
        assert cfg.num_gpus == 0 or cfg.eval.batch_size % cfg.num_gpus == 0
    # Scale the number of iters
    if cfg.train.scale_num_iter != 1.0:
        for param_name in [
                'num_iter', 'report_every', 'save_checkpoints_every',
                'full_eval_every'
        ]:
            logging.info(
                f'cfg.train.scale_num_iter {cfg.train.scale_num_iter}')
            logging.info(f'param_name {param_name}')
            old_val = getattr(cfg.train, param_name)
            logging.info(f'old_val {old_val}')
            # Preserve the original type (int stays int) after scaling.
            new_val = type(old_val)(old_val * cfg.train.scale_num_iter)
            setattr(cfg.train, param_name, new_val)
            logging.warning('Setting cfg.train.%s to %s using scale %f',
                            param_name, new_val, cfg.train.scale_num_iter)
    # It's fine to use eval_task_ids iff it's dev.
    dev_tasks_ids = None if cfg.use_test_split else eval_task_ids
    summary_writer = SummaryWriter(log_dir=os.path.join(output_dir, 'logs'))
    full_eval_fn = partial(agent.eval,
                           task_ids=eval_task_ids,
                           max_attempts_per_task=max_test_attempts_per_task,
                           cfg=cfg)
    logging.info('Starting training')
    state = agent.train(train_task_ids,
                        dev_tasks_ids,
                        full_eval_fn,
                        output_dir=output_dir,
                        summary_writer=summary_writer,
                        cfg=cfg)
    ## Evaluation
    out_path = os.path.join(
        output_dir,
        'results-vis.json' if cfg.eval.store_vis else 'results.json')
    # Don't stop re-evaluations if doing vis
    if (os.path.exists(out_path) and not cfg.force_eval
            and not cfg.eval.store_vis):
        logging.warning('Eval out path exists (%s). Del or no eval.',
                        out_path)
        return 0
    # Moved all of this to train, so the final prediction would be stored
    # in results_intermediate as well. However keeping the code here too since
    # it's used when only running testing.
    logging.info('Starting final eval')
    evaluation = full_eval_fn(state)
    num_tasks = len(eval_task_ids)
    results = {}
    results['num_eval_tasks'] = num_tasks
    results['metrics'] = evaluation.compute_all_metrics()
    results['metrics_rollout'] = evaluation.compute_all_metrics_over_rollout()
    results['metrics_per_task'] = evaluation.compute_all_metrics_per_task()
    results['args'] = sys.argv
    results['parsed_args'] = dict(
        # cfg=cfg,  # Not json serializable, anyway will be stored in dir
        main_kwargs=dict(eval_setup_name=cfg.eval_setup_name,
                         fold_id=cfg.fold_id,
                         use_test_split=cfg.use_test_split,
                         agent_type=cfg.agent.type,
                         max_test_attempts_per_task=max_test_attempts_per_task,
                         output_dir=output_dir))
    print(results['parsed_args'])
    # Headline number: AUCCESS at the full attempt budget.
    results['target_metric'] = (
        results['metrics']['independent_solved_by_aucs']
        [max_test_attempts_per_task])
    results['target_metric_over_time'] = [
        el['independent_solved_by_aucs'][max_test_attempts_per_task]
        for el in results['metrics_rollout']
    ]
    logging.info('FINAL: %s; Over rollout: %s', results['target_metric'],
                 results['target_metric_over_time'])
    summary_writer.add_scalar('AUCCESS-full/eval', results['target_metric'])
    summary_writer.close()
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(out_path, 'w') as stream:
        json.dump(results, stream)
def worker(fold_id, eval_setup):
    """Run the simple agent on one fold's test split and return its AUCCESS."""
    _, _, test_tasks = phyre.get_fold(eval_setup, fold_id)
    tier = phyre.eval_setup_to_action_tier(eval_setup)
    return evaluate_simple_agent(test_tasks, tier).get_aucess()
def worker(fold_id, eval_setup):
    """Fit a KDE over solved actions on the train split, then evaluate on test."""
    train_tasks, _dev, test_tasks = phyre.get_fold(eval_setup, fold_id)
    tier = phyre.eval_setup_to_action_tier(eval_setup)
    solved_actions_pdf = train_kde(train_tasks, tier)
    return evaluate_agent(test_tasks, tier, solved_actions_pdf)
def worker(fold_id, eval_setup):
    """Evaluate the agent on the test split of the given fold."""
    _, _, test_tasks = phyre.get_fold(eval_setup, fold_id)
    tier = phyre.eval_setup_to_action_tier(eval_setup)
    return evaluate_agent(test_tasks, tier)
def generate_data(args):
    """Roll out random actions on one task template and export episodes.

    For each task in the template given by args.template, collects ~10
    rollouts per task (seeking up to 5 solving rollouts, then unsolved
    ones), saves the raw frame stacks as .npy under
    data-generation/numpys, then converts sufficiently long episodes into
    compressed .npz episode files under data-generation/episodes.
    """
    eval_setup = 'ball_cross_template'
    fold_id = 0
    train_tasks, dev_tasks, test_tasks = phyre.get_fold(eval_setup, fold_id)
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    tasks_2 = [x for x in train_tasks if x.startswith(args.template)]
    tasks = tasks_2
    simulator = phyre.initialize_simulator(tasks, action_tier)
    actions = simulator.build_discrete_action_space(max_actions=100000)
    # exist_ok replaces the original bare try/except-pass around os.mkdir:
    # same "already exists is fine" behavior without swallowing real errors.
    os.makedirs('./data-generation/numpys', exist_ok=True)
    os.makedirs('./data-generation/episodes', exist_ok=True)
    for task_index in tqdm_notebook(range(1, len(tasks_2))):
        solution_found_counter = 0
        # 10 rollouts for each
        for k in range(10):
            if solution_found_counter < 5:
                # Search (bounded) for a solving action.
                action = random.choice(actions)
                simulation = simulator.simulate_action(
                    task_index, action, need_images=True,
                    need_featurized_objects=True, stride=4)
                num_trys = 0
                while (simulation.status != phyre.simulation_cache.SOLVED
                       and num_trys < 10000):
                    num_trys += 1
                    action = random.choice(actions)
                    simulation = simulator.simulate_action(
                        task_index, action, need_images=True,
                        need_featurized_objects=True, stride=4)
                    # Invalid attempts don't count against the retry budget.
                    if str(simulation.status) == "SimulationStatus.INVALID_INPUT":
                        num_trys -= 1
            else:
                # Enough solutions collected; sample an unsolved rollout.
                action = random.choice(actions)
                simulation = simulator.simulate_action(
                    task_index, action, need_images=True,
                    need_featurized_objects=True, stride=4)
                while (simulation.status != phyre.simulation_cache.NOT_SOLVED):
                    action = random.choice(actions)
                    simulation = simulator.simulate_action(
                        task_index, action, need_images=True,
                        need_featurized_objects=True, stride=4)
            print(simulation.status)
            filename = 'data-generation/numpys/task-00023:' + str(
                task_index + 1) + '_' + str(k) + '.npy'
            print(filename)
            np.save(filename, simulation.images)
            if str(simulation.status) == "SimulationStatus.SOLVED":
                solution_found_counter += 1
    dataset_path = './data-generation/numpys'
    IMAGE_WIDTH = 256
    IMAGE_HEIGHT = 256
    max_episode_len = 120
    for filename in os.listdir(dataset_path):
        # NOTE(review): files above are saved with a lowercase 'task-' prefix,
        # so this capitalized check never skips them — confirm intent.
        if filename.startswith('Task'):
            continue
        counter = 0
        print("Loaded file: ", filename)
        data = os.path.join(dataset_path, filename)
        try:
            data = np.load(data)
        except Exception:
            # Skip unreadable/corrupt files (narrowed from a bare except).
            continue
        print(data.shape)
        if (len(data) >= max_episode_len):
            images = np.zeros((len(data), 64, 64, 3), dtype=np.uint8)
            rewards = np.ones((len(data)), dtype=np.float16)
            actions = np.ones((len(data), 6), dtype=np.float16)
            orientations = np.ones((len(data), 14), dtype=np.float16)
            velocity = np.ones((len(data), 9), dtype=np.float16)
            height = np.ones((len(data)), dtype=np.float16)
            discount = np.ones((len(data)), dtype=np.float16)
            for k, scene in enumerate(data):
                # Channel 0: everything except value 6; channel 1: only value 6.
                current_image = np.zeros((256, 256, 3), dtype=np.uint8)
                channel_0 = np.copy(np.flipud((scene)))
                channel_1 = np.copy(np.flipud((scene)))
                channel_0[channel_0 == 6] = 0
                channel_0[channel_0 > 0] = 255
                channel_1[channel_1 != 6] = 0
                channel_1[channel_1 == 6] = 255
                current_image[:, :, 0] = np.copy(channel_0)
                current_image[:, :, 1] = np.copy(channel_1)
                # 4x subsampling: 256x256 -> 64x64.
                scaled_down_image = np.copy(current_image[::4, ::4, :])
                images[k] = scaled_down_image
            # NOTE(review): "dsicount" looks like a typo for "discount", but
            # downstream readers may depend on this key — left unchanged.
            numpy_dict = {
                "image": images[:max_episode_len],
                "action": actions[:max_episode_len],
                "reward": rewards[:max_episode_len],
                "orientations": orientations[:max_episode_len],
                "velocity": velocity[:max_episode_len],
                "dsicount": discount[:max_episode_len],
                "height": height[:max_episode_len]
            }
            timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
            identifier = str(uuid.uuid4().hex)
            length = len(numpy_dict['reward'])
            directory = pathlib.Path('data-generation/episodes')
            filename = pathlib.Path(f'{timestamp}-{identifier}-{length}.npz')
            filename = directory / filename
            with io.BytesIO() as f1:
                np.savez_compressed(f1, **numpy_dict)
                f1.seek(0)
                with filename.open('wb') as f2:
                    f2.write(f1.read())
def test(self, start_id=0, end_id=25, fold_id=0, protocal='within'):
    """Evaluate the action scorer on PHYRE test tasks and dump AUCCESS stats.

    Supports several scoring modes (heuristic/mask/act/vid_cls) selected by
    instance flags; reports AUCCESS at several action budgets plus
    positive/negative classification accuracy, and pickles the stats.
    """
    random.seed(0)
    print(f'testing {protocal} fold {fold_id}')
    eval_setup = f'ball_{protocal}_template'
    action_tier = phyre.eval_setup_to_action_tier(eval_setup)
    _, _, test_tasks = phyre.get_fold(eval_setup, fold_id)  # PHYRE setup
    candidate_list = [f'{i:05d}' for i in range(start_id, end_id)]  # filter tasks
    test_list = [
        task for task in test_tasks if task.split(':')[0] in candidate_list
    ]
    simulator = phyre.initialize_simulator(test_list, action_tier)
    # PHYRE evaluation: AUCCESS computed at several action budgets.
    num_all_actions = [1000, 2000, 5000, 8000, 10000]
    auccess = np.zeros((len(num_all_actions), len(test_list), 100))
    batched_pred = C.SOLVER.BATCH_SIZE
    # DATA for network:
    all_data, all_acts, all_rois, all_image = [], [], [], []
    cache = phyre.get_default_100k_cache('ball')
    acts = cache.action_array[:10000]
    # actions = cache.action_array[:100000]
    # training_data = cache.get_sample(test_list, None)
    # Running totals for pos/neg classification accuracy across all tasks.
    pos_all, neg_all, pos_correct, neg_correct = 0, 0, 0, 0
    objs_color = None
    for task_id, task in enumerate(test_list):
        confs, successes, num_valid_act_idx = [], [], []
        boxes_cache_name = f'cache/{task.replace(":", "_")}.hkl'
        use_cache = os.path.exists(boxes_cache_name)
        all_boxes = hickle.load(boxes_cache_name) if use_cache else []
        valid_act_cnt = 0
        # sim_statuses = training_data['simulation_statuses'][task_id]
        # pos_acts = actions[sim_statuses == 1]
        # neg_acts = actions[sim_statuses == -1]
        # np.random.shuffle(pos_acts)
        # np.random.shuffle(neg_acts)
        # pos_acts = pos_acts[:50]
        # neg_acts = neg_acts[:200]
        # acts = np.concatenate([pos_acts, neg_acts])
        for act_id, act in enumerate(acts):
            if act_id == 0:
                pprint(f'{task}: {task_id} / {len(test_list)}')
            sim = simulator.simulate_action(task_id,
                                            act,
                                            stride=60,
                                            need_images=True,
                                            need_featurized_objects=True)
            if sim.status == phyre.SimulationStatus.INVALID_INPUT:
                # Track validity so budgets can be mapped to valid actions.
                num_valid_act_idx.append(0)
                # Flush the pending batch if the very last action is invalid.
                if act_id == len(acts) - 1 and len(
                        all_data) > 0:  # final action is invalid
                    conf_t = self.batch_score(all_data, all_acts, all_rois,
                                              all_image, objs_color, task)
                    confs = confs + conf_t
                    all_data, all_acts, all_rois, all_image = [], [], [], []
                continue
            num_valid_act_idx.append(1)
            successes.append(sim.status == phyre.SimulationStatus.SOLVED)
            if self.score_with_heuristic or self.score_with_mask:
                # parse object, prepare input for network:
                image = cv2.resize(sim.images[0],
                                   (self.input_width, self.input_height),
                                   interpolation=cv2.INTER_NEAREST)
                all_image.append(image[::-1])
                # for heuristic method to detect goal location, need to flip
                image = phyre.observations_to_float_rgb(image)
                objs_color = sim.featurized_objects.colors
                objs_valid = [('BLACK' not in obj_color) and
                              ('PURPLE' not in obj_color)
                              for obj_color in objs_color]
                objs = sim.featurized_objects.features[:, objs_valid, :]
                objs_color = np.array(objs_color)[objs_valid]
                num_objs = objs.shape[1]
                if use_cache:
                    boxes = all_boxes[valid_act_cnt]
                    valid_act_cnt += 1
                else:
                    # Tight per-object boxes from single-object rasterization,
                    # rescaled from scene coordinates to network input size.
                    boxes = np.zeros((1, num_objs, 5))
                    for o_id in range(num_objs):
                        mask = phyre.objects_util.featurized_objects_vector_to_raster(
                            objs[0][[o_id]])
                        mask_im = phyre.observations_to_float_rgb(mask)
                        mask_im[mask_im == 1] = 0
                        mask_im = mask_im.sum(-1) > 0
                        [h, w] = np.where(mask_im)
                        x1, x2, y1, y2 = w.min(), w.max(), h.min(), h.max()
                        x1 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                        x2 *= (self.input_width - 1) / (phyre.SCENE_WIDTH - 1)
                        y1 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                        y2 *= (self.input_height - 1) / (phyre.SCENE_HEIGHT - 1)
                        boxes[0, o_id] = [o_id, x1, y1, x2, y2]
                    all_boxes.append(boxes)
                data = image.transpose((2, 0, 1))[None, None, :]
                data = torch.from_numpy(data.astype(np.float32))
                rois = torch.from_numpy(boxes[..., 1:].astype(
                    np.float32))[None, :]
                all_data.append(data)
                all_rois.append(rois)
            elif self.score_with_act:
                # Score from the initial scene plus the raw action vector.
                init = np.ascontiguousarray(
                    simulator.initial_scenes[task_id][::-1])
                init128 = cv2.resize(init,
                                     (self.input_width, self.input_height),
                                     interpolation=cv2.INTER_NEAREST)
                all_data.append(torch.from_numpy(init128))
                all_acts.append(torch.from_numpy(act[None, :]))
            elif self.score_with_vid_cls:
                # Score from the whole (resized, flipped) rollout video.
                rst_images = np.stack([
                    np.ascontiguousarray(
                        cv2.resize(rst_image,
                                   (self.input_width, self.input_height),
                                   interpolation=cv2.INTER_NEAREST)[::-1])
                    for rst_image in sim.images
                ])
                all_data.append(torch.from_numpy(rst_images))
            else:
                raise NotImplementedError
            # Score in batches of batched_pred (and flush on the last action).
            if len(all_data) % batched_pred == 0 or act_id == len(acts) - 1:
                conf_t = self.batch_score(all_data, all_acts, all_rois,
                                          all_image, objs_color, task)
                confs = confs + conf_t
                all_data, all_acts, all_rois, all_image = [], [], [], []
        if self.score_with_heuristic or self.score_with_mask:
            if not use_cache:
                all_boxes = np.stack(all_boxes)
                hickle.dump(all_boxes,
                            boxes_cache_name,
                            mode='w',
                            compression='gzip')
            else:
                assert valid_act_cnt == len(all_boxes)
        # Binary classification accuracy at a 0.5 confidence threshold.
        pred = np.array(confs) >= 0.5
        labels = np.array(successes)
        pos_all += (labels == 1).sum()
        neg_all += (labels == 0).sum()
        pos_correct += (pred == labels)[labels == 1].sum()
        neg_correct += (pred == labels)[labels == 0].sum()
        pos_acc = (pred == labels)[labels == 1].sum() / (labels == 1).sum()
        neg_acc = (pred == labels)[labels == 0].sum() / (labels == 0).sum()
        info = f'{pos_acc * 100:.1f} / {neg_acc * 100:.1f} '
        # info = f'{task}: '
        for j, num_acts in enumerate(num_all_actions):
            # Map the action budget onto the count of *valid* actions.
            num_valid = np.sum(num_valid_act_idx[:num_acts])
            top_acc = np.array(
                successes[:num_valid])[np.argsort(confs[:num_valid])[::-1]]
            for i in range(100):
                auccess[j, task_id, i] = int(np.sum(top_acc[:i + 1]) > 0)
            # AUCCESS weighting: log-spaced weights over attempts 1..100.
            # NOTE: `w` shadows the pixel-column array from the box loop above.
            w = np.array([np.log(k + 1) - np.log(k) for k in range(1, 101)])
            s = auccess[j, :task_id + 1].sum(0) / auccess[j, :task_id +
                                                          1].shape[0]
            info += f'{np.sum(w * s) / np.sum(w) * 100:.2f} {np.sum(successes[:num_valid])}/{num_acts // 1000}k | '
        pprint(info)
    # NOTE(review): pprint is called with multiple positional args here;
    # stdlib pprint.pprint would treat the 2nd arg as a stream — presumably
    # this is a project-local print helper; confirm.
    pprint(pos_correct, pos_all, pos_correct / pos_all)
    pprint(neg_correct, neg_all, neg_correct / neg_all)
    cache_output_dir = f'{self.output_dir.replace("figures/", "")}/' \
                       f'{self.proposal_setting}_{self.method}_{protocal}_fold_{fold_id}/'
    os.makedirs(cache_output_dir, exist_ok=True)
    print(cache_output_dir)
    stats = {
        'auccess': auccess,
        'p_c': pos_correct,
        'p_a': pos_all,
        'n_c': neg_correct,
        'n_a': neg_all,
    }
    with open(f'{cache_output_dir}/{start_id}_{end_id}.pkl', 'wb') as f:
        pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)